2019-07-10 23:12:15 +03:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0 */
|
2017-10-06 01:41:44 +03:00
|
|
|
#ifndef _BCACHEFS_IO_H
|
|
|
|
#define _BCACHEFS_IO_H
|
2017-03-20 02:56:34 +03:00
|
|
|
|
2017-12-14 00:01:18 +03:00
|
|
|
#include "checksum.h"
|
2021-01-08 03:49:15 +03:00
|
|
|
#include "bkey_buf.h"
|
2017-03-20 02:56:34 +03:00
|
|
|
#include "io_types.h"
|
|
|
|
|
|
|
|
/*
 * container_of() helpers: recover the bcachefs write/read bio wrapper
 * from the embedded struct bio.
 */
#define to_wbio(_bio)			\
	container_of((_bio), struct bch_write_bio, bio)

#define to_rbio(_bio)			\
	container_of((_bio), struct bch_read_bio, bio)
|
|
|
|
|
|
|
|
/* Allocate/free bio pages from the filesystem's bio bounce page pool: */
void bch2_bio_free_pages_pool(struct bch_fs *, struct bio *);
void bch2_bio_alloc_pages_pool(struct bch_fs *, struct bio *, size_t);
|
2017-12-14 00:01:18 +03:00
|
|
|
|
2023-02-10 02:34:08 +03:00
|
|
|
/*
 * Per-device IO latency accounting; compiles to a no-op stub when
 * CONFIG_BCACHEFS_NO_LATENCY_ACCT is set.
 */
#ifndef CONFIG_BCACHEFS_NO_LATENCY_ACCT
void bch2_latency_acct(struct bch_dev *, u64, int);
#else
static inline void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw) {}
#endif
|
2017-03-20 02:56:34 +03:00
|
|
|
|
2017-06-14 04:06:05 +03:00
|
|
|
/* Submit a write bio to every device the key's pointers reference: */
void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
			       enum bch_data_type, const struct bkey_i *, bool);
|
2017-06-14 04:06:05 +03:00
|
|
|
|
2017-12-22 02:00:30 +03:00
|
|
|
/*
 * bcachefs-private blk_status_t code (outside the range the block layer
 * defines), used when the target device has been removed:
 */
#define BLK_STS_REMOVED	((__force blk_status_t)128)

const char *bch2_blk_status_to_str(blk_status_t);
|
|
|
|
|
2023-03-14 19:56:38 +03:00
|
|
|
/*
 * x-macro list of write path flags; expanded twice below to generate both
 * the bit numbers (enum __bch_write_flags) and the masks (enum
 * bch_write_flags).
 */
#define BCH_WRITE_FLAGS()		\
	x(ALLOC_NOWAIT)			\
	x(CACHED)			\
	x(DATA_ENCODED)			\
	x(PAGES_STABLE)			\
	x(PAGES_OWNED)			\
	x(ONLY_SPECIFIED_DEVS)		\
	x(WROTE_DATA_INLINE)		\
	x(FROM_INTERNAL)		\
	x(CHECK_ENOSPC)			\
	x(SYNC)				\
	x(MOVE)				\
	x(IN_WORKER)			\
	x(DONE)				\
	x(IO_ERROR)			\
	x(CONVERT_UNWRITTEN)
|
|
|
|
|
|
|
|
/* Bit numbers for the write flags, generated from BCH_WRITE_FLAGS(): */
enum __bch_write_flags {
#define x(f)	__BCH_WRITE_##f,
	BCH_WRITE_FLAGS()
#undef x
};
|
|
|
|
|
2023-03-14 19:56:38 +03:00
|
|
|
/* Bit masks for the write flags (1U << the corresponding bit number): */
enum bch_write_flags {
#define x(f)	BCH_WRITE_##f = 1U << __BCH_WRITE_##f,
	BCH_WRITE_FLAGS()
#undef x
};
|
2023-02-10 02:34:08 +03:00
|
|
|
|
2017-12-14 00:01:18 +03:00
|
|
|
static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
|
|
|
|
{
|
2023-07-07 11:24:54 +03:00
|
|
|
return op->watermark == BCH_WATERMARK_copygc
|
2017-12-14 00:01:18 +03:00
|
|
|
? op->c->copygc_wq
|
2021-05-31 22:05:33 +03:00
|
|
|
: op->c->btree_update_wq;
|
2017-12-14 00:01:18 +03:00
|
|
|
}
|
|
|
|
|
2021-01-08 03:49:15 +03:00
|
|
|
/* Extent update/punch primitives, implemented in io.c: */
int bch2_sum_sector_overwrites(struct btree_trans *, struct btree_iter *,
			       struct bkey_i *, bool *, s64 *, s64 *);
int bch2_extent_update(struct btree_trans *, subvol_inum,
		       struct btree_iter *, struct bkey_i *,
		       struct disk_reservation *, u64, s64 *, bool);
int bch2_extent_fallocate(struct btree_trans *, subvol_inum, struct btree_iter *,
			  unsigned, struct bch_io_opts, s64 *,
			  struct write_point_specifier);

int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
		   subvol_inum, u64, s64 *);
int bch2_fpunch(struct bch_fs *c, subvol_inum, u64, u64, s64 *);
|
2019-10-11 01:04:36 +03:00
|
|
|
|
2018-02-19 05:43:46 +03:00
|
|
|
/*
 * Initialize a write op with default values before a write.
 *
 * Only the fields listed here are set - this does not zero the whole
 * struct. Checksum and compression defaults are derived from @opts;
 * op->pos is set to the POS_MAX sentinel and op->nr_replicas to 0, so the
 * caller is expected to fill in the actual write position, replication
 * count, etc. before submitting.
 */
static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
				      struct bch_io_opts opts)
{
	op->c			= c;
	op->end_io		= NULL;
	op->flags		= 0;
	op->written		= 0;
	op->error		= 0;
	/* checksum/compression defaults come from the io opts: */
	op->csum_type		= bch2_data_checksum_type(c, opts);
	op->compression_opt	= opts.compression;
	op->nr_replicas		= 0;
	op->nr_replicas_required = c->opts.data_replicas_required;
	op->watermark		= BCH_WATERMARK_normal;
	op->incompressible	= 0;
	op->open_buckets.nr	= 0;
	op->devs_have.nr	= 0;
	op->target		= 0;
	op->opts		= opts;
	op->subvol		= 0;
	op->pos			= POS_MAX;
	op->version		= ZERO_VERSION;
	op->write_point		= (struct write_point_specifier) { 0 };
	op->res			= (struct disk_reservation) { 0 };
	op->new_i_size		= U64_MAX;
	op->i_sectors_delta	= 0;
	op->devs_need_flush	= NULL;
}
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
/* Main write path entry point (closure based): */
void bch2_write(struct closure *);

void bch2_write_point_do_index_updates(struct work_struct *);
|
|
|
|
|
2017-05-13 05:45:15 +03:00
|
|
|
/*
 * Get the bch_write_bio wrapping @bio, zeroing its bookkeeping fields
 * (only the wbio member - the embedded bio itself is left untouched).
 */
static inline struct bch_write_bio *wbio_init(struct bio *bio)
{
	struct bch_write_bio *wbio = to_wbio(bio);

	memset(&wbio->wbio, 0, sizeof(wbio->wbio));
	return wbio;
}
|
|
|
|
|
2023-03-14 19:56:38 +03:00
|
|
|
void bch2_write_op_to_text(struct printbuf *, struct bch_write_op *);

/* Forward declarations for the read path: */
struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;
|
2017-03-20 02:56:34 +03:00
|
|
|
|
2019-08-28 00:36:21 +03:00
|
|
|
int __bch2_read_indirect_extent(struct btree_trans *, unsigned *,
|
2021-01-08 03:49:15 +03:00
|
|
|
struct bkey_buf *);
|
2019-08-28 00:36:21 +03:00
|
|
|
|
|
|
|
static inline int bch2_read_indirect_extent(struct btree_trans *trans,
|
2021-03-17 04:45:21 +03:00
|
|
|
enum btree_id *data_btree,
|
2019-08-28 00:36:21 +03:00
|
|
|
unsigned *offset_into_extent,
|
2021-01-08 03:49:15 +03:00
|
|
|
struct bkey_buf *k)
|
2019-08-28 00:36:21 +03:00
|
|
|
{
|
2021-03-17 04:45:21 +03:00
|
|
|
if (k->k->k.type != KEY_TYPE_reflink_p)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
*data_btree = BTREE_ID_reflink;
|
|
|
|
return __bch2_read_indirect_extent(trans, offset_into_extent, k);
|
2019-08-28 00:36:21 +03:00
|
|
|
}
|
2017-06-14 04:06:05 +03:00
|
|
|
|
|
|
|
/* Flags controlling the read path: */
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};
|
2017-03-20 02:56:34 +03:00
|
|
|
|
2020-10-18 01:16:50 +03:00
|
|
|
/* Read a single extent; the full-featured version with failure tracking: */
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);
|
|
|
|
|
2020-10-18 01:16:50 +03:00
|
|
|
/*
 * Convenience wrapper around __bch2_read_extent(): reads into the rbio's
 * own bvec_iter and does no failure tracking (failed == NULL).
 */
static inline void bch2_read_extent(struct btree_trans *trans,
				    struct bch_read_bio *rbio, struct bpos read_pos,
				    enum btree_id data_btree, struct bkey_s_c k,
				    unsigned offset_into_extent, unsigned flags)
{
	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
			   data_btree, k, offset_into_extent, NULL, flags);
}
|
2017-03-20 02:56:34 +03:00
|
|
|
|
2021-03-17 04:45:21 +03:00
|
|
|
void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 subvol_inum, struct bch_io_failures *, unsigned flags);
|
2021-03-17 04:45:21 +03:00
|
|
|
|
|
|
|
static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
|
2021-09-27 01:19:46 +03:00
|
|
|
subvol_inum inum)
|
2021-03-17 04:45:21 +03:00
|
|
|
{
|
|
|
|
struct bch_io_failures failed = { .nr = 0 };
|
|
|
|
|
|
|
|
BUG_ON(rbio->_state);
|
|
|
|
|
|
|
|
rbio->c = c;
|
|
|
|
rbio->start_time = local_clock();
|
2021-09-27 01:19:46 +03:00
|
|
|
rbio->subvol = inum.subvol;
|
2021-03-17 04:45:21 +03:00
|
|
|
|
2021-09-27 01:19:46 +03:00
|
|
|
__bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
|
2021-03-17 04:45:21 +03:00
|
|
|
BCH_READ_RETRY_IF_STALE|
|
|
|
|
BCH_READ_MAY_PROMOTE|
|
|
|
|
BCH_READ_USER_MAPPED);
|
|
|
|
}
|
2019-08-21 20:17:42 +03:00
|
|
|
|
2017-12-22 02:00:30 +03:00
|
|
|
/*
 * Get the bch_read_bio wrapping @bio and initialize the fields that must
 * be valid before a read is submitted.
 */
static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->_state	= 0;
	rbio->promote	= NULL;
	rbio->opts	= opts;
	return rbio;
}
|
2017-03-20 02:56:34 +03:00
|
|
|
|
2018-05-04 21:04:31 +03:00
|
|
|
/* Filesystem-wide IO subsystem setup/teardown: */
void bch2_fs_io_exit(struct bch_fs *);
int bch2_fs_io_init(struct bch_fs *);
|
|
|
|
|
2017-10-06 01:41:44 +03:00
|
|
|
#endif /* _BCACHEFS_IO_H */
|