Update bcachefs sources to fad6d13aa5 fixup! bcachefs: Add persistent counters

Kent Overstreet 2022-05-30 22:36:00 -04:00
parent 962390c0b2
commit 46b2c553aa
29 changed files with 200 additions and 223 deletions

View File

@@ -1 +1 @@
-969fbff4ef3a75ae25ef7cca17dd4e028443bfc2
+fad6d13aa55f96e01cc6ff516cdfea53b2fc9eb1

View File

@@ -294,11 +294,11 @@ static void print_node_ondisk(struct bch_fs *c, struct btree *b)
         n_ondisk = malloc(btree_bytes(c));

-        bio = bio_alloc_bioset(GFP_NOIO,
+        bio = bio_alloc_bioset(ca->disk_sb.bdev,
                         buf_pages(n_ondisk, btree_bytes(c)),
-                        &c->btree_bio);
-        bio_set_dev(bio, ca->disk_sb.bdev);
-        bio->bi_opf = REQ_OP_READ|REQ_META;
+                        REQ_OP_READ|REQ_META,
+                        GFP_NOIO,
+                        &c->btree_bio);
         bio->bi_iter.bi_sector = pick.ptr.offset;
         bch2_bio_map(bio, n_ondisk, btree_bytes(c));

View File

@@ -257,7 +257,7 @@ static void write_data(struct bch_fs *c,
         closure_init_stack(&cl);

-        bio_init(&op.wbio.bio, bv, ARRAY_SIZE(bv));
+        bio_init(&op.wbio.bio, NULL, bv, ARRAY_SIZE(bv), 0);
         bch2_bio_map(&op.wbio.bio, buf, len);

         bch2_write_op_init(&op, c, bch2_opts_to_inode_opts(c->opts));

View File

@@ -88,17 +88,19 @@ int cmd_set_option(int argc, char *argv[])
         bch2_fs_stop(c);
         return ret;
 online:
-        unsigned dev_idx;
-        struct bchfs_handle fs = bchu_fs_open_by_dev(argv[i], &dev_idx);
+        {
+                unsigned dev_idx;
+                struct bchfs_handle fs = bchu_fs_open_by_dev(argv[i], &dev_idx);

-        for (i = 0; i < bch2_opts_nr; i++) {
-                if (!new_opt_strs.by_id[i])
-                        continue;
+                for (i = 0; i < bch2_opts_nr; i++) {
+                        if (!new_opt_strs.by_id[i])
+                                continue;

-                char *path = mprintf("options/%s", bch2_opt_table[i].attr.name);
-
-                write_file_str(fs.sysfs_fd, path, new_opt_strs.by_id[i]);
-                free(path);
+                        char *path = mprintf("options/%s", bch2_opt_table[i].attr.name);
+
+                        write_file_str(fs.sysfs_fd, path, new_opt_strs.by_id[i]);
+                        free(path);
+                }
         }
         return 0;
 }

View File

@@ -233,28 +233,22 @@ enum {
         BIOSET_NEED_RESCUER     = 1 << 1,
 };

-extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+struct bio *bio_alloc_bioset(struct block_device *, unsigned,
+                             unsigned, gfp_t, struct bio_set *);
 extern void bio_put(struct bio *);

 int bio_add_page(struct bio *, struct page *, unsigned, unsigned);

-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+struct bio *bio_alloc_clone(struct block_device *, struct bio *,
+                            gfp_t, struct bio_set *);

 struct bio *bio_kmalloc(gfp_t, unsigned int);

-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
-        return bio_clone_bioset(bio, gfp_mask, NULL);
-}
-
 extern void bio_endio(struct bio *);
 extern void bio_advance(struct bio *, unsigned);

-extern void bio_reset(struct bio *);
+extern void bio_reset(struct bio *, struct block_device *, unsigned);
 void bio_chain(struct bio *, struct bio *);

 extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
@@ -421,20 +415,15 @@ static inline void bio_inc_remaining(struct bio *bio)
         atomic_inc(&bio->__bi_remaining);
 }

-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-        return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
-
-static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
-{
-        return bio_clone_bioset(bio, gfp_mask, NULL);
-}
-
-static inline void bio_init(struct bio *bio, struct bio_vec *table,
-                            unsigned short max_vecs)
+static inline void bio_init(struct bio *bio,
+                            struct block_device *bdev,
+                            struct bio_vec *table,
+                            unsigned short max_vecs,
+                            unsigned int opf)
 {
         memset(bio, 0, sizeof(*bio));
+        bio->bi_bdev = bdev;
+        bio->bi_opf = opf;
         atomic_set(&bio->__bi_remaining, 1);
         atomic_set(&bio->__bi_cnt, 1);
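Seen from the caller's side, the migration pattern these shim prototypes impose (mirroring the post-5.18 block layer convention) is: stop patching in the device and op flags after allocation with bio_set_dev()/bi_opf assignments, and pass them to the allocator or initializer directly. A minimal before/after fragment, reusing placeholder identifiers (ca, c, buf, len) from the surrounding hunks rather than any one real call site:

        /* before: allocate, then patch in device and op flags */
        bio = bio_alloc_bioset(GFP_NOIO, buf_pages(buf, len), &c->btree_bio);
        bio_set_dev(bio, ca->disk_sb.bdev);
        bio->bi_opf = REQ_OP_READ|REQ_META;

        /* after: device, op flags and gfp mask all go to the allocator */
        bio = bio_alloc_bioset(ca->disk_sb.bdev,
                               buf_pages(buf, len),
                               REQ_OP_READ|REQ_META,
                               GFP_NOIO,
                               &c->btree_bio);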

View File

@@ -29,7 +29,7 @@ struct kset;
 struct kobj_type {
         void (*release)(struct kobject *kobj);
         const struct sysfs_ops *sysfs_ops;
-        struct attribute **default_attrs;
+        const struct attribute_group **default_groups;
         const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
         const void *(*namespace)(struct kobject *kobj);
 };
@@ -48,7 +48,7 @@ struct kobj_attribute {
 struct kobject {
         struct kobject          *parent;
         struct kset             *kset;
-        struct kobj_type        *ktype;
+        const struct kobj_type  *ktype;
         struct kernfs_node      *sd; /* sysfs directory entry */
         atomic_t                ref;
         unsigned int state_initialized:1;
@@ -64,7 +64,7 @@ struct kset {
 #define kobject_add(...)        0

-static inline void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+static inline void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
 {
         memset(kobj, 0, sizeof(*kobj));
@@ -77,7 +77,7 @@ static inline void kobject_del(struct kobject *kobj);
 static inline void kobject_cleanup(struct kobject *kobj)
 {
-        struct kobj_type *t = kobj->ktype;
+        const struct kobj_type *t = kobj->ktype;

         /* remove from sysfs if the caller did not do it */
         if (kobj->state_in_sysfs)

View File

@@ -10,6 +10,10 @@ struct attribute {
         umode_t                 mode;
 };

+struct attribute_group {
+        struct attribute        **attrs;
+};
+
 struct sysfs_ops {
         ssize_t (*show)(struct kobject *, struct attribute *, char *);
         ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
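With default_attrs gone from kobj_type (see the kobject.h hunks above), a bare attribute array now has to be wrapped in an attribute_group and exposed through default_groups. A minimal sketch of that wiring, with hypothetical names; this is the same shape the KTYPE() macro in the super.c hunk further down expands to:

static struct attribute *example_files[] = {
        &example_attr.attr,
        NULL
};

static const struct attribute_group example_group = {
        .attrs = example_files
};

static const struct attribute_group *example_groups[] = {
        &example_group,
        NULL
};

static const struct kobj_type example_ktype = {
        .release        = example_release,
        .sysfs_ops      = &example_sysfs_ops,
        .default_groups = example_groups
};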

View File

@@ -639,8 +639,8 @@ union bch_extent_entry {
 struct bch_btree_ptr {
         struct bch_val          v;

-        struct bch_extent_ptr   start[0];
         __u64                   _data[0];
+        struct bch_extent_ptr   start[];
 } __attribute__((packed, aligned(8)));

 struct bch_btree_ptr_v2 {
@@ -651,8 +651,8 @@ struct bch_btree_ptr_v2 {
         __le16                  sectors_written;
         __le16                  flags;
         struct bpos             min_key;
-        struct bch_extent_ptr   start[0];
         __u64                   _data[0];
+        struct bch_extent_ptr   start[];
 } __attribute__((packed, aligned(8)));

 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,   struct bch_btree_ptr_v2, flags, 0, 1);
@@ -660,8 +660,8 @@ LE16_BITMASK(BTREE_PTR_RANGE_UPDATED, struct bch_btree_ptr_v2, flags, 0, 1);
 struct bch_extent {
         struct bch_val          v;

-        union bch_extent_entry  start[0];
         __u64                   _data[0];
+        union bch_extent_entry  start[];
 } __attribute__((packed, aligned(8)));

 struct bch_reservation {
@@ -982,7 +982,7 @@ struct bch_stripe {
         __u8                    csum_type;
         __u8                    pad;

-        struct bch_extent_ptr   ptrs[0];
+        struct bch_extent_ptr   ptrs[];
 } __attribute__((packed, aligned(8)));

 /* Reflink: */
@@ -1262,19 +1262,19 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
 struct bch_replicas_entry_v0 {
         __u8                    data_type;
         __u8                    nr_devs;
-        __u8                    devs[0];
+        __u8                    devs[];
 } __attribute__((packed));

 struct bch_sb_field_replicas_v0 {
         struct bch_sb_field     field;
-        struct bch_replicas_entry_v0 entries[0];
+        struct bch_replicas_entry_v0 entries[];
 } __attribute__((packed, aligned(8)));

 struct bch_replicas_entry {
         __u8                    data_type;
         __u8                    nr_devs;
         __u8                    nr_required;
-        __u8                    devs[0];
+        __u8                    devs[];
 } __attribute__((packed));

 #define replicas_entry_bytes(_i)                                        \
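These hunks replace GNU zero-length arrays with C99 flexible array members; the on-disk layout is unchanged, only the declaration of the trailing variable-size member differs. A toy illustration of the pattern with a made-up struct (not one of the bcachefs types above):

#include <stdlib.h>
#include <stdint.h>

struct example_entry {
        uint8_t         data_type;
        uint8_t         nr_devs;
        uint8_t         devs[];         /* was: devs[0] */
} __attribute__((packed));

/* allocation is unchanged: the flexible member contributes nothing to sizeof() */
static struct example_entry *example_alloc(unsigned nr_devs)
{
        struct example_entry *e = calloc(1, sizeof(*e) + nr_devs * sizeof(e->devs[0]));

        if (e)
                e->nr_devs = nr_devs;
        return e;
}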

View File

@@ -202,9 +202,10 @@ static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
 {
         struct pack_state out_s = pack_state_init(out_f, out);
         struct unpack_state in_s = unpack_state_init(in_f, in);
+        u64 *w = out->_data;
         unsigned i;

-        out->_data[0] = 0;
+        *w = 0;

         for (i = 0; i < BKEY_NR_FIELDS; i++)
                 if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
@@ -293,12 +294,13 @@ bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
                    const struct bkey_format *format)
 {
         struct pack_state state = pack_state_init(format, out);
+        u64 *w = out->_data;

         EBUG_ON((void *) in == (void *) out);
         EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
         EBUG_ON(in->format != KEY_FORMAT_CURRENT);

-        out->_data[0] = 0;
+        *w = 0;

 #define x(id, field)    if (!set_inc_field(&state, id, in->field)) return false;
         bkey_fields()
@@ -440,6 +442,7 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
 {
         const struct bkey_format *f = &b->format;
         struct pack_state state = pack_state_init(f, out);
+        u64 *w = out->_data;
 #ifdef CONFIG_BCACHEFS_DEBUG
         struct bpos orig = in;
 #endif
@@ -452,7 +455,7 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
          * enough - we need to make sure to zero them out:
          */
         for (i = 0; i < f->key_u64s; i++)
-                out->_data[i] = 0;
+                w[i] = 0;

         if (unlikely(in.snapshot <
                      le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {

View File

@@ -1155,8 +1155,7 @@ static void btree_node_read_work(struct work_struct *work)
                 bch_info(c, "retrying read");
                 ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
                 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
-                bio_reset(bio);
-                bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
+                bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
                 bio->bi_iter.bi_sector = rb->pick.ptr.offset;
                 bio->bi_iter.bi_size = btree_bytes(c);
@@ -1434,8 +1433,10 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
         for (i = 0; i < ra->nr; i++) {
                 ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
-                ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
-                                                              btree_bytes(c)),
+                ra->bio[i] = bio_alloc_bioset(NULL,
+                                              buf_pages(ra->buf[i], btree_bytes(c)),
+                                              REQ_OP_READ|REQ_SYNC|REQ_META,
+                                              GFP_NOFS,
                                               &c->btree_bio);
         }
@@ -1451,7 +1452,6 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
                 rb->have_ioref = bch2_dev_get_ioref(ca, READ);
                 rb->idx = i;
                 rb->pick = pick;
-                rb->bio.bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
                 rb->bio.bi_iter.bi_sector = pick.ptr.offset;
                 rb->bio.bi_end_io = btree_node_read_all_replicas_endio;
                 bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
@@ -1509,8 +1509,10 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
         ca = bch_dev_bkey_exists(c, pick.ptr.dev);

-        bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
-                                                   btree_bytes(c)),
+        bio = bio_alloc_bioset(NULL,
+                               buf_pages(b->data, btree_bytes(c)),
+                               REQ_OP_READ|REQ_SYNC|REQ_META,
+                               GFP_NOIO,
                                &c->btree_bio);
         rb = container_of(bio, struct btree_read_bio, bio);
         rb->c = c;
@@ -1520,7 +1522,6 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
         rb->have_ioref = bch2_dev_get_ioref(ca, READ);
         rb->pick = pick;
         INIT_WORK(&rb->work, btree_node_read_work);
-        bio->bi_opf = REQ_OP_READ|REQ_SYNC|REQ_META;
         bio->bi_iter.bi_sector = pick.ptr.offset;
         bio->bi_end_io = btree_node_read_endio;
         bch2_bio_map(bio, b->data, btree_bytes(c));
@@ -1974,8 +1975,10 @@ do_write:
         trace_btree_write(b, bytes_to_write, sectors_to_write);

-        wbio = container_of(bio_alloc_bioset(GFP_NOIO,
+        wbio = container_of(bio_alloc_bioset(NULL,
                                 buf_pages(data, sectors_to_write << 9),
+                                REQ_OP_WRITE|REQ_META,
+                                GFP_NOIO,
                                 &c->btree_bio),
                             struct btree_write_bio, wbio.bio);
         wbio_init(&wbio->wbio.bio);
@@ -1985,7 +1988,6 @@ do_write:
         wbio->wbio.c = c;
         wbio->wbio.used_mempool = used_mempool;
         wbio->wbio.first_btree_write = !b->written;
-        wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
         wbio->wbio.bio.bi_end_io = btree_node_write_endio;
         wbio->wbio.bio.bi_private = b;

View File

@@ -1672,10 +1672,11 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
                             struct btree_path *src)
 {
-        unsigned i;
+        unsigned i, offset = offsetof(struct btree_path, pos);

-        memcpy(&dst->pos, &src->pos,
-               sizeof(struct btree_path) - offsetof(struct btree_path, pos));
+        memcpy((void *) dst + offset,
+               (void *) src + offset,
+               sizeof(struct btree_path) - offset);

         for (i = 0; i < BTREE_MAX_DEPTH; i++)
                 if (btree_node_locked(dst, i))
@@ -3197,23 +3198,16 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
  */
 void bch2_trans_begin(struct btree_trans *trans)
 {
-        struct btree_insert_entry *i;
         struct btree_path *path;

-        trans_for_each_update(trans, i)
-                __btree_path_put(i->path, true);
+        bch2_trans_reset_updates(trans);

-        memset(&trans->journal_res, 0, sizeof(trans->journal_res));
-        trans->extra_journal_res        = 0;
-        trans->nr_updates               = 0;
         trans->mem_top                  = 0;
-        trans->hooks                    = NULL;
-        trans->extra_journal_entries.nr = 0;

         if (trans->fs_usage_deltas) {
                 trans->fs_usage_deltas->used = 0;
-                memset(&trans->fs_usage_deltas->memset_start, 0,
+                memset((void *) trans->fs_usage_deltas +
+                       offsetof(struct replicas_delta_list, memset_start), 0,
                        (void *) &trans->fs_usage_deltas->memset_end -
                        (void *) &trans->fs_usage_deltas->memset_start);
         }
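The memcpy()/memset() rewrites in this hunk and the ones that follow express the range as base pointer plus offsetof() over the containing object, rather than as a pointer to one member that the copy then runs past. A small sketch of the pattern with a hypothetical struct (it keeps the GNU/kernel-style void-pointer arithmetic used in the diff):

#include <stddef.h>
#include <string.h>

struct foo {
        int     a;
        int     b;      /* copy everything from here to the end of foo */
        int     c;
};

static void foo_copy_tail(struct foo *dst, const struct foo *src)
{
        size_t offset = offsetof(struct foo, b);

        memcpy((void *) dst + offset,
               (void *) src + offset,
               sizeof(struct foo) - offset);
}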

View File

@@ -140,4 +140,17 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
              (_i) < (_trans)->updates + (_trans)->nr_updates;           \
              (_i)++)

+static inline void bch2_trans_reset_updates(struct btree_trans *trans)
+{
+        struct btree_insert_entry *i;
+
+        trans_for_each_update(trans, i)
+                bch2_path_put(trans, i->path, true);
+
+        trans->extra_journal_res        = 0;
+        trans->nr_updates               = 0;
+        trans->hooks                    = NULL;
+        trans->extra_journal_entries.nr = 0;
+}
+
 #endif /* _BCACHEFS_BTREE_UPDATE_H */

View File

@@ -1166,17 +1166,12 @@ out:
         if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
                 percpu_ref_put(&c->writes);
 out_reset:
-        trans_for_each_update(trans, i)
-                bch2_path_put(trans, i->path, true);
-
-        trans->extra_journal_res        = 0;
-        trans->nr_updates               = 0;
-        trans->hooks                    = NULL;
-        trans->extra_journal_entries.nr = 0;
+        bch2_trans_reset_updates(trans);

         if (trans->fs_usage_deltas) {
                 trans->fs_usage_deltas->used = 0;
-                memset(&trans->fs_usage_deltas->memset_start, 0,
+                memset((void *) trans->fs_usage_deltas +
+                       offsetof(struct replicas_delta_list, memset_start), 0,
                        (void *) &trans->fs_usage_deltas->memset_end -
                        (void *) &trans->fs_usage_deltas->memset_start);
         }

View File

@@ -466,7 +466,8 @@ static inline void update_replicas_list(struct btree_trans *trans,
         n = (void *) d->d + d->used;
         n->delta = sectors;
-        memcpy(&n->r, r, replicas_entry_bytes(r));
+        memcpy((void *) n + offsetof(struct replicas_delta, r),
+               r, replicas_entry_bytes(r));
         bch2_replicas_entry_sort(&n->r);
         d->used += b;
 }

View File

@@ -87,18 +87,18 @@ int bch2_sb_counters_from_cpu(struct bch_fs *c)
         return 0;
 }

+void bch2_fs_counters_exit(struct bch_fs *c)
+{
+        free_percpu(c->counters);
+}
+
 int bch2_fs_counters_init(struct bch_fs *c)
 {
-        int ret = 0;
-
         c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
         if (!c->counters)
                 return -ENOMEM;
-        ret = bch2_sb_counters_to_cpu(c);

-        return ret;
+        return bch2_sb_counters_to_cpu(c);
 }

 const struct bch_sb_field_ops bch_sb_field_ops_counters = {

View File

@@ -6,11 +6,11 @@
 #include "super-io.h"

-int bch2_sb_counters_to_cpu(struct bch_fs *c);
-
-int bch2_sb_counters_from_cpu(struct bch_fs *c);
-
-int bch2_fs_counters_init(struct bch_fs *c);
+int bch2_sb_counters_to_cpu(struct bch_fs *);
+int bch2_sb_counters_from_cpu(struct bch_fs *);
+
+void bch2_fs_counters_exit(struct bch_fs *);
+int bch2_fs_counters_init(struct bch_fs *);

 extern const struct bch_sb_field_ops bch_sb_field_ops_counters;

View File

@@ -43,11 +43,11 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
         if (!bch2_dev_get_ioref(ca, READ))
                 return false;

-        bio = bio_alloc_bioset(GFP_NOIO,
+        bio = bio_alloc_bioset(ca->disk_sb.bdev,
                         buf_pages(n_sorted, btree_bytes(c)),
-                        &c->btree_bio);
-        bio_set_dev(bio, ca->disk_sb.bdev);
-        bio->bi_opf = REQ_OP_READ|REQ_META;
+                        REQ_OP_READ|REQ_META,
+                        GFP_NOIO,
+                        &c->btree_bio);
         bio->bi_iter.bi_sector = pick.ptr.offset;
         bch2_bio_map(bio, n_sorted, btree_bytes(c));

View File

@@ -412,7 +412,10 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
                         nr_iovecs << PAGE_SHIFT);
         struct ec_bio *ec_bio;

-        ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
+        ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
+                                               nr_iovecs,
+                                               rw,
+                                               GFP_KERNEL,
                                                &c->ec_bioset),
                               struct ec_bio, bio);
@@ -420,9 +423,6 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
         ec_bio->buf                     = buf;
         ec_bio->idx                     = idx;

-        bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
-        bio_set_op_attrs(&ec_bio->bio, rw, 0);
-
         ec_bio->bio.bi_iter.bi_sector   = ptr->offset + buf->offset + (offset >> 9);
         ec_bio->bio.bi_end_io           = ec_block_endio;
         ec_bio->bio.bi_private          = cl;

View File

@@ -842,13 +842,12 @@ out:
         return ret;
 }

-void bch2_invalidatepage(struct page *page, unsigned int offset,
-                         unsigned int length)
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
-        if (offset || length < PAGE_SIZE)
+        if (offset || length < folio_size(folio))
                 return;

-        bch2_clear_page_bits(page);
+        bch2_clear_page_bits(&folio->page);
 }

 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
@@ -1139,12 +1138,12 @@ void bch2_readahead(struct readahead_control *ractl)
                                    readpages_iter.idx,
                                    BIO_MAX_VECS);
                 struct bch_read_bio *rbio =
-                        rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
+                        rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+                                                   GFP_NOFS, &c->bio_read),
                                   opts);

                 readpages_iter.idx++;

-                bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
                 rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
                 rbio->bio.bi_end_io = bch2_readpages_end_io;
                 BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
@@ -1183,7 +1182,7 @@ int bch2_readpage(struct file *file, struct page *page)
         struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
         struct bch_read_bio *rbio;

-        rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
+        rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read), opts);
         rbio->bio.bi_end_io = bch2_readpages_end_io;

         __bchfs_readpage(c, rbio, inode_inum(inode), page);
@@ -1204,7 +1203,7 @@ static int bch2_read_single_page(struct page *page,
         int ret;
         DECLARE_COMPLETION_ONSTACK(done);

-        rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
+        rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
                          io_opts(c, &inode->ei_inode));
         rbio->bio.bi_private = &done;
         rbio->bio.bi_end_io = bch2_read_single_page_end_io;
@@ -1339,7 +1338,9 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 {
         struct bch_write_op *op;

-        w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
+        w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+                                              REQ_OP_WRITE,
+                                              GFP_NOFS,
                                               &c->writepage_bioset),
                              struct bch_writepage_io, op.wbio.bio);
@@ -1916,8 +1917,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
         shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
         iter->count -= shorten;

-        bio = bio_alloc_bioset(GFP_KERNEL,
+        bio = bio_alloc_bioset(NULL,
                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+                               REQ_OP_READ,
+                               GFP_KERNEL,
                                &c->dio_read_bioset);

         bio->bi_end_io = bch2_direct_IO_read_endio;
@@ -1951,8 +1954,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
                 goto start;
         while (iter->count) {
-                bio = bio_alloc_bioset(GFP_KERNEL,
+                bio = bio_alloc_bioset(NULL,
                                        bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+                                       REQ_OP_READ,
+                                       GFP_KERNEL,
                                        &c->bio_read);
                 bio->bi_end_io = bch2_direct_IO_read_split_endio;
 start:
@@ -2220,7 +2225,7 @@ loop:
                 if (!dio->iter.count)
                         break;

-                bio_reset(bio);
+                bio_reset(bio, NULL, REQ_OP_WRITE);
                 reinit_completion(&dio->done);
         }
@@ -2301,8 +2306,10 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
                 locked = false;
         }

-        bio = bio_alloc_bioset(GFP_KERNEL,
+        bio = bio_alloc_bioset(NULL,
                                bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+                               REQ_OP_WRITE,
+                               GFP_KERNEL,
                                &c->dio_write_bioset);
         dio = container_of(bio, struct dio_write, op.wbio.bio);
         init_completion(&dio->done);

View File

@@ -41,7 +41,7 @@ loff_t bch2_llseek(struct file *, loff_t, int);
 vm_fault_t bch2_page_fault(struct vm_fault *);
 vm_fault_t bch2_page_mkwrite(struct vm_fault *);
-void bch2_invalidatepage(struct page *, unsigned int, unsigned int);
+void bch2_invalidate_folio(struct folio *, size_t, size_t);
 int bch2_releasepage(struct page *, gfp_t);
 int bch2_migrate_page(struct address_space *, struct page *,
                       struct page *, enum migrate_mode);

View File

@@ -1115,10 +1115,10 @@ static const struct address_space_operations bch_address_space_operations = {
         .readpage       = bch2_readpage,
         .writepages     = bch2_writepages,
         .readahead      = bch2_readahead,
-        .set_page_dirty = __set_page_dirty_nobuffers,
+        .dirty_folio    = filemap_dirty_folio,
         .write_begin    = bch2_write_begin,
         .write_end      = bch2_write_end,
-        .invalidatepage = bch2_invalidatepage,
+        .invalidate_folio = bch2_invalidate_folio,
         .releasepage    = bch2_releasepage,
         .direct_IO      = noop_direct_IO,
 #ifdef CONFIG_MIGRATION

View File

@@ -141,9 +141,9 @@ static noinline int bch2_inode_unpack_v1(struct bkey_s_c_inode inode,
 #define x(_name, _bits)                                                 \
         if (fieldnr++ == INODE_NR_FIELDS(inode.v)) {                    \
-                memset(&unpacked->_name, 0,                             \
-                       sizeof(*unpacked) -                              \
-                       offsetof(struct bch_inode_unpacked, _name));     \
+                unsigned offset = offsetof(struct bch_inode_unpacked, _name);\
+                memset((void *) unpacked + offset, 0,                   \
+                       sizeof(*unpacked) - offset);                     \
                 return 0;                                               \
         }                                                               \
                                                                         \

View File

@@ -470,8 +470,8 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
                 ca = bch_dev_bkey_exists(c, ptr->dev);

                 if (to_entry(ptr + 1) < ptrs.end) {
-                        n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
-                                                   &ca->replica_set));
+                        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
+                                                    GFP_NOIO, &ca->replica_set));

                         n->bio.bi_end_io        = wbio->bio.bi_end_io;
                         n->bio.bi_private       = wbio->bio.bi_private;
@@ -701,7 +701,8 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
         pages = min(pages, BIO_MAX_VECS);

-        bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
+        bio = bio_alloc_bioset(NULL, pages, 0,
+                               GFP_NOIO, &c->bio_write);
         wbio                    = wbio_init(bio);
         wbio->put_bio           = true;
         /* copy WRITE_SYNC flag */
@@ -1442,7 +1443,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
                 goto err;

         rbio_init(&(*rbio)->bio, opts);
-        bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);
+        bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);

         if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
                                  GFP_NOIO))
@@ -1457,7 +1458,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
                 goto err;

         bio = &op->write.op.wbio.bio;
-        bio_init(bio, bio->bi_inline_vecs, pages);
+        bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);

         ret = bch2_migrate_write_init(c, &op->write,
                         writepoint_hashed((unsigned long) current),
@@ -2139,8 +2140,10 @@ get_bio:
         } else if (bounce) {
                 unsigned sectors = pick.crc.compressed_size;

-                rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
+                rbio = rbio_init(bio_alloc_bioset(NULL,
                                         DIV_ROUND_UP(sectors, PAGE_SECTORS),
+                                        0,
+                                        GFP_NOIO,
                                         &c->bio_read_split),
                                  orig->opts);
@@ -2156,8 +2159,8 @@ get_bio:
                  * from the whole bio, in which case we don't want to retry and
                  * lose the error)
                  */
-                rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
-                                                &c->bio_read_split),
+                rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
+                                                 &c->bio_read_split),
                                  orig->opts);
                 rbio->bio.bi_iter       = iter;
                 rbio->split             = true;

View File

@@ -1523,12 +1523,10 @@ static void do_journal_write(struct closure *cl)
                                  sectors);

                 bio = ca->journal.bio;
-                bio_reset(bio);
-                bio_set_dev(bio, ca->disk_sb.bdev);
+                bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
                 bio->bi_iter.bi_sector  = ptr->offset;
                 bio->bi_end_io          = journal_write_endio;
                 bio->bi_private         = ca;
-                bio->bi_opf             = REQ_OP_WRITE|REQ_SYNC|REQ_META;

                 BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
                 ca->prev_journal_sector = bio->bi_iter.bi_sector;
@@ -1706,9 +1704,7 @@ retry_alloc:
                 percpu_ref_get(&ca->io_ref);

                 bio = ca->journal.bio;
-                bio_reset(bio);
-                bio_set_dev(bio, ca->disk_sb.bdev);
-                bio->bi_opf             = REQ_OP_FLUSH;
+                bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
                 bio->bi_end_io          = journal_write_endio;
                 bio->bi_private         = ca;
                 closure_bio_submit(bio, cl);

View File

@@ -548,7 +548,7 @@ static int bch2_move_extent(struct btree_trans *trans,
         io->read_sectors        = k.k->size;
         io->write_sectors       = k.k->size;

-        bio_init(&io->write.op.wbio.bio, io->bi_inline_vecs, pages);
+        bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
         bio_set_prio(&io->write.op.wbio.bio,
                      IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
@@ -558,7 +558,7 @@ static int bch2_move_extent(struct btree_trans *trans,
         io->rbio.c              = c;
         io->rbio.opts           = io_opts;
-        bio_init(&io->rbio.bio, io->bi_inline_vecs, pages);
+        bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
         io->rbio.bio.bi_vcnt = pages;
         bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
         io->rbio.bio.bi_iter.bi_size = sectors << 9;

View File

@@ -528,10 +528,8 @@ static int read_one_super(struct bch_sb_handle *sb, u64 offset, struct printbuf
         size_t bytes;
         int ret;
 reread:
-        bio_reset(sb->bio);
-        bio_set_dev(sb->bio, sb->bdev);
+        bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
         sb->bio->bi_iter.bi_sector = offset;
-        bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
         bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);

         ret = submit_bio_wait(sb->bio);
@@ -659,10 +657,8 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
          * Error reading primary superblock - read location of backup
          * superblocks:
          */
-        bio_reset(sb->bio);
-        bio_set_dev(sb->bio, sb->bdev);
+        bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
         sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
-        bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
         /*
          * use sb buffer to read layout, since sb buffer is page aligned but
          * layout won't be:
@@ -746,12 +742,10 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
         struct bch_sb *sb = ca->disk_sb.sb;
         struct bio *bio = ca->disk_sb.bio;

-        bio_reset(bio);
-        bio_set_dev(bio, ca->disk_sb.bdev);
+        bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
         bio->bi_iter.bi_sector  = le64_to_cpu(sb->layout.sb_offset[0]);
         bio->bi_end_io          = write_super_endio;
         bio->bi_private         = ca;
-        bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC|REQ_META);
         bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);

         this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
@@ -772,12 +766,10 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
         sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
                                 null_nonce(), sb);

-        bio_reset(bio);
-        bio_set_dev(bio, ca->disk_sb.bdev);
+        bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
         bio->bi_iter.bi_sector  = le64_to_cpu(sb->offset);
         bio->bi_end_io          = write_super_endio;
         bio->bi_private         = ca;
-        bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
         bch2_bio_map(bio, sb,
                      roundup((size_t) vstruct_bytes(sb),
                              bdev_logical_block_size(ca->disk_sb.bdev)));

View File

@@ -50,7 +50,6 @@
 #include <linux/blkdev.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
-#include <linux/genhd.h>
 #include <linux/idr.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
@@ -64,10 +63,19 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");

 #define KTYPE(type)                                                     \
-struct kobj_type type ## _ktype = {                                     \
+static const struct attribute_group type ## _group = {                  \
+        .attrs = type ## _files                                         \
+};                                                                      \
+                                                                        \
+static const struct attribute_group *type ## _groups[] = {              \
+        &type ## _group,                                                \
+        NULL                                                            \
+};                                                                      \
+                                                                        \
+static const struct kobj_type type ## _ktype = {                        \
         .release        = type ## _release,                             \
         .sysfs_ops      = &type ## _sysfs_ops,                          \
-        .default_attrs  = type ## _files                                \
+        .default_groups = type ## _groups                               \
 }

 static void bch2_fs_release(struct kobject *);
@@ -88,12 +96,12 @@ static void bch2_fs_time_stats_release(struct kobject *k)
 {
 }

-static KTYPE(bch2_fs);
-static KTYPE(bch2_fs_counters);
-static KTYPE(bch2_fs_internal);
-static KTYPE(bch2_fs_opts_dir);
-static KTYPE(bch2_fs_time_stats);
-static KTYPE(bch2_dev);
+KTYPE(bch2_fs);
+KTYPE(bch2_fs_counters);
+KTYPE(bch2_fs_internal);
+KTYPE(bch2_fs_opts_dir);
+KTYPE(bch2_fs_time_stats);
+KTYPE(bch2_dev);

 static struct kset *bcachefs_kset;
 static LIST_HEAD(bch_fs_list);
@@ -414,6 +422,7 @@ static void __bch2_fs_free(struct bch_fs *c)
         for (i = 0; i < BCH_TIME_STAT_NR; i++)
                 bch2_time_stats_exit(&c->times[i]);

+        bch2_fs_counters_exit(c);
         bch2_fs_snapshots_exit(c);
         bch2_fs_quota_exit(c);
         bch2_fs_fsio_exit(c);
@@ -774,7 +783,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
             bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
             bch2_fs_btree_iter_init(c) ?:
             bch2_fs_btree_interior_update_init(c) ?:
-            bch2_fs_buckets_waiting_for_journal_init(c);
+            bch2_fs_buckets_waiting_for_journal_init(c) ?:
             bch2_fs_subvolumes_init(c) ?:
             bch2_fs_io_init(c) ?:
             bch2_fs_encryption_init(c) ?:

View File

@@ -305,7 +305,7 @@ static inline void pr_indent_pop(struct printbuf *buf, unsigned spaces)
 {
         if (buf->last_newline + buf->indent == buf->pos) {
                 buf->pos -= spaces;
-                buf->buf[buf->pos] = 0;
+                buf->buf[buf->pos] = '\0';
         }
         buf->indent -= spaces;
 }

View File

@@ -120,29 +120,30 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
         }
 }

-void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
+static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 {
-        /*
-         * most users will be overriding ->bi_bdev with a new target,
-         * so we don't set nor calculate new physical/hw segment counts here
-         */
-        bio->bi_bdev = bio_src->bi_bdev;
         bio_set_flag(bio, BIO_CLONED);
-        bio->bi_opf = bio_src->bi_opf;
+        bio->bi_ioprio = bio_src->bi_ioprio;
         bio->bi_iter = bio_src->bi_iter;
-        bio->bi_io_vec = bio_src->bi_io_vec;
+        return 0;
 }

-struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+                            gfp_t gfp, struct bio_set *bs)
 {
-        struct bio *b;
+        struct bio *bio;

-        b = bio_alloc_bioset(gfp_mask, 0, bs);
-        if (!b)
+        bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+        if (!bio)
                 return NULL;

-        __bio_clone_fast(b, bio);
-        return b;
+        if (__bio_clone(bio, bio_src, gfp) < 0) {
+                bio_put(bio);
+                return NULL;
+        }
+        bio->bi_io_vec = bio_src->bi_io_vec;
+
+        return bio;
 }

 struct bio *bio_split(struct bio *bio, int sectors,
@@ -153,15 +154,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
         BUG_ON(sectors <= 0);
         BUG_ON(sectors >= bio_sectors(bio));

-        /*
-         * Discards need a mutable bio_vec to accommodate the payload
-         * required by the DSM TRIM and UNMAP commands.
-         */
-        if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
-                split = bio_clone_bioset(bio, gfp, bs);
-        else
-                split = bio_clone_fast(bio, gfp, bs);
-
+        split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
         if (!split)
                 return NULL;
@@ -289,12 +282,14 @@ again:
                 bio->bi_end_io(bio);
 }

-void bio_reset(struct bio *bio)
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
 {
         unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

         memset(bio, 0, BIO_RESET_BYTES);
-        bio->bi_flags = flags;
+        bio->bi_bdev    = bdev;
+        bio->bi_opf     = opf;
+        bio->bi_flags   = flags;
         atomic_set(&bio->__bi_remaining, 1);
 }
@@ -306,7 +301,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
                       sizeof(struct bio_vec) * nr_iovecs, gfp_mask);
         if (unlikely(!bio))
                 return NULL;
-        bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+        bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs, 0);
         bio->bi_pool = NULL;
         return bio;
 }
@@ -332,7 +327,11 @@ static struct bio_vec *bvec_alloc(mempool_t *pool, int *nr_vecs,
         return mempool_alloc(pool, gfp_mask);
 }

-struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(struct block_device *bdev,
+                             unsigned nr_iovecs,
+                             unsigned opf,
+                             gfp_t gfp_mask,
+                             struct bio_set *bs)
 {
         struct bio *bio;
         void *p;
@@ -352,11 +351,11 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
                 if (unlikely(!bvl))
                         goto err_free;

-                bio_init(bio, bvl, nr_iovecs);
+                bio_init(bio, bdev, bvl, nr_iovecs, opf);
         } else if (nr_iovecs) {
-                bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+                bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
         } else {
-                bio_init(bio, NULL, 0);
+                bio_init(bio, bdev, NULL, 0, opf);
         }

         bio->bi_pool = bs;
@@ -367,38 +366,6 @@ err_free:
         return NULL;
 }

-struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
-                             struct bio_set *bs)
-{
-        struct bvec_iter iter;
-        struct bio_vec bv;
-        struct bio *bio;
-
-        bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
-        if (!bio)
-                return NULL;
-
-        bio->bi_bdev            = bio_src->bi_bdev;
-        bio->bi_opf             = bio_src->bi_opf;
-        bio->bi_iter.bi_sector  = bio_src->bi_iter.bi_sector;
-        bio->bi_iter.bi_size    = bio_src->bi_iter.bi_size;
-
-        switch (bio_op(bio)) {
-        case REQ_OP_DISCARD:
-        case REQ_OP_SECURE_ERASE:
-                break;
-        case REQ_OP_WRITE_SAME:
-                bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-                break;
-        default:
-                bio_for_each_segment(bv, bio_src, iter)
-                        bio->bi_io_vec[bio->bi_vcnt++] = bv;
-                break;
-        }
-
-        return bio;
-}
-
 void bioset_exit(struct bio_set *bs)
 {
         mempool_exit(&bs->bio_pool);
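In this shim, bio_alloc_clone() takes over for both bio_clone_fast() and bio_clone_bioset(): the clone shares the parent's bi_io_vec and inherits bi_opf at allocation time, so callers no longer patch the op flags afterwards. A sketch of a migrated caller, mirroring the io.c hunk above (the wbio/ca names come from that hunk and are placeholders here):

        struct bch_write_bio *n;

        /* was: n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO, &ca->replica_set)); */
        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio, GFP_NOIO, &ca->replica_set));
        n->bio.bi_end_io  = wbio->bio.bi_end_io;
        n->bio.bi_private = wbio->bio.bi_private;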