mirror of https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-02 00:00:03 +03:00

Update bcachefs sources to fad6d13aa5 fixup! bcachefs: Add persistent counters

commit 46b2c553aa
parent 962390c0b2
@@ -1 +1 @@
-969fbff4ef3a75ae25ef7cca17dd4e028443bfc2
+fad6d13aa55f96e01cc6ff516cdfea53b2fc9eb1
cmd_debug.c (10 lines changed)
@@ -294,11 +294,11 @@ static void print_node_ondisk(struct bch_fs *c, struct btree *b)
 
 	n_ondisk = malloc(btree_bytes(c));
 
-	bio = bio_alloc_bioset(GFP_NOIO,
-			buf_pages(n_ondisk, btree_bytes(c)),
-			&c->btree_bio);
-	bio_set_dev(bio, ca->disk_sb.bdev);
-	bio->bi_opf = REQ_OP_READ|REQ_META;
+	bio = bio_alloc_bioset(ca->disk_sb.bdev,
+			buf_pages(n_ondisk, btree_bytes(c)),
+			REQ_OP_READ|REQ_META,
+			GFP_NOIO,
+			&c->btree_bio);
 	bio->bi_iter.bi_sector	= pick.ptr.offset;
 	bch2_bio_map(bio, n_ondisk, btree_bytes(c));
@@ -257,7 +257,7 @@ static void write_data(struct bch_fs *c,
 
 	closure_init_stack(&cl);
 
-	bio_init(&op.wbio.bio, bv, ARRAY_SIZE(bv));
+	bio_init(&op.wbio.bio, NULL, bv, ARRAY_SIZE(bv), 0);
 	bch2_bio_map(&op.wbio.bio, buf, len);
 
 	bch2_write_op_init(&op, c, bch2_opts_to_inode_opts(c->opts));
cmd_option.c (18 lines changed)
@@ -88,17 +88,19 @@ int cmd_set_option(int argc, char *argv[])
 	bch2_fs_stop(c);
 	return ret;
 online:
-	unsigned dev_idx;
-	struct bchfs_handle fs = bchu_fs_open_by_dev(argv[i], &dev_idx);
+	{
+		unsigned dev_idx;
+		struct bchfs_handle fs = bchu_fs_open_by_dev(argv[i], &dev_idx);
 
-	for (i = 0; i < bch2_opts_nr; i++) {
-		if (!new_opt_strs.by_id[i])
-			continue;
+		for (i = 0; i < bch2_opts_nr; i++) {
+			if (!new_opt_strs.by_id[i])
+				continue;
 
-		char *path = mprintf("options/%s", bch2_opt_table[i].attr.name);
+			char *path = mprintf("options/%s", bch2_opt_table[i].attr.name);
 
-		write_file_str(fs.sysfs_fd, path, new_opt_strs.by_id[i]);
-		free(path);
+			write_file_str(fs.sysfs_fd, path, new_opt_strs.by_id[i]);
+			free(path);
+		}
 	}
 	return 0;
 }
@@ -233,28 +233,22 @@ enum {
 	BIOSET_NEED_RESCUER	= 1 << 1,
 };
 
-extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+struct bio *bio_alloc_bioset(struct block_device *, unsigned,
+			     unsigned, gfp_t, struct bio_set *);
 extern void bio_put(struct bio *);
 
 int bio_add_page(struct bio *, struct page *, unsigned, unsigned);
 
-extern void __bio_clone_fast(struct bio *, struct bio *);
-extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
+struct bio *bio_alloc_clone(struct block_device *, struct bio *,
+			    gfp_t, struct bio_set *);
 
 struct bio *bio_kmalloc(gfp_t, unsigned int);
 
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
-	return bio_clone_bioset(bio, gfp_mask, NULL);
-}
-
 extern void bio_endio(struct bio *);
 
 extern void bio_advance(struct bio *, unsigned);
 
-extern void bio_reset(struct bio *);
+extern void bio_reset(struct bio *, struct block_device *, unsigned);
 void bio_chain(struct bio *, struct bio *);
 
 extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
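Note: every caller updated in this commit follows the same pattern: the block
device, op flags and gfp mask now travel with the bio_alloc_bioset() call
itself, replacing the separate bio_set_dev()/bi_opf assignments. A minimal
sketch of the new calling convention, assembled from the calls visible in this
diff (buf, len and sector stand in for whatever the caller has; error handling
omitted):

        struct bio *bio;

        /* bdev may also be NULL here and assigned later via bio->bi_bdev */
        bio = bio_alloc_bioset(ca->disk_sb.bdev,        /* block device */
                               buf_pages(buf, len),     /* nr of iovecs */
                               REQ_OP_READ|REQ_META,    /* op + flags */
                               GFP_NOIO,
                               &c->btree_bio);
        bio->bi_iter.bi_sector = sector;
        bch2_bio_map(bio, buf, len);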
@@ -421,20 +415,15 @@ static inline void bio_inc_remaining(struct bio *bio)
 	atomic_inc(&bio->__bi_remaining);
 }
 
-static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
-{
-	return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
-}
-
-static inline struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
-{
-	return bio_clone_bioset(bio, gfp_mask, NULL);
-}
-
-static inline void bio_init(struct bio *bio, struct bio_vec *table,
-			    unsigned short max_vecs)
+static inline void bio_init(struct bio *bio,
+			    struct block_device *bdev,
+			    struct bio_vec *table,
+			    unsigned short max_vecs,
+			    unsigned int opf)
 {
 	memset(bio, 0, sizeof(*bio));
+	bio->bi_bdev = bdev;
+	bio->bi_opf = opf;
 	atomic_set(&bio->__bi_remaining, 1);
 	atomic_set(&bio->__bi_cnt, 1);
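Likewise for bio_init(): an embedded or on-stack bio now takes its bdev and op
flags at initialization time. A reduced sketch of the pattern the callers below
adopt, with NULL bdev and opf 0 filled in later by the submission path:

        struct bio_vec bv[4];
        struct bio bio;

        /* old: bio_init(&bio, bv, ARRAY_SIZE(bv)); */
        bio_init(&bio, NULL, bv, ARRAY_SIZE(bv), 0);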
@@ -29,7 +29,7 @@ struct kset;
 struct kobj_type {
 	void (*release)(struct kobject *kobj);
 	const struct sysfs_ops *sysfs_ops;
-	struct attribute **default_attrs;
+	const struct attribute_group **default_groups;
 	const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
 	const void *(*namespace)(struct kobject *kobj);
 };
@@ -48,7 +48,7 @@ struct kobj_attribute {
 struct kobject {
 	struct kobject		*parent;
 	struct kset		*kset;
-	struct kobj_type	*ktype;
+	const struct kobj_type	*ktype;
 	struct kernfs_node	*sd; /* sysfs directory entry */
 	atomic_t		ref;
 	unsigned int state_initialized:1;
@@ -64,7 +64,7 @@ struct kset {
 
 #define kobject_add(...)	0
 
-static inline void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
+static inline void kobject_init(struct kobject *kobj, const struct kobj_type *ktype)
 {
 	memset(kobj, 0, sizeof(*kobj));
 
@@ -77,7 +77,7 @@ static inline void kobject_del(struct kobject *kobj);
 
 static inline void kobject_cleanup(struct kobject *kobj)
 {
-	struct kobj_type *t = kobj->ktype;
+	const struct kobj_type *t = kobj->ktype;
 
 	/* remove from sysfs if the caller did not do it */
 	if (kobj->state_in_sysfs)
@@ -10,6 +10,10 @@ struct attribute {
 	umode_t			mode;
 };
 
+struct attribute_group {
+	struct attribute	**attrs;
+};
+
 struct sysfs_ops {
 	ssize_t	(*show)(struct kobject *, struct attribute *, char *);
 	ssize_t	(*store)(struct kobject *, struct attribute *, const char *, size_t);
@@ -639,8 +639,8 @@ union bch_extent_entry {
 struct bch_btree_ptr {
 	struct bch_val		v;
 
-	struct bch_extent_ptr	start[0];
 	__u64			_data[0];
+	struct bch_extent_ptr	start[];
 } __attribute__((packed, aligned(8)));
 
 struct bch_btree_ptr_v2 {
@@ -651,8 +651,8 @@ struct bch_btree_ptr_v2 {
 	__le16			sectors_written;
 	__le16			flags;
 	struct bpos		min_key;
-	struct bch_extent_ptr	start[0];
 	__u64			_data[0];
+	struct bch_extent_ptr	start[];
 } __attribute__((packed, aligned(8)));
 
 LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);
@@ -660,8 +660,8 @@ LE16_BITMASK(BTREE_PTR_RANGE_UPDATED,	struct bch_btree_ptr_v2, flags, 0, 1);
 struct bch_extent {
 	struct bch_val		v;
 
-	union bch_extent_entry	start[0];
 	__u64			_data[0];
+	union bch_extent_entry	start[];
 } __attribute__((packed, aligned(8)));
 
 struct bch_reservation {
@@ -982,7 +982,7 @@ struct bch_stripe {
 	__u8			csum_type;
 	__u8			pad;
 
-	struct bch_extent_ptr	ptrs[0];
+	struct bch_extent_ptr	ptrs[];
 } __attribute__((packed, aligned(8)));
 
 /* Reflink: */
@@ -1262,19 +1262,19 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
 struct bch_replicas_entry_v0 {
 	__u8			data_type;
 	__u8			nr_devs;
-	__u8			devs[0];
+	__u8			devs[];
 } __attribute__((packed));
 
 struct bch_sb_field_replicas_v0 {
 	struct bch_sb_field	field;
-	struct bch_replicas_entry_v0 entries[0];
+	struct bch_replicas_entry_v0 entries[];
 } __attribute__((packed, aligned(8)));
 
 struct bch_replicas_entry {
 	__u8			data_type;
 	__u8			nr_devs;
 	__u8			nr_required;
-	__u8			devs[0];
+	__u8			devs[];
 } __attribute__((packed));
 
 #define replicas_entry_bytes(_i)					\
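Note on the [0] -> [] conversions above: the GNU zero-length-array extension is
being replaced by C99 flexible array members, which the compiler can reason
about. A flexible member must be the last member of the struct, which is why
start[] moves below the __u64 _data[0] overlay. Allocation sizing is unchanged;
a reduced sketch using a struct from this hunk (nr_devs is an assumed input):

        /* devs[] contributes nothing to sizeof(); allocations size the
         * trailing array explicitly, as replicas_entry_bytes() does: */
        size_t bytes = offsetof(struct bch_replicas_entry, devs) + nr_devs;
        struct bch_replicas_entry *e = malloc(bytes);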
@@ -202,9 +202,10 @@ static bool bch2_bkey_transform_key(const struct bkey_format *out_f,
 {
 	struct pack_state out_s = pack_state_init(out_f, out);
 	struct unpack_state in_s = unpack_state_init(in_f, in);
+	u64 *w = out->_data;
 	unsigned i;
 
-	out->_data[0] = 0;
+	*w = 0;
 
 	for (i = 0; i < BKEY_NR_FIELDS; i++)
 		if (!set_inc_field(&out_s, i, get_inc_field(&in_s, i)))
@@ -293,12 +294,13 @@ bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
 			const struct bkey_format *format)
 {
 	struct pack_state state = pack_state_init(format, out);
+	u64 *w = out->_data;
 
 	EBUG_ON((void *) in == (void *) out);
 	EBUG_ON(format->nr_fields != BKEY_NR_FIELDS);
 	EBUG_ON(in->format != KEY_FORMAT_CURRENT);
 
-	out->_data[0] = 0;
+	*w = 0;
 
 #define x(id, field)	if (!set_inc_field(&state, id, in->field)) return false;
 	bkey_fields()
@@ -440,6 +442,7 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
 {
 	const struct bkey_format *f = &b->format;
 	struct pack_state state = pack_state_init(f, out);
+	u64 *w = out->_data;
 #ifdef CONFIG_BCACHEFS_DEBUG
 	struct bpos orig = in;
 #endif
@@ -452,7 +455,7 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
 	 * enough - we need to make sure to zero them out:
 	 */
 	for (i = 0; i < f->key_u64s; i++)
-		out->_data[i] = 0;
+		w[i] = 0;
 
 	if (unlikely(in.snapshot <
 		     le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
@@ -1155,8 +1155,7 @@ static void btree_node_read_work(struct work_struct *work)
 		bch_info(c, "retrying read");
 		ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
-		bio_reset(bio);
-		bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
+		bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
 		bio->bi_iter.bi_sector	= rb->pick.ptr.offset;
 		bio->bi_iter.bi_size	= btree_bytes(c);
 
@@ -1434,8 +1433,10 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 
 	for (i = 0; i < ra->nr; i++) {
 		ra->buf[i] = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS);
-		ra->bio[i] = bio_alloc_bioset(GFP_NOFS, buf_pages(ra->buf[i],
-							      btree_bytes(c)),
+		ra->bio[i] = bio_alloc_bioset(NULL,
+					      buf_pages(ra->buf[i], btree_bytes(c)),
+					      REQ_OP_READ|REQ_SYNC|REQ_META,
+					      GFP_NOFS,
 					      &c->btree_bio);
 	}
 
@@ -1451,7 +1452,6 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 		rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
 		rb->idx			= i;
 		rb->pick		= pick;
-		rb->bio.bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
 		rb->bio.bi_iter.bi_sector = pick.ptr.offset;
 		rb->bio.bi_end_io	= btree_node_read_all_replicas_endio;
 		bch2_bio_map(&rb->bio, ra->buf[i], btree_bytes(c));
@@ -1509,8 +1509,10 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 
 	ca = bch_dev_bkey_exists(c, pick.ptr.dev);
 
-	bio = bio_alloc_bioset(GFP_NOIO, buf_pages(b->data,
-						   btree_bytes(c)),
+	bio = bio_alloc_bioset(NULL,
+			       buf_pages(b->data, btree_bytes(c)),
+			       REQ_OP_READ|REQ_SYNC|REQ_META,
+			       GFP_NOIO,
 			       &c->btree_bio);
 	rb = container_of(bio, struct btree_read_bio, bio);
 	rb->c			= c;
@@ -1520,7 +1522,6 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
 	rb->have_ioref		= bch2_dev_get_ioref(ca, READ);
 	rb->pick		= pick;
 	INIT_WORK(&rb->work, btree_node_read_work);
-	bio->bi_opf		= REQ_OP_READ|REQ_SYNC|REQ_META;
 	bio->bi_iter.bi_sector	= pick.ptr.offset;
 	bio->bi_end_io		= btree_node_read_endio;
 	bch2_bio_map(bio, b->data, btree_bytes(c));
@@ -1974,8 +1975,10 @@ do_write:
 
 	trace_btree_write(b, bytes_to_write, sectors_to_write);
 
-	wbio = container_of(bio_alloc_bioset(GFP_NOIO,
+	wbio = container_of(bio_alloc_bioset(NULL,
 				buf_pages(data, sectors_to_write << 9),
+				REQ_OP_WRITE|REQ_META,
+				GFP_NOIO,
 				&c->btree_bio),
 			    struct btree_write_bio, wbio.bio);
 	wbio_init(&wbio->wbio.bio);
@@ -1985,7 +1988,6 @@ do_write:
 	wbio->wbio.c			= c;
 	wbio->wbio.used_mempool		= used_mempool;
 	wbio->wbio.first_btree_write	= !b->written;
-	wbio->wbio.bio.bi_opf		= REQ_OP_WRITE|REQ_META;
 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
 	wbio->wbio.bio.bi_private	= b;
 
@@ -1672,10 +1672,11 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
 static void btree_path_copy(struct btree_trans *trans, struct btree_path *dst,
 			    struct btree_path *src)
 {
-	unsigned i;
+	unsigned i, offset = offsetof(struct btree_path, pos);
 
-	memcpy(&dst->pos, &src->pos,
-	       sizeof(struct btree_path) - offsetof(struct btree_path, pos));
+	memcpy((void *) dst + offset,
+	       (void *) src + offset,
+	       sizeof(struct btree_path) - offset);
 
 	for (i = 0; i < BTREE_MAX_DEPTH; i++)
 		if (btree_node_locked(dst, i))
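The memcpy()/memset() rewrites in this commit (here and in bch2_trans_begin(),
update_replicas_list() and bch2_inode_unpack_v1() below) share one idea: when
copying from some member through to the end of a struct, compute the address as
(void *) base + offsetof(...) rather than &base->member, so a fortified
memcpy() does not see a write larger than the member itself. A reduced sketch
with a hypothetical struct:

        struct foo { int a; int b; int c; };
        unsigned offset = offsetof(struct foo, b);

        /* before: memcpy(&dst->b, &src->b, sizeof(*dst) - offset);
         * which fortify flags as overflowing 'b' */
        memcpy((void *) dst + offset,
               (void *) src + offset,
               sizeof(struct foo) - offset);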
@@ -3197,23 +3198,16 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
  */
 void bch2_trans_begin(struct btree_trans *trans)
 {
-	struct btree_insert_entry *i;
 	struct btree_path *path;
 
-	trans_for_each_update(trans, i)
-		__btree_path_put(i->path, true);
+	bch2_trans_reset_updates(trans);
 
 	memset(&trans->journal_res, 0, sizeof(trans->journal_res));
-	trans->extra_journal_res	= 0;
-	trans->nr_updates		= 0;
 	trans->mem_top			= 0;
 
-	trans->hooks			= NULL;
-	trans->extra_journal_entries.nr	= 0;
-
 	if (trans->fs_usage_deltas) {
 		trans->fs_usage_deltas->used = 0;
-		memset(&trans->fs_usage_deltas->memset_start, 0,
+		memset((void *) trans->fs_usage_deltas +
+		       offsetof(struct replicas_delta_list, memset_start), 0,
 		       (void *) &trans->fs_usage_deltas->memset_end -
 		       (void *) &trans->fs_usage_deltas->memset_start);
 	}
@@ -140,4 +140,17 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
 	     (_i) < (_trans)->updates + (_trans)->nr_updates;		\
 	     (_i)++)
 
+static inline void bch2_trans_reset_updates(struct btree_trans *trans)
+{
+	struct btree_insert_entry *i;
+
+	trans_for_each_update(trans, i)
+		bch2_path_put(trans, i->path, true);
+
+	trans->extra_journal_res	= 0;
+	trans->nr_updates		= 0;
+	trans->hooks			= NULL;
+	trans->extra_journal_entries.nr	= 0;
+}
+
 #endif /* _BCACHEFS_BTREE_UPDATE_H */
@@ -1166,17 +1166,12 @@ out:
 	if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
 		percpu_ref_put(&c->writes);
 out_reset:
-	trans_for_each_update(trans, i)
-		bch2_path_put(trans, i->path, true);
-
-	trans->extra_journal_res	= 0;
-	trans->nr_updates		= 0;
-	trans->hooks			= NULL;
-	trans->extra_journal_entries.nr	= 0;
+	bch2_trans_reset_updates(trans);
 
 	if (trans->fs_usage_deltas) {
 		trans->fs_usage_deltas->used = 0;
-		memset(&trans->fs_usage_deltas->memset_start, 0,
+		memset((void *) trans->fs_usage_deltas +
+		       offsetof(struct replicas_delta_list, memset_start), 0,
 		       (void *) &trans->fs_usage_deltas->memset_end -
 		       (void *) &trans->fs_usage_deltas->memset_start);
 	}
@@ -466,7 +466,8 @@ static inline void update_replicas_list(struct btree_trans *trans,
 
 	n = (void *) d->d + d->used;
 	n->delta = sectors;
-	memcpy(&n->r, r, replicas_entry_bytes(r));
+	memcpy((void *) n + offsetof(struct replicas_delta, r),
+	       r, replicas_entry_bytes(r));
 	bch2_replicas_entry_sort(&n->r);
 	d->used += b;
 }
@@ -87,18 +87,18 @@ int bch2_sb_counters_from_cpu(struct bch_fs *c)
 	return 0;
 }
 
+void bch2_fs_counters_exit(struct bch_fs *c)
+{
+	free_percpu(c->counters);
+}
+
 int bch2_fs_counters_init(struct bch_fs *c)
 {
-	int ret = 0;
-
 	c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
-
 	if (!c->counters)
 		return -ENOMEM;
 
-	ret = bch2_sb_counters_to_cpu(c);
-
-	return ret;
+	return bch2_sb_counters_to_cpu(c);
 }
 
 const struct bch_sb_field_ops bch_sb_field_ops_counters = {
@@ -6,11 +6,11 @@
 
 #include "super-io.h"
 
-int bch2_sb_counters_to_cpu(struct bch_fs *c);
+int bch2_sb_counters_to_cpu(struct bch_fs *);
+int bch2_sb_counters_from_cpu(struct bch_fs *);
 
-int bch2_sb_counters_from_cpu(struct bch_fs *c);
-
-int bch2_fs_counters_init(struct bch_fs *c);
+void bch2_fs_counters_exit(struct bch_fs *);
+int bch2_fs_counters_init(struct bch_fs *);
 
 extern const struct bch_sb_field_ops bch_sb_field_ops_counters;
@@ -43,11 +43,11 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
 	if (!bch2_dev_get_ioref(ca, READ))
 		return false;
 
-	bio = bio_alloc_bioset(GFP_NOIO,
-			buf_pages(n_sorted, btree_bytes(c)),
-			&c->btree_bio);
-	bio_set_dev(bio, ca->disk_sb.bdev);
-	bio->bi_opf = REQ_OP_READ|REQ_META;
+	bio = bio_alloc_bioset(ca->disk_sb.bdev,
+			buf_pages(n_sorted, btree_bytes(c)),
+			REQ_OP_READ|REQ_META,
+			GFP_NOIO,
+			&c->btree_bio);
 	bio->bi_iter.bi_sector	= pick.ptr.offset;
 	bch2_bio_map(bio, n_sorted, btree_bytes(c));
 
@@ -412,7 +412,10 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
 			nr_iovecs << PAGE_SHIFT);
 		struct ec_bio *ec_bio;
 
-		ec_bio = container_of(bio_alloc_bioset(GFP_KERNEL, nr_iovecs,
+		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
+						       nr_iovecs,
+						       rw,
+						       GFP_KERNEL,
 						       &c->ec_bioset),
 				      struct ec_bio, bio);
 
@@ -420,9 +423,6 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
 		ec_bio->buf			= buf;
 		ec_bio->idx			= idx;
 
-		bio_set_dev(&ec_bio->bio, ca->disk_sb.bdev);
-		bio_set_op_attrs(&ec_bio->bio, rw, 0);
-
 		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
 		ec_bio->bio.bi_end_io		= ec_block_endio;
 		ec_bio->bio.bi_private		= cl;
@@ -842,13 +842,12 @@ out:
 	return ret;
 }
 
-void bch2_invalidatepage(struct page *page, unsigned int offset,
-			 unsigned int length)
+void bch2_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
-	if (offset || length < PAGE_SIZE)
+	if (offset || length < folio_size(folio))
 		return;
 
-	bch2_clear_page_bits(page);
+	bch2_clear_page_bits(&folio->page);
 }
 
 int bch2_releasepage(struct page *page, gfp_t gfp_mask)
@@ -1139,12 +1138,12 @@ void bch2_readahead(struct readahead_control *ractl)
 					readpages_iter.idx,
 					BIO_MAX_VECS);
 		struct bch_read_bio *rbio =
-			rbio_init(bio_alloc_bioset(GFP_NOFS, n, &c->bio_read),
+			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
+						   GFP_NOFS, &c->bio_read),
 				  opts);
 
 		readpages_iter.idx++;
 
-		bio_set_op_attrs(&rbio->bio, REQ_OP_READ, 0);
 		rbio->bio.bi_iter.bi_sector = (sector_t) index << PAGE_SECTORS_SHIFT;
 		rbio->bio.bi_end_io = bch2_readpages_end_io;
 		BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
@@ -1183,7 +1182,7 @@ int bch2_readpage(struct file *file, struct page *page)
 	struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
 	struct bch_read_bio *rbio;
 
-	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read), opts);
+	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read), opts);
 	rbio->bio.bi_end_io = bch2_readpages_end_io;
 
 	__bchfs_readpage(c, rbio, inode_inum(inode), page);
@@ -1204,7 +1203,7 @@ static int bch2_read_single_page(struct page *page,
 	int ret;
 	DECLARE_COMPLETION_ONSTACK(done);
 
-	rbio = rbio_init(bio_alloc_bioset(GFP_NOFS, 1, &c->bio_read),
+	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
 			 io_opts(c, &inode->ei_inode));
 	rbio->bio.bi_private = &done;
 	rbio->bio.bi_end_io = bch2_read_single_page_end_io;
@@ -1339,7 +1338,9 @@ static void bch2_writepage_io_alloc(struct bch_fs *c,
 {
 	struct bch_write_op *op;
 
-	w->io = container_of(bio_alloc_bioset(GFP_NOFS, BIO_MAX_VECS,
+	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
+					      REQ_OP_WRITE,
+					      GFP_NOFS,
 					      &c->writepage_bioset),
 			     struct bch_writepage_io, op.wbio.bio);
 
@@ -1916,8 +1917,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 	shorten = iov_iter_count(iter) - round_up(ret, block_bytes(c));
 	iter->count -= shorten;
 
-	bio = bio_alloc_bioset(GFP_KERNEL,
+	bio = bio_alloc_bioset(NULL,
 			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+			       REQ_OP_READ,
+			       GFP_KERNEL,
 			       &c->dio_read_bioset);
 
 	bio->bi_end_io = bch2_direct_IO_read_endio;
@@ -1951,8 +1954,10 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
 
 	goto start;
 	while (iter->count) {
-		bio = bio_alloc_bioset(GFP_KERNEL,
+		bio = bio_alloc_bioset(NULL,
 				       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+				       REQ_OP_READ,
+				       GFP_KERNEL,
 				       &c->bio_read);
 		bio->bi_end_io = bch2_direct_IO_read_split_endio;
 start:
@@ -2220,7 +2225,7 @@ loop:
 		if (!dio->iter.count)
 			break;
 
-		bio_reset(bio);
+		bio_reset(bio, NULL, REQ_OP_WRITE);
 		reinit_completion(&dio->done);
 	}
 
@@ -2301,8 +2306,10 @@ ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
 		locked = false;
 	}
 
-	bio = bio_alloc_bioset(GFP_KERNEL,
+	bio = bio_alloc_bioset(NULL,
 			       bio_iov_vecs_to_alloc(iter, BIO_MAX_VECS),
+			       REQ_OP_WRITE,
+			       GFP_KERNEL,
 			       &c->dio_write_bioset);
 	dio = container_of(bio, struct dio_write, op.wbio.bio);
 	init_completion(&dio->done);
@@ -41,7 +41,7 @@ loff_t bch2_llseek(struct file *, loff_t, int);
 
 vm_fault_t bch2_page_fault(struct vm_fault *);
 vm_fault_t bch2_page_mkwrite(struct vm_fault *);
-void bch2_invalidatepage(struct page *, unsigned int, unsigned int);
+void bch2_invalidate_folio(struct folio *, size_t, size_t);
 int bch2_releasepage(struct page *, gfp_t);
 int bch2_migrate_page(struct address_space *, struct page *,
 		      struct page *, enum migrate_mode);
@@ -1115,10 +1115,10 @@ static const struct address_space_operations bch_address_space_operations = {
 	.readpage	= bch2_readpage,
 	.writepages	= bch2_writepages,
 	.readahead	= bch2_readahead,
-	.set_page_dirty	= __set_page_dirty_nobuffers,
+	.dirty_folio	= filemap_dirty_folio,
 	.write_begin	= bch2_write_begin,
 	.write_end	= bch2_write_end,
-	.invalidatepage	= bch2_invalidatepage,
+	.invalidate_folio = bch2_invalidate_folio,
 	.releasepage	= bch2_releasepage,
 	.direct_IO	= noop_direct_IO,
 #ifdef CONFIG_MIGRATION
@@ -141,9 +141,9 @@ static noinline int bch2_inode_unpack_v1(struct bkey_s_c_inode inode,
 
 #define x(_name, _bits)					\
 	if (fieldnr++ == INODE_NR_FIELDS(inode.v)) {			\
-		memset(&unpacked->_name, 0,				\
-		       sizeof(*unpacked) -				\
-		       offsetof(struct bch_inode_unpacked, _name));	\
+		unsigned offset = offsetof(struct bch_inode_unpacked, _name);\
+		memset((void *) unpacked + offset, 0,			\
+		       sizeof(*unpacked) - offset);			\
 		return 0;						\
 	}								\
 \
@@ -470,8 +470,8 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
 		ca = bch_dev_bkey_exists(c, ptr->dev);
 
 		if (to_entry(ptr + 1) < ptrs.end) {
-			n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO,
-						   &ca->replica_set));
+			n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
+						    GFP_NOIO, &ca->replica_set));
 
 			n->bio.bi_end_io	= wbio->bio.bi_end_io;
 			n->bio.bi_private	= wbio->bio.bi_private;
@@ -701,7 +701,8 @@ static struct bio *bch2_write_bio_alloc(struct bch_fs *c,
 
 	pages = min(pages, BIO_MAX_VECS);
 
-	bio = bio_alloc_bioset(GFP_NOIO, pages, &c->bio_write);
+	bio = bio_alloc_bioset(NULL, pages, 0,
+			       GFP_NOIO, &c->bio_write);
 	wbio			= wbio_init(bio);
 	wbio->put_bio		= true;
 	/* copy WRITE_SYNC flag */
@@ -1442,7 +1443,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 		goto err;
 
 	rbio_init(&(*rbio)->bio, opts);
-	bio_init(&(*rbio)->bio, (*rbio)->bio.bi_inline_vecs, pages);
+	bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
 
 	if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
 				 GFP_NOIO))
@@ -1457,7 +1458,7 @@ static struct promote_op *__promote_alloc(struct bch_fs *c,
 		goto err;
 
 	bio = &op->write.op.wbio.bio;
-	bio_init(bio, bio->bi_inline_vecs, pages);
+	bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
 
 	ret = bch2_migrate_write_init(c, &op->write,
 			writepoint_hashed((unsigned long) current),
@@ -2139,8 +2140,10 @@ get_bio:
 	} else if (bounce) {
 		unsigned sectors = pick.crc.compressed_size;
 
-		rbio = rbio_init(bio_alloc_bioset(GFP_NOIO,
+		rbio = rbio_init(bio_alloc_bioset(NULL,
 						  DIV_ROUND_UP(sectors, PAGE_SECTORS),
+						  0,
+						  GFP_NOIO,
 						  &c->bio_read_split),
 				 orig->opts);
 
@@ -2156,8 +2159,8 @@ get_bio:
 		 * from the whole bio, in which case we don't want to retry and
 		 * lose the error)
 		 */
-		rbio = rbio_init(bio_clone_fast(&orig->bio, GFP_NOIO,
-						&c->bio_read_split),
+		rbio = rbio_init(bio_alloc_clone(NULL, &orig->bio, GFP_NOIO,
+						 &c->bio_read_split),
 				 orig->opts);
 		rbio->bio.bi_iter = iter;
 		rbio->split	= true;
@@ -1523,12 +1523,10 @@ static void do_journal_write(struct closure *cl)
 				 sectors);
 
 		bio = ca->journal.bio;
-		bio_reset(bio);
-		bio_set_dev(bio, ca->disk_sb.bdev);
+		bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
 		bio->bi_iter.bi_sector	= ptr->offset;
 		bio->bi_end_io		= journal_write_endio;
 		bio->bi_private		= ca;
-		bio->bi_opf		= REQ_OP_WRITE|REQ_SYNC|REQ_META;
 
 		BUG_ON(bio->bi_iter.bi_sector == ca->prev_journal_sector);
 		ca->prev_journal_sector = bio->bi_iter.bi_sector;
@@ -1706,9 +1704,7 @@ retry_alloc:
 			percpu_ref_get(&ca->io_ref);
 
 			bio = ca->journal.bio;
-			bio_reset(bio);
-			bio_set_dev(bio, ca->disk_sb.bdev);
-			bio->bi_opf	= REQ_OP_FLUSH;
+			bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
 			bio->bi_end_io	= journal_write_endio;
 			bio->bi_private	= ca;
 			closure_bio_submit(bio, cl);
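The journal and superblock changes here are the bio_reset() side of the same
API migration: the device and op flags are handed to the reset call, dropping
the separate bio_set_dev()/bio_set_op_attrs() calls. The resulting pattern,
taken from do_journal_write() above:

        bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
        bio->bi_iter.bi_sector = ptr->offset;
        bio->bi_end_io         = journal_write_endio;
        bio->bi_private        = ca;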
@@ -548,7 +548,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	io->read_sectors	= k.k->size;
 	io->write_sectors	= k.k->size;
 
-	bio_init(&io->write.op.wbio.bio, io->bi_inline_vecs, pages);
+	bio_init(&io->write.op.wbio.bio, NULL, io->bi_inline_vecs, pages, 0);
 	bio_set_prio(&io->write.op.wbio.bio,
 		     IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
@@ -558,7 +558,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 
 	io->rbio.c		= c;
 	io->rbio.opts		= io_opts;
-	bio_init(&io->rbio.bio, io->bi_inline_vecs, pages);
+	bio_init(&io->rbio.bio, NULL, io->bi_inline_vecs, pages, 0);
 	io->rbio.bio.bi_vcnt = pages;
 	bio_set_prio(&io->rbio.bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 	io->rbio.bio.bi_iter.bi_size = sectors << 9;
@@ -528,10 +528,8 @@ static int read_one_super(struct bch_sb_handle *sb, u64 offset, struct printbuf
 	size_t bytes;
 	int ret;
 reread:
-	bio_reset(sb->bio);
-	bio_set_dev(sb->bio, sb->bdev);
+	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
 	sb->bio->bi_iter.bi_sector = offset;
-	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
 	bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);
 
 	ret = submit_bio_wait(sb->bio);
@@ -659,10 +657,8 @@ int bch2_read_super(const char *path, struct bch_opts *opts,
 	 * Error reading primary superblock - read location of backup
 	 * superblocks:
 	 */
-	bio_reset(sb->bio);
-	bio_set_dev(sb->bio, sb->bdev);
+	bio_reset(sb->bio, sb->bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
 	sb->bio->bi_iter.bi_sector = BCH_SB_LAYOUT_SECTOR;
-	bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
 	/*
 	 * use sb buffer to read layout, since sb buffer is page aligned but
 	 * layout won't be:
@@ -746,12 +742,10 @@ static void read_back_super(struct bch_fs *c, struct bch_dev *ca)
 	struct bch_sb *sb = ca->disk_sb.sb;
 	struct bio *bio = ca->disk_sb.bio;
 
-	bio_reset(bio);
-	bio_set_dev(bio, ca->disk_sb.bdev);
+	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_READ|REQ_SYNC|REQ_META);
 	bio->bi_iter.bi_sector	= le64_to_cpu(sb->layout.sb_offset[0]);
 	bio->bi_end_io		= write_super_endio;
 	bio->bi_private		= ca;
-	bio_set_op_attrs(bio, REQ_OP_READ, REQ_SYNC|REQ_META);
 	bch2_bio_map(bio, ca->sb_read_scratch, PAGE_SIZE);
 
 	this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb],
@@ -772,12 +766,10 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
 	sb->csum = csum_vstruct(c, BCH_SB_CSUM_TYPE(sb),
 				null_nonce(), sb);
 
-	bio_reset(bio);
-	bio_set_dev(bio, ca->disk_sb.bdev);
+	bio_reset(bio, ca->disk_sb.bdev, REQ_OP_WRITE|REQ_SYNC|REQ_META);
 	bio->bi_iter.bi_sector	= le64_to_cpu(sb->offset);
 	bio->bi_end_io		= write_super_endio;
 	bio->bi_private		= ca;
-	bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
 	bch2_bio_map(bio, sb,
 		     roundup((size_t) vstruct_bytes(sb),
 			     bdev_logical_block_size(ca->disk_sb.bdev)));
@@ -50,7 +50,6 @@
 #include <linux/blkdev.h>
 #include <linux/debugfs.h>
 #include <linux/device.h>
-#include <linux/genhd.h>
 #include <linux/idr.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
@@ -64,10 +63,19 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
 
 #define KTYPE(type)							\
-struct kobj_type type ## _ktype = {					\
+static const struct attribute_group type ## _group = {			\
+	.attrs = type ## _files						\
+};									\
+									\
+static const struct attribute_group *type ## _groups[] = {		\
+	&type ## _group,						\
+	NULL								\
+};									\
+									\
+static const struct kobj_type type ## _ktype = {			\
 	.release	= type ## _release,				\
 	.sysfs_ops	= &type ## _sysfs_ops,				\
-	.default_attrs	= type ## _files				\
+	.default_groups = type ## _groups				\
 }
 
 static void bch2_fs_release(struct kobject *);
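For reference, KTYPE(bch2_fs) now expands to roughly the following, routing the
attribute list through a default_groups array since the kernel dropped
kobj_type.default_attrs:

        static const struct attribute_group bch2_fs_group = {
                .attrs = bch2_fs_files
        };

        static const struct attribute_group *bch2_fs_groups[] = {
                &bch2_fs_group,
                NULL
        };

        static const struct kobj_type bch2_fs_ktype = {
                .release        = bch2_fs_release,
                .sysfs_ops      = &bch2_fs_sysfs_ops,
                .default_groups = bch2_fs_groups
        };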
@@ -88,12 +96,12 @@ static void bch2_fs_time_stats_release(struct kobject *k)
 {
 }
 
-static KTYPE(bch2_fs);
-static KTYPE(bch2_fs_counters);
-static KTYPE(bch2_fs_internal);
-static KTYPE(bch2_fs_opts_dir);
-static KTYPE(bch2_fs_time_stats);
-static KTYPE(bch2_dev);
+KTYPE(bch2_fs);
+KTYPE(bch2_fs_counters);
+KTYPE(bch2_fs_internal);
+KTYPE(bch2_fs_opts_dir);
+KTYPE(bch2_fs_time_stats);
+KTYPE(bch2_dev);
 
 static struct kset *bcachefs_kset;
 static LIST_HEAD(bch_fs_list);
@@ -414,6 +422,7 @@ static void __bch2_fs_free(struct bch_fs *c)
 	for (i = 0; i < BCH_TIME_STAT_NR; i++)
 		bch2_time_stats_exit(&c->times[i]);
 
+	bch2_fs_counters_exit(c);
 	bch2_fs_snapshots_exit(c);
 	bch2_fs_quota_exit(c);
 	bch2_fs_fsio_exit(c);
@@ -774,7 +783,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	    bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
 	    bch2_fs_btree_iter_init(c) ?:
 	    bch2_fs_btree_interior_update_init(c) ?:
-	    bch2_fs_buckets_waiting_for_journal_init(c);
+	    bch2_fs_buckets_waiting_for_journal_init(c) ?:
 	    bch2_fs_subvolumes_init(c) ?:
 	    bch2_fs_io_init(c) ?:
 	    bch2_fs_encryption_init(c) ?:
@@ -305,7 +305,7 @@ static inline void pr_indent_pop(struct printbuf *buf, unsigned spaces)
 {
 	if (buf->last_newline + buf->indent == buf->pos) {
 		buf->pos -= spaces;
-		buf->buf[buf->pos] = 0;
+		buf->buf[buf->pos] = '\0';
 	}
 	buf->indent -= spaces;
 }
linux/bio.c (91 lines changed)
@@ -120,29 +120,30 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 	}
 }
 
-void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
+static int __bio_clone(struct bio *bio, struct bio *bio_src, gfp_t gfp)
 {
 	/*
 	 * most users will be overriding ->bi_bdev with a new target,
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
-	bio->bi_bdev = bio_src->bi_bdev;
 	bio_set_flag(bio, BIO_CLONED);
-	bio->bi_opf = bio_src->bi_opf;
 	bio->bi_ioprio = bio_src->bi_ioprio;
 	bio->bi_iter = bio_src->bi_iter;
-	bio->bi_io_vec = bio_src->bi_io_vec;
+	return 0;
 }
 
-struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
+struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
+			    gfp_t gfp, struct bio_set *bs)
 {
-	struct bio *b;
+	struct bio *bio;
 
-	b = bio_alloc_bioset(gfp_mask, 0, bs);
-	if (!b)
+	bio = bio_alloc_bioset(bdev, 0, bio_src->bi_opf, gfp, bs);
+	if (!bio)
 		return NULL;
 
-	__bio_clone_fast(b, bio);
-	return b;
+	if (__bio_clone(bio, bio_src, gfp) < 0) {
+		bio_put(bio);
+		return NULL;
+	}
+	bio->bi_io_vec = bio_src->bi_io_vec;
+
+	return bio;
 }
 
 struct bio *bio_split(struct bio *bio, int sectors,
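With bio_clone_fast() and bio_clone_bioset() gone, bio_alloc_clone() is the
single cloning entry point: the clone inherits bi_opf from the source, and the
bdev is passed explicitly (NULL where the caller assigns it afterwards). The
replacement pattern used in bch2_submit_wbio_replicas() earlier in this diff:

        /* old: n = to_wbio(bio_clone_fast(&wbio->bio, GFP_NOIO, &ca->replica_set)); */
        n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
                                    GFP_NOIO, &ca->replica_set));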
@@ -153,15 +154,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
 	BUG_ON(sectors <= 0);
 	BUG_ON(sectors >= bio_sectors(bio));
 
-	/*
-	 * Discards need a mutable bio_vec to accommodate the payload
-	 * required by the DSM TRIM and UNMAP commands.
-	 */
-	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
-		split = bio_clone_bioset(bio, gfp, bs);
-	else
-		split = bio_clone_fast(bio, gfp, bs);
-
+	split = bio_alloc_clone(bio->bi_bdev, bio, gfp, bs);
 	if (!split)
 		return NULL;
 
@@ -289,12 +282,14 @@ again:
 		bio->bi_end_io(bio);
 }
 
-void bio_reset(struct bio *bio)
+void bio_reset(struct bio *bio, struct block_device *bdev, unsigned int opf)
 {
 	unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
 
 	memset(bio, 0, BIO_RESET_BYTES);
-	bio->bi_flags = flags;
+	bio->bi_bdev	= bdev;
+	bio->bi_opf	= opf;
+	bio->bi_flags	= flags;
 	atomic_set(&bio->__bi_remaining, 1);
 }
 
@@ -306,7 +301,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
 		      sizeof(struct bio_vec) * nr_iovecs, gfp_mask);
 	if (unlikely(!bio))
 		return NULL;
-	bio_init(bio, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs);
+	bio_init(bio, NULL, nr_iovecs ? bio->bi_inline_vecs : NULL, nr_iovecs, 0);
 	bio->bi_pool = NULL;
 	return bio;
 }
@@ -332,7 +327,11 @@ static struct bio_vec *bvec_alloc(mempool_t *pool, int *nr_vecs,
 	return mempool_alloc(pool, gfp_mask);
 }
 
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(struct block_device *bdev,
+			     unsigned nr_iovecs,
+			     unsigned opf,
+			     gfp_t gfp_mask,
+			     struct bio_set *bs)
 {
 	struct bio *bio;
 	void *p;
@@ -352,11 +351,11 @@ struct bio *bio_alloc_bioset(struct block_device *bdev,
 		if (unlikely(!bvl))
 			goto err_free;
 
-		bio_init(bio, bvl, nr_iovecs);
+		bio_init(bio, bdev, bvl, nr_iovecs, opf);
 	} else if (nr_iovecs) {
-		bio_init(bio, bio->bi_inline_vecs, BIO_INLINE_VECS);
+		bio_init(bio, bdev, bio->bi_inline_vecs, BIO_INLINE_VECS, opf);
 	} else {
-		bio_init(bio, NULL, 0);
+		bio_init(bio, bdev, NULL, 0, opf);
 	}
 
 	bio->bi_pool = bs;
@@ -367,38 +366,6 @@ err_free:
 	return NULL;
 }
 
-struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
-			     struct bio_set *bs)
-{
-	struct bvec_iter iter;
-	struct bio_vec bv;
-	struct bio *bio;
-
-	bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
-	if (!bio)
-		return NULL;
-
-	bio->bi_bdev		= bio_src->bi_bdev;
-	bio->bi_opf		= bio_src->bi_opf;
-	bio->bi_iter.bi_sector	= bio_src->bi_iter.bi_sector;
-	bio->bi_iter.bi_size	= bio_src->bi_iter.bi_size;
-
-	switch (bio_op(bio)) {
-	case REQ_OP_DISCARD:
-	case REQ_OP_SECURE_ERASE:
-		break;
-	case REQ_OP_WRITE_SAME:
-		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
-		break;
-	default:
-		bio_for_each_segment(bv, bio_src, iter)
-			bio->bi_io_vec[bio->bi_vcnt++] = bv;
-		break;
-	}
-
-	return bio;
-}
-
 void bioset_exit(struct bio_set *bs)
 {
 	mempool_exit(&bs->bio_pool);