Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-22 00:00:03 +03:00)
Update bcachefs sources to 1a739db0b256 bcachefs: guard against overflow in btree node split
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent b7453196fe
commit f76d246fff
@@ -1 +1 @@
-841a95c29f4caefb9c3875466024f3549f45f842
+1a739db0b256dc56d4e9fdc33a11d0728d7672d2
cmd_device.c (10 changed lines)
@@ -530,10 +530,9 @@ int cmd_device_resize(int argc, char *argv[])
 	if (IS_ERR(c))
 		die("error opening %s: %s", dev, bch2_err_str(PTR_ERR(c)));
 
-	struct bch_dev *ca, *resize = NULL;
-	unsigned i;
+	struct bch_dev *resize = NULL;
 
-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		if (resize)
 			die("confused: more than one online device?");
 		resize = ca;
@@ -628,10 +627,9 @@ int cmd_device_resize_journal(int argc, char *argv[])
 	if (IS_ERR(c))
 		die("error opening %s: %s", dev, bch2_err_str(PTR_ERR(c)));
 
-	struct bch_dev *ca, *resize = NULL;
-	unsigned i;
+	struct bch_dev *resize = NULL;
 
-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		if (resize)
 			die("confused: more than one online device?");
 		resize = ca;
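This pair of hunks shows the calling-convention change that recurs through the rest of the commit: the member-device iterators now take the filesystem first and declare the `bch_dev` pointer themselves, instead of requiring the caller to supply both the pointer and a counter. A simplified sketch of the new shape (the helper name is assumed for illustration; the real macro in the bcachefs sources also manages a percpu ref on each device):

/* Sketch only -- bch2_next_online_dev() is a hypothetical helper name. */
#define for_each_online_member(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_next_online_dev(_c, _ca));)

/* Callers no longer declare ca or a counter: */
static unsigned count_online_devices(struct bch_fs *c)
{
	unsigned nr = 0;

	for_each_online_member(c, ca)	/* ca is scoped to the loop */
		nr++;
	return nr;
}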
cmd_dump.c (12 changed lines)
@@ -114,9 +114,8 @@ int cmd_dump(int argc, char *argv[])
 		{ NULL }
 	};
 	struct bch_opts opts = bch2_opts_empty();
-	struct bch_dev *ca;
 	char *out = NULL;
-	unsigned i, nr_devices = 0;
+	unsigned nr_devices = 0;
 	bool force = false, entire_journal = true;
 	int fd, opt;
 
@@ -160,22 +159,19 @@ int cmd_dump(int argc, char *argv[])
 
 	down_read(&c->gc_lock);
 
-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		nr_devices++;
 
 	BUG_ON(!nr_devices);
 
-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		int flags = O_WRONLY|O_CREAT|O_TRUNC;
 
 		if (!force)
 			flags |= O_EXCL;
 
-		if (!c->devs[i])
-			continue;
-
 		char *path = nr_devices > 1
-			? mprintf("%s.%u.qcow2", out, i)
+			? mprintf("%s.%u.qcow2", out, ca->dev_idx)
 			: mprintf("%s.qcow2", out);
 		fd = xopen(path, flags, 0600);
 		free(path);
@@ -117,7 +117,7 @@ int cmd_format(int argc, char *argv[])
 	DARRAY(struct dev_opts) devices = { 0 };
 	DARRAY(char *) device_paths = { 0 };
 	struct format_opts opts	= format_opts_default();
-	struct dev_opts dev_opts = dev_opts_default(), *dev;
+	struct dev_opts dev_opts = dev_opts_default();
 	bool force = false, no_passphrase = false, quiet = false, initialize = true, verbose = false;
 	bool unconsumed_dev_option = false;
 	unsigned v;
cmd_fs.c (4 changed lines)
@@ -119,12 +119,9 @@ static int dev_by_label_cmp(const void *_l, const void *_r)
 
 static struct dev_name *dev_idx_to_name(dev_names *dev_names, unsigned idx)
 {
-	struct dev_name *dev;
-
 	darray_for_each(*dev_names, dev)
 		if (dev->idx == idx)
 			return dev;
-
 	return NULL;
 }
 
@@ -185,7 +182,6 @@ static void fs_usage_to_text(struct printbuf *out, const char *path)
 
 	struct bchfs_handle fs = bcache_fs_open(path);
 
-	struct dev_name *dev;
 	dev_names dev_names = bchu_fs_get_devices(fs);
 
 	struct bch_ioctl_fs_usage *u = bchu_fs_usage(fs);
@@ -55,8 +55,6 @@ typedef DARRAY(enum btree_id) d_btree_id;
 
 static bool bkey_matches_filter(d_bbpos filter, struct jset_entry *entry, struct bkey_i *k)
 {
-	struct bbpos *i;
-
 	darray_for_each(filter, i) {
 		if (i->btree != entry->btree_id)
 			continue;
@@ -106,7 +104,6 @@ static bool should_print_transaction(struct jset_entry *entry, struct jset_entry
 static bool should_print_entry(struct jset_entry *entry, d_btree_id filter)
 {
 	struct bkey_i *k;
-	enum btree_id *id;
 
 	if (!filter.nr)
 		return true;
@@ -612,8 +612,6 @@ static void find_superblock_space(ranges extents,
 				  struct format_opts opts,
 				  struct dev_opts *dev)
 {
-	struct range *i;
-
 	darray_for_each(extents, i) {
 		u64 start = round_up(max(256ULL << 10, i->start),
 				     dev->bucket_size << 9);
@@ -534,14 +534,8 @@ void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bke
 int bch2_bucket_gens_init(struct bch_fs *c)
 {
 	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_alloc_v4 a;
 	struct bkey_i_bucket_gens g;
 	bool have_bucket_gens_key = false;
-	unsigned offset;
-	struct bpos pos;
-	u8 gen;
 	int ret;
 
 	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
@@ -553,8 +547,10 @@ int bch2_bucket_gens_init(struct bch_fs *c)
 		if (!bch2_dev_bucket_exists(c, k.k->p))
 			continue;
 
-		gen = bch2_alloc_to_v4(k, &a)->gen;
-		pos = alloc_gens_pos(iter.pos, &offset);
+		struct bch_alloc_v4 a;
+		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
+		unsigned offset;
+		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
 
 		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
 			ret = commit_do(trans, NULL, NULL,
@@ -589,17 +585,11 @@ int bch2_bucket_gens_init(struct bch_fs *c)
 int bch2_alloc_read(struct bch_fs *c)
 {
 	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_dev *ca;
 	int ret;
 
 	down_read(&c->gc_lock);
 
 	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
-		const struct bch_bucket_gens *g;
-		u64 b;
-
 		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
 					 BTREE_ITER_PREFETCH, k, ({
 			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
@@ -608,7 +598,7 @@ int bch2_alloc_read(struct bch_fs *c)
 			if (k.k->type != KEY_TYPE_bucket_gens)
 				continue;
 
-			g = bkey_s_c_to_bucket_gens(k).v;
+			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
 
 			/*
 			 * Not a fsck error because this is checked/repaired by
@@ -617,17 +607,15 @@ int bch2_alloc_read(struct bch_fs *c)
 			if (!bch2_dev_exists2(c, k.k->p.inode))
 				continue;
 
-			ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
 
-			for (b = max_t(u64, ca->mi.first_bucket, start);
+			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
 			     b < min_t(u64, ca->mi.nbuckets, end);
 			     b++)
 				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
 			0;
 		}));
 	} else {
-		struct bch_alloc_v4 a;
-
 		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
 					 BTREE_ITER_PREFETCH, k, ({
 			/*
@@ -637,8 +625,9 @@ int bch2_alloc_read(struct bch_fs *c)
 			if (!bch2_dev_bucket_exists(c, k.k->p))
 				continue;
 
-			ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
 
+			struct bch_alloc_v4 a;
 			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
 			0;
 		}));
@@ -903,7 +892,6 @@ static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos
 static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
 {
 	struct bch_dev *ca;
-	unsigned iter;
 
 	if (bch2_dev_bucket_exists(c, *bucket))
 		return true;
@@ -921,8 +909,7 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
 	}
 
 	rcu_read_lock();
-	iter = bucket->inode;
-	ca = __bch2_next_dev(c, &iter, NULL);
+	ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
 	if (ca)
 		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
 	rcu_read_unlock();
@@ -1471,7 +1458,6 @@ bkey_err:
 			bch2_check_bucket_gens_key(trans, &iter, k));
 err:
 	bch2_trans_put(trans);
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
@@ -1551,9 +1537,6 @@ fsck_err:
 
 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
 				POS_MIN, BTREE_ITER_PREFETCH, k,
@@ -1682,8 +1665,6 @@ out:
 static void bch2_do_discards_work(struct work_struct *work)
 {
 	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
-	struct btree_iter iter;
-	struct bkey_s_c k;
 	u64 seen = 0, open = 0, need_journal_commit = 0, discarded = 0;
 	struct bpos discard_pos_done = POS_MAX;
 	int ret;
@@ -1805,22 +1786,18 @@ err:
 static void bch2_do_invalidates_work(struct work_struct *work)
 {
 	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
-	struct bch_dev *ca;
 	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	unsigned i;
 	int ret = 0;
 
 	ret = bch2_btree_write_buffer_tryflush(trans);
 	if (ret)
 		goto err;
 
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		s64 nr_to_invalidate =
 			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
 
-		ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+		ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
 				lru_pos(ca->dev_idx, 0, 0),
 				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
 				BTREE_ITER_INTENT, k,
@@ -1945,8 +1922,6 @@ bkey_err:
 
 int bch2_fs_freespace_init(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
 	int ret = 0;
 	bool doing_init = false;
 
@@ -1955,7 +1930,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
 	 * every mount:
 	 */
 
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		if (ca->mi.freespace_initialized)
 			continue;
 
@@ -2015,15 +1990,13 @@ out:
 
 void bch2_recalc_capacity(struct bch_fs *c)
 {
-	struct bch_dev *ca;
 	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
 	unsigned bucket_size_max = 0;
 	unsigned long ra_pages = 0;
-	unsigned i;
 
 	lockdep_assert_held(&c->state_lock);
 
-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
 
 		ra_pages += bdi->ra_pages;
@@ -2031,7 +2004,7 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
 	bch2_set_ra_pages(c, ra_pages);
 
-	for_each_rw_member(ca, c, i) {
+	for_each_rw_member(c, ca) {
 		u64 dev_reserve = 0;
 
 		/*
@@ -2087,11 +2060,9 @@ void bch2_recalc_capacity(struct bch_fs *c)
 
 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
 	u64 ret = U64_MAX;
 
-	for_each_rw_member(ca, c, i)
+	for_each_rw_member(c, ca)
 		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
 	return ret;
 }
@@ -69,11 +69,8 @@ const char * const bch2_watermarks[] = {
 
 void bch2_reset_alloc_cursors(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL)
+	for_each_member_device_rcu(c, ca, NULL)
 		ca->alloc_cursor = 0;
 	rcu_read_unlock();
 }
@@ -391,16 +391,11 @@ fsck_err:
 /* verify that every backpointer has a corresponding alloc key */
 int bch2_check_btree_backpointers(struct bch_fs *c)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret;
-
-	ret = bch2_trans_run(c,
+	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter,
 			BTREE_ID_backpointers, POS_MIN, 0, k,
 			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 		  bch2_check_btree_backpointer(trans, &iter, k)));
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
@@ -769,7 +764,6 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 	}
 	bch2_trans_put(trans);
 
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
@@ -824,8 +818,6 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
 						   struct bbpos start,
 						   struct bbpos end)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
 	struct bpos last_flushed_pos = SPOS_MAX;
 
 	return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
@@ -877,7 +869,6 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 	}
 	bch2_trans_put(trans);
 
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
@@ -315,15 +315,20 @@ do {									\
 #define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
 	bch2_print_ratelimited(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
 
+static inline bool should_print_err(int err)
+{
+	return err && !bch2_err_matches(err, BCH_ERR_transaction_restart);
+}
+
 #define bch_err_fn(_c, _ret)						\
 do {									\
-	if (_ret && !bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+	if (should_print_err(_ret))					\
 		bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
 } while (0)
 
 #define bch_err_msg(_c, _ret, _msg, ...)				\
 do {									\
-	if (_ret && !bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+	if (should_print_err(_ret))					\
 		bch_err(_c, "%s(): error " _msg " %s", __func__,	\
 			##__VA_ARGS__, bch2_err_str(_ret));		\
 } while (0)
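Both logging macros already ignored transaction restarts; this hunk just names that predicate. Because `bch_err_fn()` is a no-op when `_ret` is zero, callers' `if (ret)` guards are redundant, which is what most of the surrounding hunks delete:

/* Before: the guard duplicates the check the macro performs itself. */
if (ret)
	bch_err_fn(c, ret);

/* After: a bare call is safe -- it prints nothing for 0 or for
 * transaction-restart errors, per should_print_err(). */
bch_err_fn(c, ret);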
@@ -414,10 +414,9 @@ again:
 			continue;
 		}
 
-		if (ret) {
-			bch_err_msg(c, ret, "getting btree node");
+		bch_err_msg(c, ret, "getting btree node");
+		if (ret)
 			break;
-		}
 
 		ret = btree_repair_node_boundaries(c, b, prev, cur);
 
@@ -482,10 +481,9 @@ again:
 							  false);
 		ret = PTR_ERR_OR_ZERO(cur);
 
-		if (ret) {
-			bch_err_msg(c, ret, "getting btree node");
+		bch_err_msg(c, ret, "getting btree node");
+		if (ret)
 			goto err;
-		}
 
 		ret = bch2_btree_repair_topology_recurse(trans, cur);
 		six_unlock_read(&cur->c.lock);
@@ -707,8 +705,8 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id,
 
 		new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
 		if (!new) {
-			bch_err_msg(c, ret, "allocating new key");
 			ret = -BCH_ERR_ENOMEM_gc_repair_key;
+			bch_err_msg(c, ret, "allocating new key");
 			goto err;
 		}
 
@@ -834,7 +832,6 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
 		bch2_mark_key(trans, btree_id, level, old, *k, flags));
 fsck_err:
 err:
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
@@ -1068,7 +1065,6 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
 fsck_err:
 	six_unlock_read(&b->c.lock);
 
-	if (ret < 0)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	printbuf_exit(&buf);
 	return ret;
@@ -1105,10 +1101,8 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
 			: bch2_gc_btree(trans, i, initial, metadata_only);
 	}
 
-	if (ret < 0)
-		bch_err_fn(c, ret);
-
 	bch2_trans_put(trans);
+	bch_err_fn(c, ret);
 	return ret;
 }
@@ -1159,13 +1153,10 @@ static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
 
 static void bch2_mark_superblocks(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	mutex_lock(&c->sb_lock);
 	gc_pos_set(c, gc_phase(GC_PHASE_SB));
 
-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
 	mutex_unlock(&c->sb_lock);
 }
@@ -1190,13 +1181,10 @@ static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
 
 static void bch2_gc_free(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
 	genradix_free(&c->reflink_gc_table);
 	genradix_free(&c->gc_stripes);
 
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		kvpfree(rcu_dereference_protected(ca->buckets_gc, 1),
 			sizeof(struct bucket_array) +
 			ca->mi.nbuckets * sizeof(struct bucket));
@@ -1218,7 +1206,7 @@ static int bch2_gc_done(struct bch_fs *c,
 	bool verify = !metadata_only &&
 		!c->opts.reconstruct_alloc &&
 		(!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
-	unsigned i, dev;
+	unsigned i;
 	int ret = 0;
 
 	percpu_down_write(&c->mark_lock);
@@ -1230,14 +1218,14 @@ static int bch2_gc_done(struct bch_fs *c,
 			, ##__VA_ARGS__, dst->_f, src->_f)))		\
 		dst->_f = src->_f
 #define copy_dev_field(_err, _f, _msg, ...)				\
-	copy_field(_err, _f, "dev %u has wrong " _msg, dev, ##__VA_ARGS__)
+	copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
 #define copy_fs_field(_err, _f, _msg, ...)				\
 	copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
 
 	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
 		bch2_fs_usage_acc_to_base(c, i);
 
-	for_each_member_device(ca, c, dev) {
+	__for_each_member_device(c, ca) {
 		struct bch_dev_usage *dst = ca->usage_base;
 		struct bch_dev_usage *src = (void *)
 			bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
@@ -1304,7 +1292,6 @@ static int bch2_gc_done(struct bch_fs *c,
 fsck_err:
 	if (ca)
 		percpu_ref_put(&ca->ref);
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 
 	percpu_up_write(&c->mark_lock);
|
||||
|
||||
static int bch2_gc_start(struct bch_fs *c)
|
||||
{
|
||||
struct bch_dev *ca = NULL;
|
||||
unsigned i;
|
||||
|
||||
BUG_ON(c->usage_gc);
|
||||
|
||||
c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
|
||||
@ -1326,7 +1310,7 @@ static int bch2_gc_start(struct bch_fs *c)
|
||||
return -BCH_ERR_ENOMEM_gc_start;
|
||||
}
|
||||
|
||||
for_each_member_device(ca, c, i) {
|
||||
for_each_member_device(c, ca) {
|
||||
BUG_ON(ca->usage_gc);
|
||||
|
||||
ca->usage_gc = alloc_percpu(struct bch_dev_usage);
|
||||
@ -1345,10 +1329,7 @@ static int bch2_gc_start(struct bch_fs *c)
|
||||
|
||||
static int bch2_gc_reset(struct bch_fs *c)
|
||||
{
|
||||
struct bch_dev *ca;
|
||||
unsigned i;
|
||||
|
||||
for_each_member_device(ca, c, i) {
|
||||
for_each_member_device(c, ca) {
|
||||
free_percpu(ca->usage_gc);
|
||||
ca->usage_gc = NULL;
|
||||
}
|
||||
@ -1386,9 +1367,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
|
||||
enum bch_data_type type;
|
||||
int ret;
|
||||
|
||||
if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
|
||||
return 1;
|
||||
|
||||
old = bch2_alloc_to_v4(k, &old_convert);
|
||||
new = *old;
|
||||
|
||||
@@ -1485,52 +1463,36 @@ fsck_err:
 
 static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_dev *ca;
-	unsigned i;
 	int ret = 0;
 
-	for_each_member_device(ca, c, i) {
-		ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+	for_each_member_device(c, ca) {
+		ret = bch2_trans_run(c,
+			for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
 				POS(ca->dev_idx, ca->mi.first_bucket),
+				POS(ca->dev_idx, ca->mi.nbuckets - 1),
 				BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
 				NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
-			bch2_alloc_write_key(trans, &iter, k, metadata_only));
-
-		if (ret < 0) {
-			bch_err_fn(c, ret);
+			bch2_alloc_write_key(trans, &iter, k, metadata_only)));
+		if (ret) {
 			percpu_ref_put(&ca->ref);
 			break;
 		}
 	}
 
-	bch2_trans_put(trans);
-	return ret < 0 ? ret : 0;
+	bch_err_fn(c, ret);
+	return ret;
 }
 
 static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 {
-	struct bch_dev *ca;
-	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bucket *g;
-	struct bch_alloc_v4 a_convert;
-	const struct bch_alloc_v4 *a;
-	unsigned i;
-	int ret;
-
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_array *buckets = kvpmalloc(sizeof(struct bucket_array) +
 				ca->mi.nbuckets * sizeof(struct bucket),
 				GFP_KERNEL|__GFP_ZERO);
 		if (!buckets) {
 			percpu_ref_put(&ca->ref);
 			bch_err(c, "error allocating ca->buckets[gc]");
-			ret = -BCH_ERR_ENOMEM_gc_alloc_start;
-			goto err;
+			return -BCH_ERR_ENOMEM_gc_alloc_start;
 		}
 
 		buckets->first_bucket	= ca->mi.first_bucket;
@@ -1538,12 +1500,14 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 		rcu_assign_pointer(ca->buckets_gc, buckets);
 	}
 
-	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
+	int ret = bch2_trans_run(c,
+		for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
 				 BTREE_ITER_PREFETCH, k, ({
-		ca = bch_dev_bkey_exists(c, k.k->p.inode);
-		g = gc_bucket(ca, k.k->p.offset);
+			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bucket *g = gc_bucket(ca, k.k->p.offset);
 
-		a = bch2_alloc_to_v4(k, &a_convert);
+			struct bch_alloc_v4 a_convert;
+			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
 
 			g->gen_valid	= 1;
 			g->gen		= a->gen;
@@ -1560,20 +1524,14 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 		}
 
 		0;
-	}));
-err:
-	bch2_trans_put(trans);
-	if (ret)
+	})));
 	bch_err_fn(c, ret);
 	return ret;
 }
 
 static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_array *buckets = gc_bucket_array(ca);
 		struct bucket *g;
 
@@ -1640,42 +1598,31 @@ fsck_err:
 
 static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
 	size_t idx = 0;
-	int ret = 0;
 
 	if (metadata_only)
 		return 0;
 
-	trans = bch2_trans_get(c);
-
-	ret = for_each_btree_key_commit(trans, iter,
+	int ret = bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter,
 			BTREE_ID_reflink, POS_MIN,
 			BTREE_ITER_PREFETCH, k,
 			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-		bch2_gc_write_reflink_key(trans, &iter, k, &idx));
-
+			bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
 	c->reflink_gc_nr = 0;
-	bch2_trans_put(trans);
 	return ret;
 }
 
 static int bch2_gc_reflink_start(struct bch_fs *c,
 				 bool metadata_only)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct reflink_gc *r;
-	int ret = 0;
-
 	if (metadata_only)
 		return 0;
 
 	c->reflink_gc_nr = 0;
 
-	ret = bch2_trans_run(c,
+	int ret = bch2_trans_run(c,
 		for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
 				   BTREE_ITER_PREFETCH, k, ({
 			const __le64 *refcount = bkey_refcount_c(k);
@@ -1683,8 +1630,8 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
 			if (!refcount)
 				continue;
 
-			r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
-					       GFP_KERNEL);
+			struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
+							c->reflink_gc_nr++, GFP_KERNEL);
 			if (!r) {
 				ret = -BCH_ERR_ENOMEM_gc_reflink_start;
 				break;
@@ -1764,24 +1711,15 @@ fsck_err:
 
 static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
 {
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret = 0;
-
 	if (metadata_only)
 		return 0;
 
-	trans = bch2_trans_get(c);
-
-	ret = for_each_btree_key_commit(trans, iter,
+	return bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter,
 			BTREE_ID_stripes, POS_MIN,
 			BTREE_ITER_PREFETCH, k,
 			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-		bch2_gc_write_stripes_key(trans, &iter, k));
-
-	bch2_trans_put(trans);
-	return ret;
+			bch2_gc_write_stripes_key(trans, &iter, k)));
 }
 
 static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
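The rewrites above fold explicit `bch2_trans_get()`/`bch2_trans_put()` pairs into `bch2_trans_run()`, which scopes a transaction to a single expression. Roughly the shape of that helper (a simplified sketch of what the sources define):

/* Sketch: get a transaction, evaluate the body with `trans` bound,
 * then drop the transaction before yielding the body's result. */
#define bch2_trans_run(_c, _do)						\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = (_do);						\
	bch2_trans_put(trans);						\
	_ret;								\
})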
@@ -1896,8 +1834,6 @@ out:
 	 * allocator thread - issue wakeup in case they blocked on gc_lock:
 	 */
 	closure_wake_up(&c->freelist_wait);
-
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
@@ -1966,12 +1902,7 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
 
 int bch2_gc_gens(struct bch_fs *c)
 {
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bch_dev *ca;
-	u64 b, start_time = local_clock();
-	unsigned i;
+	u64 start_time = local_clock();
 	int ret;
 
 	/*
@@ -1984,9 +1915,8 @@ int bch2_gc_gens(struct bch_fs *c)
 
 	trace_and_count(c, gc_gens_start, c);
 	down_read(&c->gc_lock);
-	trans = bch2_trans_get(c);
 
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bucket_gens *gens = bucket_gens(ca);
 
 		BUG_ON(ca->oldest_gen);
@@ -2003,33 +1933,31 @@ int bch2_gc_gens(struct bch_fs *c)
 			ca->oldest_gen[b] = gens->b[b];
 	}
 
-	for (i = 0; i < BTREE_ID_NR; i++)
+	for (unsigned i = 0; i < BTREE_ID_NR; i++)
 		if (btree_type_has_ptrs(i)) {
 			c->gc_gens_btree = i;
 			c->gc_gens_pos = POS_MIN;
 
-			ret = for_each_btree_key_commit(trans, iter, i,
+			ret = bch2_trans_run(c,
+				for_each_btree_key_commit(trans, iter, i,
 					POS_MIN,
 					BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
 					k,
 					NULL, NULL,
 					BCH_TRANS_COMMIT_no_enospc,
-				gc_btree_gens_key(trans, &iter, k));
-			if (ret && !bch2_err_matches(ret, EROFS))
-				bch_err_fn(c, ret);
+					gc_btree_gens_key(trans, &iter, k)));
 			if (ret)
 				goto err;
 		}
 
-	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
+	ret = bch2_trans_run(c,
+		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
 				POS_MIN,
 				BTREE_ITER_PREFETCH,
 				k,
 				NULL, NULL,
 				BCH_TRANS_COMMIT_no_enospc,
-			bch2_alloc_write_oldest_gen(trans, &iter, k));
-	if (ret && !bch2_err_matches(ret, EROFS))
-		bch_err_fn(c, ret);
+			bch2_alloc_write_oldest_gen(trans, &iter, k)));
 	if (ret)
 		goto err;
 
@@ -2041,14 +1969,15 @@ int bch2_gc_gens(struct bch_fs *c)
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
 	trace_and_count(c, gc_gens_end, c);
 err:
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		kvfree(ca->oldest_gen);
 		ca->oldest_gen = NULL;
 	}
 
-	bch2_trans_put(trans);
 	up_read(&c->gc_lock);
 	mutex_unlock(&c->gc_gens_lock);
+	if (!bch2_err_matches(ret, EROFS))
+		bch_err_fn(c, ret);
 	return ret;
 }
@@ -2100,9 +2029,6 @@ static int bch2_gc_thread(void *arg)
 #else
 		ret = bch2_gc_gens(c);
 #endif
-		if (ret < 0)
-			bch_err_fn(c, ret);
-
 		debug_check_no_locks_held();
 	}
 
@@ -652,7 +652,6 @@ void bch2_btree_path_level_init(struct btree_trans *trans,
 static void bch2_trans_revalidate_updates_in_node(struct btree_trans *trans, struct btree *b)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i;
 
 	trans_for_each_update(trans, i)
 		if (!i->cached &&
@@ -1374,8 +1373,6 @@ void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
 noinline __cold
 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 {
-	struct btree_insert_entry *i;
-
 	prt_printf(buf, "transaction updates for %s journal seq %llu",
 		   trans->fn, trans->journal_res.seq);
 	prt_newline(buf);
@@ -1881,7 +1878,6 @@ static noinline
 struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
 {
 	struct btree_trans *trans = iter->trans;
-	struct btree_insert_entry *i;
 	struct bkey_i *ret = NULL;
 
 	trans_for_each_update(trans, i) {
@@ -2322,7 +2318,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 			goto got_key;
 		}
 
-		if (bch2_snapshot_is_ancestor(iter->trans->c,
+		if (bch2_snapshot_is_ancestor(trans->c,
 					      iter->snapshot,
 					      k.k->p.snapshot)) {
 			if (saved_path)
@@ -3015,7 +3011,6 @@ leaked:
 void bch2_trans_put(struct btree_trans *trans)
 	__releases(&c->btree_trans_barrier)
 {
-	struct btree_insert_entry *i;
 	struct bch_fs *c = trans->c;
 
 	bch2_trans_unlock(trans);
@@ -648,38 +648,6 @@ static inline int btree_trans_too_many_iters(struct btree_trans *trans)
 	return 0;
 }
 
-struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
-
-static inline struct bkey_s_c
-__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
-				   struct btree_iter *iter, unsigned flags)
-{
-	struct bkey_s_c k;
-
-	while (btree_trans_too_many_iters(trans) ||
-	       (k = bch2_btree_iter_peek_type(iter, flags),
-		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
-		bch2_trans_begin(trans);
-
-	return k;
-}
-
-static inline struct bkey_s_c
-__bch2_btree_iter_peek_upto_and_restart(struct btree_trans *trans,
-					struct btree_iter *iter,
-					struct bpos end,
-					unsigned flags)
-{
-	struct bkey_s_c k;
-
-	while (btree_trans_too_many_iters(trans) ||
-	       (k = bch2_btree_iter_peek_upto_type(iter, end, flags),
-		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
-		bch2_trans_begin(trans);
-
-	return k;
-}
-
 /*
  * goto instead of loop, so that when used inside for_each_btree_key2()
  * break/continue work correctly
@@ -726,9 +694,11 @@ transaction_restart:							\
 	_ret2 ?: trans_was_restarted(_trans, _restart_count);		\
 })
 
-#define for_each_btree_key2_upto(_trans, _iter, _btree_id,		\
+#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
 			    _start, _end, _flags, _k, _do)		\
 ({									\
+	struct btree_iter _iter;					\
+	struct bkey_s_c _k;						\
 	int _ret3 = 0;							\
 									\
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
@@ -751,12 +721,14 @@ transaction_restart:							\
 
 #define for_each_btree_key(_trans, _iter, _btree_id,			\
 			   _start, _flags, _k, _do)			\
-	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start,	\
+	for_each_btree_key_upto(_trans, _iter, _btree_id, _start,	\
 				 SPOS_MAX, _flags, _k, _do)
 
 #define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
 			   _start, _flags, _k, _do)			\
 ({									\
+	struct btree_iter _iter;					\
+	struct bkey_s_c _k;						\
 	int _ret3 = 0;							\
 									\
 	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
@@ -797,10 +769,26 @@ transaction_restart:							\
 			  _start, _end, _iter_flags, _k,		\
 			  _disk_res, _journal_seq, _commit_flags,	\
 			  _do)						\
-	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
+	for_each_btree_key_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
 			  (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
 					(_journal_seq), (_commit_flags)))
 
+struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *);
+
+static inline struct bkey_s_c
+__bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
+				   struct btree_iter *iter, unsigned flags)
+{
+	struct bkey_s_c k;
+
+	while (btree_trans_too_many_iters(trans) ||
+	       (k = bch2_btree_iter_peek_type(iter, flags),
+		bch2_err_matches(bkey_err(k), BCH_ERR_transaction_restart)))
+		bch2_trans_begin(trans);
+
+	return k;
+}
+
 #define for_each_btree_key_old(_trans, _iter, _btree_id,		\
 			       _start, _flags, _k, _ret)		\
 	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
@@ -809,23 +797,6 @@ transaction_restart:							\
 	     !((_ret) = bkey_err(_k)) && (_k).k;			\
 	     bch2_btree_iter_advance(&(_iter)))
 
-#define for_each_btree_key_upto(_trans, _iter, _btree_id,		\
-				_start, _end, _flags, _k, _ret)		\
-	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
-				  (_start), (_flags));			\
-	     (_k) = __bch2_btree_iter_peek_upto_and_restart((_trans),	\
-						&(_iter), _end, _flags),\
-	     !((_ret) = bkey_err(_k)) && (_k).k;			\
-	     bch2_btree_iter_advance(&(_iter)))
-
-#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
-				_start, _flags, _k, _ret)		\
-	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
-				  (_start), (_flags));			\
-	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
-	     !((_ret) = bkey_err(_k)) && (_k).k;			\
-	     bch2_btree_iter_advance(&(_iter)))
-
 #define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id,	\
 				_start, _end, _flags, _k, _ret)		\
 	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
@@ -834,24 +805,20 @@ transaction_restart:							\
 	     !((_ret) = bkey_err(_k)) && (_k).k;			\
 	     bch2_btree_iter_advance(&(_iter)))
 
-#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _ret)	\
-	for (;								\
-	     (_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
-	     !((_ret) = bkey_err(_k)) && (_k).k;			\
-	     bch2_btree_iter_advance(&(_iter)))
-
-#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
-	for (;								\
-	     (_k) = bch2_btree_iter_peek_type(&(_iter), _flags),	\
-	     !((_ret) = bkey_err(_k)) && (_k).k;			\
-	     bch2_btree_iter_advance(&(_iter)))
-
 #define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
 	for (;								\
 	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),	\
 	     !((_ret) = bkey_err(_k)) && (_k).k;			\
 	     bch2_btree_iter_advance(&(_iter)))
 
+#define for_each_btree_key_norestart(_trans, _iter, _btree_id,		\
+			   _start, _flags, _k, _ret)			\
+	for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, _start,\
+			   SPOS_MAX, _flags, _k, _ret)
+
+#define for_each_btree_key_continue_norestart(_iter, _flags, _k, _ret)	\
+	for_each_btree_key_upto_continue_norestart(_iter, SPOS_MAX, _flags, _k, _ret)
+
 #define drop_locks_do(_trans, _do)					\
 ({									\
 	bch2_trans_unlock(_trans);					\
@@ -883,8 +850,6 @@ transaction_restart:							\
 	_p;								\
 })
 
-/* new multiple iterator interface: */
-
 void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
 void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
 void bch2_dump_trans_updates(struct btree_trans *);
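The upshot of these header changes: the `for_each_btree_key*()` loop macros now declare `_iter` and `_k` inside their statement-expression body (note the `struct btree_iter _iter;` / `struct bkey_s_c _k;` lines added to the macro bodies), so the names a caller passes in are declarations owned by the macro. That is why callers throughout this commit drop their local `struct btree_iter iter;` and `struct bkey_s_c k;`. A sketch of a converted caller, with an illustrative loop body:

/* No iter/k declarations in the caller; the macro declares them, and
 * the ({ ... }) body's value is the per-key result (0 = keep going). */
int ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_PREFETCH, k, ({
	/* use k here; iter.pos is the current position */
	0;
}));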
@@ -93,8 +93,6 @@ static noinline int trans_lock_write_fail(struct btree_trans *trans, struct btre
 
 static inline int bch2_trans_lock_write(struct btree_trans *trans)
 {
-	struct btree_insert_entry *i;
-
 	EBUG_ON(trans->write_locked);
 
 	trans_for_each_update(trans, i) {
@@ -115,8 +113,6 @@ static inline int bch2_trans_lock_write(struct btree_trans *trans)
 static inline void bch2_trans_unlock_write(struct btree_trans *trans)
 {
 	if (likely(trans->write_locked)) {
-		struct btree_insert_entry *i;
-
 		trans_for_each_update(trans, i)
 			if (!same_leaf_as_prev(trans, i))
 				bch2_btree_node_unlock_write_inlined(trans,
@@ -363,7 +359,6 @@ noinline static int
 btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
 				     struct btree_path *path, unsigned new_u64s)
 {
-	struct btree_insert_entry *i;
 	struct bkey_cached *ck = (void *) path->l[0].b;
 	struct bkey_i *new_k;
 	int ret;
@@ -402,7 +397,6 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_cached *ck = (void *) path->l[0].b;
-	struct btree_insert_entry *i;
 	unsigned new_u64s;
 	struct bkey_i *new_k;
 
@@ -550,7 +544,7 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
 
 static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
 {
-	struct btree_insert_entry *i = NULL, *btree_id_start = trans->updates;
+	struct btree_insert_entry *btree_id_start = trans->updates;
 	unsigned btree_id = 0;
 	int ret = 0;
 
@@ -597,7 +591,6 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
 static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i;
 	int ret = 0;
 
 	trans_for_each_update(trans, i) {
@@ -623,7 +616,6 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
 			       unsigned long trace_ip)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i;
 	struct btree_trans_commit_hook *h;
 	unsigned u64s = 0;
 	int ret;
@@ -777,8 +769,6 @@ revert_fs_usage:
 
 static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans)
 {
-	struct btree_insert_entry *i;
-
 	trans_for_each_update(trans, i)
 		bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
 }
@@ -822,7 +812,6 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
 			       unsigned long trace_ip)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i;
 	int ret = 0, u64s_delta = 0;
 
 	trans_for_each_update(trans, i) {
@@ -967,7 +956,6 @@ static noinline int
 do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i;
 	int ret = 0;
 
 	trans_for_each_update(trans, i) {
@@ -981,8 +969,8 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
 
 int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
 {
+	struct btree_insert_entry *errored_at = NULL;
 	struct bch_fs *c = trans->c;
-	struct btree_insert_entry *i = NULL;
 	int ret = 0;
 
 	if (!trans->nr_updates &&
@@ -1063,11 +1051,12 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
 		goto err;
 	}
 retry:
+	errored_at = NULL;
 	bch2_trans_verify_not_in_restart(trans);
 	if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
 		memset(&trans->journal_res, 0, sizeof(trans->journal_res));
 
-	ret = do_bch2_trans_commit(trans, flags, &i, _RET_IP_);
+	ret = do_bch2_trans_commit(trans, flags, &errored_at, _RET_IP_);
 
 	/* make sure we didn't drop or screw up locks: */
 	bch2_trans_verify_locks(trans);
@@ -1086,7 +1075,7 @@ out_reset:
 
 	return ret;
 err:
-	ret = bch2_trans_commit_error(trans, flags, i, ret, _RET_IP_);
+	ret = bch2_trans_commit_error(trans, flags, errored_at, ret, _RET_IP_);
 	if (ret)
 		goto out;
 
@@ -409,7 +409,7 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 	 * Pending updates are kept sorted: first, find position of new update,
 	 * then delete/trim any updates the new update overwrites:
 	 */
-	trans_for_each_update(trans, i) {
+	for (i = trans->updates; i < trans->updates + trans->nr_updates; i++) {
 		cmp = btree_insert_entry_cmp(&n, i);
 		if (cmp <= 0)
 			break;
@@ -187,14 +187,12 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
 	bch2_trans_run(_c, commit_do(trans, _disk_res, _journal_seq, _flags, _do))
 
 #define trans_for_each_update(_trans, _i)				\
-	for ((_i) = (_trans)->updates;					\
+	for (struct btree_insert_entry *_i = (_trans)->updates;	\
 	     (_i) < (_trans)->updates + (_trans)->nr_updates;		\
 	     (_i)++)
 
 static inline void bch2_trans_reset_updates(struct btree_trans *trans)
 {
-	struct btree_insert_entry *i;
-
 	trans_for_each_update(trans, i)
 		bch2_path_put(trans, i->path, true);
 
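`trans_for_each_update()` now declares its iteration variable in the `for` statement itself (C99 scoping), which is what removes a `struct btree_insert_entry *i;` declaration from every well-behaved caller above. The one caller that used `i` after breaking out of the loop, `bch2_trans_update_by_path` in the previous hunk, has to open-code the iteration instead, since a macro-scoped variable dies at the loop's closing brace:

/* The macro owns the declaration now: */
#define trans_for_each_update(_trans, _i)				\
	for (struct btree_insert_entry *_i = (_trans)->updates;		\
	     (_i) < (_trans)->updates + (_trans)->nr_updates;		\
	     (_i)++)

/* Fine when i is only used inside the loop body: */
trans_for_each_update(trans, i)
	bch2_path_put(trans, i->path, true);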
@@ -99,7 +99,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
 
 /* Calculate ideal packed bkey format for new btree nodes: */
 
-void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
+static void __bch2_btree_calc_format(struct bkey_format_state *s, struct btree *b)
 {
 	struct bkey_packed *k;
 	struct bset_tree *t;
@@ -125,21 +125,20 @@ static struct bkey_format bch2_btree_calc_format(struct btree *b)
 	return bch2_bkey_format_done(&s);
 }
 
-static size_t btree_node_u64s_with_format(struct btree *b,
+static size_t btree_node_u64s_with_format(struct btree_nr_keys nr,
+					  struct bkey_format *old_f,
 					  struct bkey_format *new_f)
 {
-	struct bkey_format *old_f = &b->format;
-
 	/* stupid integer promotion rules */
 	ssize_t delta =
 	    (((int) new_f->key_u64s - old_f->key_u64s) *
-	     (int) b->nr.packed_keys) +
+	     (int) nr.packed_keys) +
 	    (((int) new_f->key_u64s - BKEY_U64s) *
-	     (int) b->nr.unpacked_keys);
+	     (int) nr.unpacked_keys);
 
-	BUG_ON(delta + b->nr.live_u64s < 0);
+	BUG_ON(delta + nr.live_u64s < 0);
 
-	return b->nr.live_u64s + delta;
+	return nr.live_u64s + delta;
 }
 
 /**
@@ -153,10 +152,11 @@ static size_t btree_node_u64s_with_format(struct btree *b,
  *
  * Assumes all keys will successfully pack with the new format.
  */
-bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
+static bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *b,
+				 struct btree_nr_keys nr,
 				 struct bkey_format *new_f)
 {
-	size_t u64s = btree_node_u64s_with_format(b, new_f);
+	size_t u64s = btree_node_u64s_with_format(nr, &b->format, new_f);
 
 	return __vstruct_bytes(struct btree_node, u64s) < btree_bytes(c);
 }
@@ -393,7 +393,7 @@ static struct btree *bch2_btree_node_alloc_replacement(struct btree_update *as,
 	 * The keys might expand with the new format - if they wouldn't fit in
 	 * the btree node anymore, use the old format for now:
 	 */
-	if (!bch2_btree_node_format_fits(as->c, b, &format))
+	if (!bch2_btree_node_format_fits(as->c, b, b->nr, &format))
 		format = b->format;
 
 	SET_BTREE_NODE_SEQ(n->data, BTREE_NODE_SEQ(b->data) + 1);
@@ -1353,8 +1353,11 @@ static void __btree_split_node(struct btree_update *as,
 	struct bkey_packed *out[2];
 	struct bkey uk;
 	unsigned u64s, n1_u64s = (b->nr.live_u64s * 3) / 5;
+	struct { unsigned nr_keys, val_u64s; } nr_keys[2];
 	int i;
 
+	memset(&nr_keys, 0, sizeof(nr_keys));
+
 	for (i = 0; i < 2; i++) {
 		BUG_ON(n[i]->nsets != 1);
 
@@ -1376,6 +1379,9 @@ static void __btree_split_node(struct btree_update *as,
 		if (!i)
 			n1_pos = uk.p;
 		bch2_bkey_format_add_key(&format[i], &uk);
+
+		nr_keys[i].nr_keys++;
+		nr_keys[i].val_u64s += bkeyp_val_u64s(&b->format, k);
 	}
 
 	btree_set_min(n[0], b->data->min_key);
@@ -1388,6 +1394,12 @@ static void __btree_split_node(struct btree_update *as,
 		bch2_bkey_format_add_pos(&format[i], n[i]->data->max_key);
 
 		n[i]->data->format = bch2_bkey_format_done(&format[i]);
+
+		unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s +
+			nr_keys[i].val_u64s;
+		if (__vstruct_bytes(struct btree_node, u64s) > btree_bytes(as->c))
+			n[i]->data->format = b->format;
+
 		btree_node_set_format(n[i], n[i]->data->format);
 	}
 
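This is the guard the commit title refers to. `__btree_split_node()` builds a fresh packed key format for each new child; a pathological key distribution can make the new format's per-key size larger than the parent's, so blindly repacking could overflow the child node. The split therefore tallies key count and value size per child while routing keys, then checks the worst case before committing to the new format. The check from the hunk above, annotated (the comments are editorial):

/* Worst-case packed size of child i under its freshly built format:
 * every key costs format.key_u64s u64s, plus the format-independent
 * u64s of its value. */
unsigned u64s = nr_keys[i].nr_keys * n[i]->data->format.key_u64s +
		nr_keys[i].val_u64s;

/* If that exceeds a btree node, fall back to the parent's format:
 * these keys demonstrably fit in the parent, so they pack in it. */
if (__vstruct_bytes(struct btree_node, u64s) > btree_bytes(as->c))
	n[i]->data->format = b->format;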
@@ -1840,8 +1852,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	bch2_bkey_format_add_pos(&new_s, next->data->max_key);
 	new_f = bch2_bkey_format_done(&new_s);
 
-	sib_u64s = btree_node_u64s_with_format(b, &new_f) +
-		btree_node_u64s_with_format(m, &new_f);
+	sib_u64s = btree_node_u64s_with_format(b->nr, &b->format, &new_f) +
+		btree_node_u64s_with_format(m->nr, &m->format, &new_f);
 
 	if (sib_u64s > BTREE_FOREGROUND_MERGE_HYSTERESIS(c)) {
 		sib_u64s -= BTREE_FOREGROUND_MERGE_HYSTERESIS(c);
@@ -2052,7 +2064,6 @@ static void async_btree_node_rewrite_work(struct work_struct *work)
 
 	ret = bch2_trans_do(c, NULL, NULL, 0,
 		      async_btree_node_rewrite_trans(trans, a));
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	bch2_write_ref_put(c, BCH_WRITE_REF_node_rewrite);
 	kfree(a);
@@ -2091,8 +2102,8 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
 	}
 
 	ret = bch2_fs_read_write_early(c);
-	if (ret) {
-		bch_err_msg(c, ret, "going read-write");
+	bch_err_msg(c, ret, "going read-write");
+	if (ret) {
 		kfree(a);
 		return;
 	}
@@ -6,10 +6,6 @@
 #include "btree_locking.h"
 #include "btree_update.h"
 
-void __bch2_btree_calc_format(struct bkey_format_state *, struct btree *);
-bool bch2_btree_node_format_fits(struct bch_fs *c, struct btree *,
-				 struct bkey_format *);
-
 #define BTREE_UPDATE_NODES_MAX		((BTREE_MAX_DEPTH - 2) * 2 + GC_MERGE_NODES)
 
 #define BTREE_UPDATE_JOURNAL_RES	(BTREE_UPDATE_NODES_MAX * (BKEY_BTREE_PTR_U64s_MAX + 1))
@@ -248,7 +248,6 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 	struct bch_fs *c = trans->c;
 	struct journal *j = &c->journal;
 	struct btree_write_buffer *wb = &c->btree_write_buffer;
-	struct wb_key_ref *i;
 	struct btree_iter iter = { NULL };
 	size_t skipped = 0, fast = 0, slowpath = 0;
 	bool write_locked = false;
@@ -359,7 +358,6 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 		 */
 		trace_and_count(c, write_buffer_flush_slowpath, trans, slowpath, wb->flushing.keys.nr);
 
-		struct btree_write_buffered_key *i;
 		darray_for_each(wb->flushing.keys, i) {
 			if (!i->journal_seq)
 				continue;
@@ -47,27 +47,23 @@ static inline void fs_usage_data_type_to_base(struct bch_fs_usage *fs_usage,
 
 void bch2_fs_usage_initialize(struct bch_fs *c)
 {
-	struct bch_fs_usage *usage;
-	struct bch_dev *ca;
-	unsigned i;
-
 	percpu_down_write(&c->mark_lock);
-	usage = c->usage_base;
+	struct bch_fs_usage *usage = c->usage_base;
 
-	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+	for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
 		bch2_fs_usage_acc_to_base(c, i);
 
-	for (i = 0; i < BCH_REPLICAS_MAX; i++)
+	for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++)
 		usage->reserved += usage->persistent_reserved[i];
 
-	for (i = 0; i < c->replicas.nr; i++) {
+	for (unsigned i = 0; i < c->replicas.nr; i++) {
 		struct bch_replicas_entry_v1 *e =
 			cpu_replicas_entry(&c->replicas, i);
 
 		fs_usage_data_type_to_base(usage, e->data_type, usage->replicas[i]);
 	}
 
-	for_each_member_device(ca, c, i) {
+	for_each_member_device(c, ca) {
 		struct bch_dev_usage dev = bch2_dev_usage_read(ca);
 
 		usage->hidden += (dev.d[BCH_DATA_sb].buckets +
@@ -158,8 +154,7 @@ retry:
 
 void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
 {
-	struct bch_dev *ca;
-	unsigned i, u64s = fs_usage_u64s(c);
+	unsigned u64s = fs_usage_u64s(c);
 
 	BUG_ON(idx >= ARRAY_SIZE(c->usage));
 
@@ -171,7 +166,7 @@ void bch2_fs_usage_acc_to_base(struct bch_fs *c, unsigned idx)
 	percpu_memset(c->usage[idx], 0, u64s * sizeof(u64));
 
 	rcu_read_lock();
-	for_each_member_device_rcu(ca, c, i, NULL) {
+	for_each_member_device_rcu(c, ca, NULL) {
 		u64s = dev_usage_u64s();
 
 		acc_u64s_percpu((u64 *) ca->usage_base,
@@ -1760,17 +1755,13 @@ int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
 {
 	int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));
 
-	if (ret)
-		bch_err_fn(c, ret);
+	bch_err_fn(c, ret);
 	return ret;
 }
 
 int bch2_trans_mark_dev_sbs(struct bch_fs *c)
 {
-	struct bch_dev *ca;
-	unsigned i;
-
-	for_each_online_member(ca, c, i) {
+	for_each_online_member(c, ca) {
 		int ret = bch2_trans_mark_dev_sb(c, ca);
 		if (ret) {
 			percpu_ref_put(&ca->ref);
@@ -865,8 +865,6 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
 					struct bch_ioctl_disk_get_idx arg)
 {
 	dev_t dev = huge_decode_dev(arg.dev);
-	struct bch_dev *ca;
-	unsigned i;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -874,10 +872,10 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
 	if (!dev)
 		return -EINVAL;
 
-	for_each_online_member(ca, c, i)
+	for_each_online_member(c, ca)
 		if (ca->dev == dev) {
 			percpu_ref_put(&ca->io_ref);
-			return i;
+			return ca->dev_idx;
 		}
 
 	return -BCH_ERR_ENOENT_dev_idx_not_found;
@@ -78,11 +78,14 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more,
 #define darray_remove_item(_d, _pos)					\
 	array_remove_item((_d)->data, (_d)->nr, (_pos) - (_d)->data)
 
+#define __darray_for_each(_d, _i)					\
+	for ((_i) = (_d).data; _i < (_d).data + (_d).nr; _i++)
+
 #define darray_for_each(_d, _i)						\
-	for (_i = (_d).data; _i < (_d).data + (_d).nr; _i++)
+	for (typeof(&(_d).data[0]) _i = (_d).data; _i < (_d).data + (_d).nr; _i++)
 
 #define darray_for_each_reverse(_d, _i)					\
-	for (_i = (_d).data + (_d).nr - 1; _i >= (_d).data; --_i)
+	for (typeof(&(_d).data[0]) _i = (_d).data + (_d).nr - 1; _i >= (_d).data; --_i)
 
 #define darray_init(_d)							\
 do {									\
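`darray_for_each()` now uses `typeof(&(_d).data[0])` to declare the element pointer itself, so callers stop pre-declaring one; `__darray_for_each()` keeps the old caller-declares form for the rare user that needs the pointer after the loop. A usage sketch with an illustrative element type (bcachefs-tools is userspace, so printf is fine here):

DARRAY(unsigned) nums = {};
int ret = darray_push(&nums, 42);	/* can fail with -ENOMEM */

if (!ret)
	darray_for_each(nums, i)	/* i is unsigned *, loop-scoped */
		printf("%u\n", *i);

darray_exit(&nums);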
@@ -366,36 +366,23 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
 			       size_t size, loff_t *ppos)
 {
 	struct dump_iter *i = file->private_data;
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	ssize_t ret;
 
 	i->ubuf = buf;
 	i->size	= size;
 	i->ret	= 0;
 
-	ret = flush_buf(i);
-	if (ret)
-		return ret;
-
-	trans = bch2_trans_get(i->c);
-	ret = for_each_btree_key(trans, iter, i->id, i->from,
+	return flush_buf(i) ?:
+		bch2_trans_run(i->c,
+			for_each_btree_key(trans, iter, i->id, i->from,
 				   BTREE_ITER_PREFETCH|
 				   BTREE_ITER_ALL_SNAPSHOTS, k, ({
 		bch2_bkey_val_to_text(&i->buf, i->c, k);
 		prt_newline(&i->buf);
 		bch2_trans_unlock(trans);
 		i->from = bpos_successor(iter.pos);
 		flush_buf(i);
-	}));
-	i->from = iter.pos;
-
-	bch2_trans_put(trans);
-
-	if (!ret)
-		ret = flush_buf(i);
-
-	return ret ?: i->ret;
+	}))) ?:
+		i->ret;
 }
 
 static const struct file_operations btree_debug_ops = {
@@ -463,22 +450,14 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
 				       size_t size, loff_t *ppos)
 {
 	struct dump_iter *i = file->private_data;
-	struct btree_trans *trans;
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	ssize_t ret;
 
 	i->ubuf = buf;
 	i->size	= size;
 	i->ret	= 0;
 
-	ret = flush_buf(i);
-	if (ret)
-		return ret;
-
-	trans = bch2_trans_get(i->c);
-
-	ret = for_each_btree_key(trans, iter, i->id, i->from,
+	return flush_buf(i) ?:
+		bch2_trans_run(i->c,
+			for_each_btree_key(trans, iter, i->id, i->from,
 				   BTREE_ITER_PREFETCH|
 				   BTREE_ITER_ALL_SNAPSHOTS, k, ({
 			struct btree_path_level *l =
@@ -493,16 +472,10 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
 
 			bch2_bfloat_to_text(&i->buf, l->b, _k);
 			bch2_trans_unlock(trans);
 			i->from = bpos_successor(iter.pos);
 			flush_buf(i);
-	}));
-	i->from = iter.pos;
-
-	bch2_trans_put(trans);
-
-	if (!ret)
-		ret = flush_buf(i);
-
-	return ret ?: i->ret;
+		}))) ?:
+		i->ret;
 }
 
 static const struct file_operations bfloat_failed_debug_ops = {
@@ -65,7 +65,7 @@ static bool dirent_cmp_key(struct bkey_s_c _l, const void *_r)
 	const struct qstr l_name = bch2_dirent_get_name(l);
 	const struct qstr *r_name = _r;
 
-	return l_name.len - r_name->len ?: memcmp(l_name.name, r_name->name, l_name.len);
+	return !qstr_eq(l_name, *r_name);
 }
 
 static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
@@ -75,7 +75,7 @@ static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
 	const struct qstr l_name = bch2_dirent_get_name(l);
 	const struct qstr r_name = bch2_dirent_get_name(r);
 
-	return l_name.len - r_name.len ?: memcmp(l_name.name, r_name.name, l_name.len);
+	return !qstr_eq(l_name, r_name);
 }
 
 static bool dirent_is_visible(subvol_inum inum, struct bkey_s_c k)
@@ -471,16 +471,10 @@ u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
 		       const struct qstr *name, subvol_inum *inum)
 {
 	struct btree_trans *trans = bch2_trans_get(c);
-	struct btree_iter iter;
-	int ret;
-retry:
-	bch2_trans_begin(trans);
+	struct btree_iter iter = { NULL };
 
-	ret = __bch2_dirent_lookup_trans(trans, &iter, dir, hash_info,
-					 name, inum, 0);
-	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-		goto retry;
-	if (!ret)
+	int ret = lockrestart_do(trans,
+		__bch2_dirent_lookup_trans(trans, &iter, dir, hash_info, name, inum, 0));
 	bch2_trans_iter_exit(trans, &iter);
 	bch2_trans_put(trans);
 	return ret;
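Both dirent comparison callbacks return true when the names differ; spelling that as a negated `qstr_eq()` says so directly, instead of a `len` subtraction chained into `memcmp()`. For reference, the helper's likely shape (a sketch; bcachefs carries its own definition, assumed here):

/* Sketch of qstr_eq(): names are equal iff lengths and bytes match. */
static inline bool qstr_eq(const struct qstr l, const struct qstr r)
{
	return l.len == r.len && !memcmp(l.name, r.name, l.len);
}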
@ -89,19 +89,14 @@ err:

void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
{
struct bch_disk_groups_cpu *g;
struct bch_dev *ca;
int i;
unsigned iter;

out->atomic++;
rcu_read_lock();

g = rcu_dereference(c->disk_groups);
struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
if (!g)
goto out;

for (i = 0; i < g->nr; i++) {
for (unsigned i = 0; i < g->nr; i++) {
if (i)
prt_printf(out, " ");

@ -111,7 +106,7 @@ void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
}

prt_printf(out, "[parent %d devs", g->entries[i].parent);
for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs)
for_each_member_device_rcu(c, ca, &g->entries[i].devs)
prt_printf(out, " %s", ca->name);
prt_printf(out, "]");
}

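for_each_member_device_rcu() loses its index parameter here; the device pointer itself carries the position, and call sites that needed the index read ca->dev_idx instead (as the ec.c hunks below also do). The new definitions appear in the sb-members.h hunk near the end of this commit. Usage now looks like this sketch; the loop body is illustrative only:

/* caller still provides the RCU read lock; a NULL mask visits every device */
rcu_read_lock();
for_each_member_device_rcu(c, ca, NULL)
	pr_info("dev %u online\n", ca->dev_idx);
rcu_read_unlock();
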
@ -791,27 +791,21 @@ static void ec_stripe_delete_work(struct work_struct *work)
{
struct bch_fs *c =
container_of(work, struct bch_fs, ec_stripe_delete_work);
struct btree_trans *trans = bch2_trans_get(c);
int ret;
u64 idx;

while (1) {
mutex_lock(&c->ec_stripes_heap_lock);
idx = stripe_idx_to_delete(c);
u64 idx = stripe_idx_to_delete(c);
mutex_unlock(&c->ec_stripes_heap_lock);

if (!idx)
break;

ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
ec_stripe_delete(trans, idx));
if (ret) {
bch_err_fn(c, ret);
if (ret)
break;
}
}

bch2_trans_put(trans);

bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
}
@ -1126,16 +1120,15 @@ static void ec_stripe_create(struct ec_stripe_new *s)
ec_stripe_key_update(trans,
bkey_i_to_stripe(&s->new_stripe.key),
!s->have_existing_stripe));
bch_err_msg(c, ret, "creating stripe key");
if (ret) {
bch_err(c, "error creating stripe: error creating stripe key");
goto err;
}

ret = ec_stripe_update_extents(c, &s->new_stripe);
if (ret) {
bch_err_msg(c, ret, "creating stripe: error updating pointers");
bch_err_msg(c, ret, "error updating extents");
if (ret)
goto err;
}
err:
bch2_disk_reservation_put(c, &s->res);

@ -1250,18 +1243,17 @@ static int unsigned_cmp(const void *_l, const void *_r)
static unsigned pick_blocksize(struct bch_fs *c,
struct bch_devs_mask *devs)
{
struct bch_dev *ca;
unsigned i, nr = 0, sizes[BCH_SB_MEMBERS_MAX];
unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
struct {
unsigned nr, size;
} cur = { 0, 0 }, best = { 0, 0 };

for_each_member_device_rcu(ca, c, i, devs)
for_each_member_device_rcu(c, ca, devs)
sizes[nr++] = ca->mi.bucket_size;

sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);

for (i = 0; i < nr; i++) {
for (unsigned i = 0; i < nr; i++) {
if (sizes[i] != cur.size) {
if (cur.nr > best.nr)
best = cur;
@ -1344,8 +1336,6 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
enum bch_watermark watermark)
{
struct ec_stripe_head *h;
struct bch_dev *ca;
unsigned i;

h = kzalloc(sizeof(*h), GFP_KERNEL);
if (!h)
@ -1362,13 +1352,13 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
rcu_read_lock();
h->devs = target_rw_devs(c, BCH_DATA_user, target);

for_each_member_device_rcu(ca, c, i, &h->devs)
for_each_member_device_rcu(c, ca, &h->devs)
if (!ca->mi.durability)
__clear_bit(i, h->devs.d);
__clear_bit(ca->dev_idx, h->devs.d);

h->blocksize = pick_blocksize(c, &h->devs);

for_each_member_device_rcu(ca, c, i, &h->devs)
for_each_member_device_rcu(c, ca, &h->devs)
if (ca->mi.bucket_size == h->blocksize)
h->nr_active_devs++;

@ -1833,14 +1823,7 @@ void bch2_fs_ec_flush(struct bch_fs *c)

int bch2_stripes_read(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
const struct bch_stripe *s;
struct stripe *m;
unsigned i;
int ret;

ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_PREFETCH, k, ({
if (k.k->type != KEY_TYPE_stripe)
@ -1850,22 +1833,21 @@ int bch2_stripes_read(struct bch_fs *c)
if (ret)
break;

s = bkey_s_c_to_stripe(k).v;
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;

m = genradix_ptr(&c->stripes, k.k->p.offset);
struct stripe *m = genradix_ptr(&c->stripes, k.k->p.offset);
m->sectors = le16_to_cpu(s->sectors);
m->algorithm = s->algorithm;
m->nr_blocks = s->nr_blocks;
m->nr_redundant = s->nr_redundant;
m->blocks_nonempty = 0;

for (i = 0; i < s->nr_blocks; i++)
for (unsigned i = 0; i < s->nr_blocks; i++)
m->blocks_nonempty += !!stripe_blockcount_get(s, i);

bch2_stripes_heap_insert(c, m, k.k->p.offset);
0;
})));

bch_err_fn(c, ret);
return ret;
}

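In ec_stripe_delete_work() the manually managed transaction plus commit_do() becomes bch2_trans_do(), which takes the filesystem rather than a transaction. From the surrounding deletions it appears to construct a transaction, run the commit loop, and tear the transaction down in one expression, roughly like this assumed expansion (my_trans_do is a sketch, not the real macro):

#define my_trans_do(_c, _disk_res, _journal_seq, _flags, _do)		\
({									\
	struct btree_trans *trans = bch2_trans_get(_c);			\
	int _ret = commit_do(trans, _disk_res, _journal_seq,		\
			     _flags, _do);				\
	bch2_trans_put(trans);						\
	_ret;								\
})
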
@ -52,14 +52,11 @@ struct readpages_iter {
static int readpages_iter_init(struct readpages_iter *iter,
struct readahead_control *ractl)
{
struct folio **fi;
int ret;

memset(iter, 0, sizeof(*iter));

iter->mapping = ractl->mapping;

ret = bch2_filemap_get_contig_folios_d(iter->mapping,
int ret = bch2_filemap_get_contig_folios_d(iter->mapping,
ractl->_index << PAGE_SHIFT,
(ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
0, mapping_gfp_mask(iter->mapping),
@ -826,7 +823,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch2_folio_reservation res;
folios fs;
struct folio **fi, *f;
struct folio *f;
unsigned copied = 0, f_offset, f_copied;
u64 end = pos + len, f_pos, f_len;
loff_t last_folio_pos = inode->v.i_size;

@ -998,15 +998,13 @@ static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
{
struct bch_inode_info *inode = file_bch_inode(file);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
int ret;

if (!dir_emit_dots(file, ctx))
return 0;

ret = bch2_readdir(c, inode_inum(inode), ctx);
if (ret)
bch_err_fn(c, ret);
int ret = bch2_readdir(c, inode_inum(inode), ctx);

bch_err_fn(c, ret);
return bch2_err_class(ret);
}

@ -1472,7 +1470,7 @@ static void bch2_evict_inode(struct inode *vinode)

void bch2_evict_subvolume_inodes(struct bch_fs *c, snapshot_id_list *s)
{
struct bch_inode_info *inode, **i;
struct bch_inode_info *inode;
DARRAY(struct bch_inode_info *) grabbed;
bool clean_pass = false, this_pass_clean;

@ -1668,11 +1666,9 @@ err:
static int bch2_show_devname(struct seq_file *seq, struct dentry *root)
{
struct bch_fs *c = root->d_sb->s_fs_info;
struct bch_dev *ca;
unsigned i;
bool first = true;

for_each_online_member(ca, c, i) {
for_each_online_member(c, ca) {
if (!first)
seq_putc(seq, ':');
first = false;
@ -1796,13 +1792,12 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct bch_fs *c;
struct bch_dev *ca;
struct super_block *sb;
struct inode *vinode;
struct bch_opts opts = bch2_opts_empty();
char **devs;
struct bch_fs **devs_to_fs = NULL;
unsigned i, nr_devs;
unsigned nr_devs;
int ret;

opt_set(opts, read_only, (flags & SB_RDONLY) != 0);
@ -1824,7 +1819,7 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
goto got_sb;
}

for (i = 0; i < nr_devs; i++)
for (unsigned i = 0; i < nr_devs; i++)
devs_to_fs[i] = bch2_path_to_fs(devs[i]);

sb = sget(fs_type, bch2_test_super, bch2_noset_super,
@ -1895,7 +1890,7 @@ got_sb:

sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;

for_each_online_member(ca, c, i) {
for_each_online_member(c, ca) {
struct block_device *bdev = ca->disk_sb.bdev;

/* XXX: create an anonymous device for multi device filesystems */
@ -1916,10 +1911,9 @@ got_sb:

vinode = bch2_vfs_inode_get(c, BCACHEFS_ROOT_SUBVOL_INUM);
ret = PTR_ERR_OR_ZERO(vinode);
if (ret) {
bch_err_msg(c, ret, "mounting: error getting root inode");
if (ret)
goto err_put_super;
}

sb->s_root = d_make_root(vinode);
if (!sb->s_root) {

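A pattern that repeats across this commit: error-logging calls are hoisted out of their if (ret) blocks. That only works if bch_err_fn() and bch_err_msg() are no-ops when ret is zero, which every converted call site assumes; the guard then shrinks to a bare goto or return. Taken from the mount hunk above:

/* before */
if (ret) {
	bch_err_msg(c, ret, "mounting: error getting root inode");
	goto err_put_super;
}

/* after: bch_err_msg() itself checks ret */
bch_err_msg(c, ret, "mounting: error getting root inode");
if (ret)
	goto err_put_super;
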
@ -20,8 +20,6 @@
#include <linux/bsearch.h>
#include <linux/dcache.h> /* struct qstr */

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

/*
* XXX: this is handling transaction restarts without returning
* -BCH_ERR_transaction_restart_nested, this is not how we do things anymore:
@ -29,19 +27,16 @@
static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
u32 snapshot)
{
struct btree_iter iter;
struct bkey_s_c k;
u64 sectors = 0;
int ret;

for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
SPOS(inum, 0, snapshot),
POS(inum, U64_MAX),
0, k, ret)
0, k, ({
if (bkey_extent_is_allocation(k.k))
sectors += k.k->size;

bch2_trans_iter_exit(trans, &iter);
0;
}));

return ret ?: sectors;
}
@ -49,24 +44,17 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum,
static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
u32 snapshot)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_dirent d;
u64 subdirs = 0;
int ret;

for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_dirents,
SPOS(inum, 0, snapshot),
POS(inum, U64_MAX),
0, k, ret) {
if (k.k->type != KEY_TYPE_dirent)
continue;

d = bkey_s_c_to_dirent(k);
if (d.v->d_type == DT_DIR)
0, k, ({
if (k.k->type == KEY_TYPE_dirent &&
bkey_s_c_to_dirent(k).v->d_type == DT_DIR)
subdirs++;
}
bch2_trans_iter_exit(trans, &iter);
0;
}));

return ret ?: subdirs;
}
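Both counting helpers now accumulate inside the statement-expression body and finish with `return ret ?: total`, so a real error takes precedence over the count while iterator setup and teardown disappear into the macro. The shape, with a hypothetical predicate:

static s64 my_count_keys(struct btree_trans *trans, u64 inum, u32 snapshot)
{
	u64 count = 0;

	int ret = for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
			SPOS(inum, 0, snapshot), POS(inum, U64_MAX), 0, k, ({
		if (my_predicate(k))	/* hypothetical */
			count++;
		0;	/* body value: nonzero would stop iteration */
	}));

	return ret ?: count;
}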
@ -209,7 +197,6 @@ static int fsck_write_inode(struct btree_trans *trans,
{
int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
__write_inode(trans, inode, snapshot));
if (ret)
bch_err_fn(trans->c, ret);
return ret;
}
@ -401,7 +388,7 @@ static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s
};
int ret = 0;

darray_for_each(s->ids, i) {
__darray_for_each(s->ids, i) {
if (i->id == id)
return 0;
if (i->id > id)
@ -418,7 +405,7 @@ static int snapshots_seen_add_inorder(struct bch_fs *c, struct snapshots_seen *s
static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
enum btree_id btree_id, struct bpos pos)
{
struct snapshots_seen_entry *i, n = {
struct snapshots_seen_entry n = {
.id = pos.snapshot,
.equiv = bch2_snapshot_equiv(c, pos.snapshot),
};
@ -619,7 +606,7 @@ lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w,

snapshot = bch2_snapshot_equiv(c, snapshot);

darray_for_each(w->inodes, i)
__darray_for_each(w->inodes, i)
if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
goto found;

@ -661,11 +648,8 @@ static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
if (ret)
return ERR_PTR(ret);
} else if (bkey_cmp(w->last_pos, pos)) {
struct inode_walker_entry *i;

darray_for_each(w->inodes, i)
i->seen_this_pos = false;

}

w->last_pos = pos;
@ -989,23 +973,19 @@ fsck_err:
int bch2_check_inodes(struct bch_fs *c)
{
bool full = c->opts.fsck;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bch_inode_unpacked prev = { 0 };
struct snapshots_seen s;
struct bkey_s_c k;
int ret;

snapshots_seen_init(&s);

ret = for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_inode(trans, &iter, k, &prev, &s, full));
check_inode(trans, &iter, k, &prev, &s, full)));

snapshots_seen_exit(&s);
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;
}
@ -1035,7 +1015,6 @@ static bool dirent_points_to_inode(struct bkey_s_c_dirent d,
static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
{
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
u32 restart_count = trans->restart_count;
int ret = 0;
s64 count2;
@ -1084,11 +1063,8 @@ struct extent_ends {

static void extent_ends_reset(struct extent_ends *extent_ends)
{
struct extent_end *i;

darray_for_each(extent_ends->e, i)
snapshots_seen_exit(&i->seen);

extent_ends->e.nr = 0;
}

@ -1120,7 +1096,7 @@ static int extent_ends_at(struct bch_fs *c,
if (!n.seen.ids.data)
return -BCH_ERR_ENOMEM_fsck_extent_ends_at;

darray_for_each(extent_ends->e, i) {
__darray_for_each(extent_ends->e, i) {
if (i->snapshot == k.k->p.snapshot) {
snapshots_seen_exit(&i->seen);
*i = n;
@ -1259,7 +1235,6 @@ static int check_overlapping_extents(struct btree_trans *trans,
bool *fixed)
{
struct bch_fs *c = trans->c;
struct extent_end *i;
int ret = 0;

/* transaction restart, running again */
@ -1440,17 +1415,14 @@ int bch2_check_extents(struct bch_fs *c)
{
struct inode_walker w = inode_walker_init();
struct snapshots_seen s;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct extent_ends extent_ends;
struct disk_reservation res = { 0 };
int ret = 0;

snapshots_seen_init(&s);
extent_ends_init(&extent_ends);

ret = for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
&res, NULL,
@ -1459,13 +1431,12 @@ int bch2_check_extents(struct bch_fs *c)
check_extent(trans, &iter, k, &w, &s, &extent_ends) ?:
check_extent_overbig(trans, &iter, k);
})) ?:
check_i_sectors(trans, &w);
check_i_sectors(trans, &w));

bch2_disk_reservation_put(c, &res);
extent_ends_exit(&extent_ends);
inode_walker_exit(&w);
snapshots_seen_exit(&s);
bch2_trans_put(trans);

bch_err_fn(c, ret);
return ret;
@ -1473,24 +1444,19 @@ int bch2_check_extents(struct bch_fs *c)

int bch2_check_indirect_extents(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct disk_reservation res = { 0 };
int ret = 0;

ret = for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
POS_MIN,
BTREE_ITER_PREFETCH, k,
&res, NULL,
BCH_TRANS_COMMIT_no_enospc, ({
bch2_disk_reservation_put(c, &res);
check_extent_overbig(trans, &iter, k);
}));
})));

bch2_disk_reservation_put(c, &res);
bch2_trans_put(trans);

bch_err_fn(c, ret);
return ret;
}
@ -1498,7 +1464,6 @@ int bch2_check_indirect_extents(struct bch_fs *c)
static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
{
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
u32 restart_count = trans->restart_count;
int ret = 0;
s64 count2;
@ -1844,22 +1809,18 @@ int bch2_check_dirents(struct bch_fs *c)
struct inode_walker target = inode_walker_init();
struct snapshots_seen s;
struct bch_hash_info hash_info;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;

snapshots_seen_init(&s);

ret = for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s));
check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)));

bch2_trans_put(trans);
snapshots_seen_exit(&s);
inode_walker_exit(&dir);
inode_walker_exit(&target);
@ -1910,8 +1871,6 @@ int bch2_check_xattrs(struct bch_fs *c)
{
struct inode_walker inode = inode_walker_init();
struct bch_hash_info hash_info;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;

ret = bch2_trans_run(c,
@ -1995,13 +1954,10 @@ typedef DARRAY(struct pathbuf_entry) pathbuf;

static bool path_is_dup(pathbuf *p, u64 inum, u32 snapshot)
{
struct pathbuf_entry *i;

darray_for_each(*p, i)
if (i->inum == inum &&
i->snapshot == snapshot)
return true;

return false;
}

@ -2095,8 +2051,6 @@ static int check_path(struct btree_trans *trans,
}

if (path_is_dup(p, inode->bi_inum, snapshot)) {
struct pathbuf_entry *i;

/* XXX print path */
bch_err(c, "directory structure loop");

@ -2243,10 +2197,6 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
struct nlink_table *t,
u64 start, u64 *end)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bch_inode_unpacked u;

int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_inodes,
POS(0, start),
@ -2257,6 +2207,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
continue;

/* Should never fail, checked by bch2_inode_invalid: */
struct bch_inode_unpacked u;
BUG_ON(bch2_inode_unpack(k, &u));

/*
@ -2287,9 +2238,6 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links
u64 range_start, u64 range_end)
{
struct snapshots_seen s;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_dirent d;

snapshots_seen_init(&s);

@ -2302,16 +2250,14 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links
if (ret)
break;

switch (k.k->type) {
case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
if (k.k->type == KEY_TYPE_dirent) {
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);

if (d.v->d_type != DT_DIR &&
d.v->d_type != DT_SUBVOL)
inc_link(c, &s, links, range_start, range_end,
le64_to_cpu(d.v->d_inum),
bch2_snapshot_equiv(c, d.k->p.snapshot));
break;
}
0;
})));
@ -2369,12 +2315,9 @@ static int check_nlinks_update_hardlinks(struct bch_fs *c,
struct nlink_table *links,
u64 range_start, u64 range_end)
{
struct btree_iter iter;
struct bkey_s_c k;
size_t idx = 0;
int ret = 0;

ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS(0, range_start),
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
@ -2427,7 +2370,6 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
{
struct bkey_s_c_reflink_p p;
struct bkey_i_reflink_p *u;
int ret;

if (k.k->type != KEY_TYPE_reflink_p)
return 0;
@ -2438,7 +2380,7 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
return 0;

u = bch2_trans_kmalloc(trans, sizeof(*u));
ret = PTR_ERR_OR_ZERO(u);
int ret = PTR_ERR_OR_ZERO(u);
if (ret)
return ret;

@ -2451,14 +2393,10 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,

int bch2_fix_reflink_p(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;

if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix)
return 0;

ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|

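The darray conversions in fsck.c follow one rule: darray_for_each() now declares its own iteration pointer, so the `struct xxx *i;` locals disappear, and where code needs the pointer after the loop (the sorted insert in snapshots_seen_add_inorder(), the replace-in-place in extent_ends_at()) the call switches to __darray_for_each(), which reuses a caller-declared variable. Presumably the pair looks something like these abbreviated, assumed definitions:

#define __my_darray_for_each(_d, _i)					\
	for ((_i) = (_d).data; (_i) < (_d).data + (_d).nr; (_i)++)

#define my_darray_for_each(_d, _i)					\
	for (typeof(&(_d).data[0]) _i = (_d).data;			\
	     _i < (_d).data + (_d).nr; _i++)
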
@ -1155,8 +1155,6 @@ delete:
int bch2_delete_dead_inodes(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
bool need_another_pass;
int ret;
again:
@ -1200,6 +1198,5 @@ again:
}
err:
bch2_trans_put(trans);

return ret;
}

@ -1166,9 +1166,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_i *orig;
struct bkey_s_c k;
int ret;

for_each_keylist_key(&op->insert_keys, orig) {

@ -993,7 +993,6 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
break;
}

if (ret)
bch_err_fn(c, ret);
unlock:
up_write(&c->state_lock);
@ -1024,17 +1023,13 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)

ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
err:
if (ret)
bch_err_fn(ca, ret);
return ret;
}

int bch2_fs_journal_alloc(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned i;

for_each_online_member(ca, c, i) {
for_each_online_member(c, ca) {
if (ca->journal.nr)
continue;

@ -1299,11 +1294,8 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
union journal_res_state s;
struct bch_dev *ca;
unsigned long now = jiffies;
u64 nr_writes = j->nr_flush_writes + j->nr_noflush_writes;
u64 seq;
unsigned i;

if (!out->nr_tabstops)
printbuf_tabstop_push(out, 24);
@ -1348,10 +1340,10 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)

prt_newline(out);

for (seq = journal_cur_seq(j);
for (u64 seq = journal_cur_seq(j);
seq >= journal_last_unwritten_seq(j);
--seq) {
i = seq & JOURNAL_BUF_MASK;
unsigned i = seq & JOURNAL_BUF_MASK;

prt_printf(out, "unwritten entry:");
prt_tab(out);
@ -1395,8 +1387,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
j->space[journal_space_total].next_entry,
j->space[journal_space_total].total);

for_each_member_device_rcu(ca, c, i,
&c->rw_devs[BCH_DATA_journal]) {
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
struct journal_device *ja = &ca->journal;

if (!test_bit(ca->dev_idx, c->rw_devs[BCH_DATA_journal].d))
@ -1405,7 +1396,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
if (!ja->nr)
continue;

prt_printf(out, "dev %u:\n", i);
prt_printf(out, "dev %u:\n", ca->dev_idx);
prt_printf(out, "\tnr\t\t%u\n", ja->nr);
prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);

@ -238,8 +238,6 @@ bch2_journal_add_entry(struct journal *j, struct journal_res *res,

static inline bool journal_entry_empty(struct jset *j)
{
struct jset_entry *i;

if (j->seq != j->last_seq)
return false;

@ -781,7 +781,6 @@ void bch2_journal_entry_to_text(struct printbuf *out, struct bch_fs *c,
static int jset_validate_entries(struct bch_fs *c, struct jset *jset,
enum bkey_invalid_flags flags)
{
struct jset_entry *entry;
unsigned version = le32_to_cpu(jset->version);
int ret = 0;

@ -1169,8 +1168,6 @@ int bch2_journal_read(struct bch_fs *c,
struct journal_list jlist;
struct journal_replay *i, **_i, *prev = NULL;
struct genradix_iter radix_iter;
struct bch_dev *ca;
unsigned iter;
struct printbuf buf = PRINTBUF;
bool degraded = false, last_write_torn = false;
u64 seq;
@ -1181,7 +1178,7 @@ int bch2_journal_read(struct bch_fs *c,
jlist.last_seq = 0;
jlist.ret = 0;

for_each_member_device(ca, c, iter) {
for_each_member_device(c, ca) {
if (!c->opts.fsck &&
!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
continue;
@ -1347,7 +1344,7 @@ int bch2_journal_read(struct bch_fs *c,
continue;

for (ptr = 0; ptr < i->nr_ptrs; ptr++) {
ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);
struct bch_dev *ca = bch_dev_bkey_exists(c, i->ptrs[ptr].dev);

if (!i->ptrs[ptr].csum_good)
bch_err_dev_offset(ca, i->ptrs[ptr].sector,
@ -1723,7 +1720,7 @@ static CLOSURE_CALLBACK(do_journal_write)
static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct jset_entry *start, *end, *i;
struct jset_entry *start, *end;
struct jset *jset = w->data;
struct journal_keys_to_wb wb = { NULL };
unsigned sectors, bytes, u64s;
@ -1891,12 +1888,11 @@ CLOSURE_CALLBACK(bch2_journal_write)
{
closure_type(j, struct journal, io);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
struct journal_buf *w = journal_last_unwritten_buf(j);
struct bch_replicas_padded replicas;
struct bio *bio;
struct printbuf journal_debug_buf = PRINTBUF;
unsigned i, nr_rw_members = 0;
unsigned nr_rw_members = 0;
int ret;

BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
@ -1956,7 +1952,7 @@ CLOSURE_CALLBACK(bch2_journal_write)
if (c->opts.nochanges)
goto no_io;

for_each_rw_member(ca, c, i)
for_each_rw_member(c, ca)
nr_rw_members++;

if (nr_rw_members > 1)
@ -1973,7 +1969,7 @@ CLOSURE_CALLBACK(bch2_journal_write)
goto err;

if (!JSET_NO_FLUSH(w->data) && w->separate_flush) {
for_each_rw_member(ca, c, i) {
for_each_rw_member(c, ca) {
percpu_ref_get(&ca->io_ref);

bio = ca->journal.bio;

@ -136,15 +136,13 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
enum journal_space_from from)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
unsigned i, pos, nr_devs = 0;
unsigned pos, nr_devs = 0;
struct journal_space space, dev_space[BCH_SB_MEMBERS_MAX];

BUG_ON(nr_devs_want > ARRAY_SIZE(dev_space));

rcu_read_lock();
for_each_member_device_rcu(ca, c, i,
&c->rw_devs[BCH_DATA_journal]) {
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
if (!ca->journal.nr)
continue;

@ -173,19 +171,17 @@ static struct journal_space __journal_space_available(struct journal *j, unsigne
void bch2_journal_space_available(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
unsigned clean, clean_ondisk, total;
unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
j->buf[1].buf_size >> 9);
unsigned i, nr_online = 0, nr_devs_want;
unsigned nr_online = 0, nr_devs_want;
bool can_discard = false;
int ret = 0;

lockdep_assert_held(&j->lock);

rcu_read_lock();
for_each_member_device_rcu(ca, c, i,
&c->rw_devs[BCH_DATA_journal]) {
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
struct journal_device *ja = &ca->journal;

if (!ja->nr)
@ -216,7 +212,7 @@ void bch2_journal_space_available(struct journal *j)

nr_devs_want = min_t(unsigned, nr_online, c->opts.metadata_replicas);

for (i = 0; i < journal_space_nr; i++)
for (unsigned i = 0; i < journal_space_nr; i++)
j->space[i] = __journal_space_available(j, nr_devs_want, i);

clean_ondisk = j->space[journal_space_clean_ondisk].total;
@ -263,12 +259,10 @@ static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
void bch2_journal_do_discards(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
unsigned iter;

mutex_lock(&j->discard_lock);

for_each_rw_member(ca, c, iter) {
for_each_rw_member(c, ca) {
struct journal_device *ja = &ca->journal;

while (should_discard_bucket(j, ja)) {
@ -583,13 +577,11 @@ static size_t journal_flush_pins(struct journal *j,
static u64 journal_seq_to_flush(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
u64 seq_to_flush = 0;
unsigned iter;

spin_lock(&j->lock);

for_each_rw_member(ca, c, iter) {
for_each_rw_member(c, ca) {
struct journal_device *ja = &ca->journal;
unsigned nr_buckets, bucket_to_flush;

@ -793,10 +785,9 @@ int bch2_journal_reclaim_start(struct journal *j)
p = kthread_create(bch2_journal_reclaim_thread, j,
"bch-reclaim/%s", c->name);
ret = PTR_ERR_OR_ZERO(p);
if (ret) {
bch_err_msg(c, ret, "creating journal reclaim thread");
if (ret)
return ret;
}

get_task_struct(p);
j->reclaim_thread = p;

@ -54,16 +54,11 @@ static int resume_logged_op(struct btree_trans *trans, struct btree_iter *iter,

int bch2_resume_logged_ops(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;

ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter,
BTREE_ID_logged_ops, POS_MIN,
BTREE_ITER_PREFETCH, k,
resume_logged_op(trans, &iter, k)));
if (ret)
bch_err_fn(c, ret);
return ret;
}

@ -147,17 +147,12 @@ fsck_err:

int bch2_check_lrus(struct bch_fs *c)
{
struct btree_iter iter;
struct bkey_s_c k;
struct bpos last_flushed_pos = POS_MIN;
int ret = 0;

ret = bch2_trans_run(c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw,
bch2_check_lru_key(trans, &iter, k, &last_flushed_pos)));
if (ret)
bch_err_fn(c, ret);
return ret;

@ -79,8 +79,6 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
enum btree_id id;
int ret = 0;

@ -145,10 +143,9 @@ retry:
continue;
}

if (ret) {
bch_err_msg(c, ret, "updating btree node key");
if (ret)
break;
}
next:
bch2_btree_iter_next_node(&iter);
}

@ -372,9 +372,6 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
int ret = 0;

if (io_opts->cur_inum != extent_k.k->p.inode) {
struct btree_iter iter;
struct bkey_s_c k;

io_opts->d.nr = 0;

ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
@ -400,12 +397,10 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
if (ret)
return ERR_PTR(ret);

if (extent_k.k->p.snapshot) {
struct snapshot_io_opts_entry *i;
if (extent_k.k->p.snapshot)
darray_for_each(io_opts->d, i)
if (bch2_snapshot_is_ancestor(c, extent_k.k->p.snapshot, i->snapshot))
return &i->io_opts;
}

return &io_opts->fs_io_opts;
}
@ -669,10 +664,9 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
bch2_trans_iter_exit(trans, &iter);

if (ret) {
bch_err_msg(c, ret, "looking up alloc key");
if (ret)
goto err;
}

a = bch2_alloc_to_v4(k, &a_convert);
dirty_sectors = bch2_bucket_sectors_dirty(*a);

@ -145,8 +145,6 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
size_t nr_to_get = max_t(size_t, 16U, buckets_in_flight->nr / 4);
size_t saw = 0, in_flight = 0, not_movable = 0, sectors = 0;
int ret;
@ -161,7 +159,7 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
__func__, bch2_err_str(ret)))
return ret;

ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
0, k, ({
@ -209,7 +207,6 @@ static int bch2_copygc(struct moving_context *ctxt,
};
move_buckets buckets = { 0 };
struct move_bucket_in_flight *f;
struct move_bucket *i;
u64 moved = atomic64_read(&ctxt->stats->sectors_moved);
int ret = 0;

@ -270,19 +267,16 @@ err:
*/
unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned dev_idx;
s64 wait = S64_MAX, fragmented_allowed, fragmented;
unsigned i;

for_each_rw_member(ca, c, dev_idx) {
for_each_rw_member(c, ca) {
struct bch_dev_usage usage = bch2_dev_usage_read(ca);

fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
ca->mi.bucket_size) >> 1);
fragmented = 0;

for (i = 0; i < BCH_DATA_NR; i++)
for (unsigned i = 0; i < BCH_DATA_NR; i++)
if (data_type_movable(i))
fragmented += usage.d[i].fragmented;

@ -324,9 +318,9 @@ static int bch2_copygc_thread(void *arg)
if (!buckets)
return -ENOMEM;
ret = rhashtable_init(&buckets->table, &bch_move_bucket_params);
bch_err_msg(c, ret, "allocating copygc buckets in flight");
if (ret) {
kfree(buckets);
bch_err_msg(c, ret, "allocating copygc buckets in flight");
return ret;
}

@ -423,10 +417,9 @@ int bch2_copygc_start(struct bch_fs *c)

t = kthread_create(bch2_copygc_thread, c, "bch-copygc/%s", c->name);
ret = PTR_ERR_OR_ZERO(t);
if (ret) {
bch_err_msg(c, ret, "creating copygc thread");
if (ret)
return ret;
}

get_task_struct(t);

@ -599,14 +599,9 @@ advance:

int bch2_fs_quota_read(struct bch_fs *c)
{
struct bch_sb_field_quota *sb_quota;
struct btree_trans *trans;
struct btree_iter iter;
struct bkey_s_c k;
int ret;

mutex_lock(&c->sb_lock);
sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
if (!sb_quota) {
mutex_unlock(&c->sb_lock);
return -BCH_ERR_ENOSPC_sb_quota;
@ -615,18 +610,13 @@ int bch2_fs_quota_read(struct bch_fs *c)
bch2_sb_quota_read(c);
mutex_unlock(&c->sb_lock);

trans = bch2_trans_get(c);

ret = for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
BTREE_ITER_PREFETCH, k,
__bch2_quota_set(c, k, NULL)) ?:
for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
bch2_fs_quota_read_inode(trans, &iter, k));

bch2_trans_put(trans);

if (ret)
bch2_fs_quota_read_inode(trans, &iter, k)));
bch_err_fn(c, ret);
return ret;
}

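bch2_fs_quota_read() shows that the converted iterators compose: because each for_each_btree_key() is now an int-valued expression, two passes over different btrees chain with ?: inside a single bch2_trans_run(), short-circuiting on the first error. The shape, condensed from the hunk above:

int ret = bch2_trans_run(c,
	for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
			   BTREE_ITER_PREFETCH, k,
		__bch2_quota_set(c, k, NULL)) ?:
	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
			   BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
		bch2_fs_quota_read_inode(trans, &iter, k)));
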
@ -469,10 +469,9 @@ int bch2_rebalance_start(struct bch_fs *c)

p = kthread_create(bch2_rebalance_thread, c, "bch-rebalance/%s", c->name);
ret = PTR_ERR_OR_ZERO(p);
if (ret) {
bch_err_msg(c, ret, "creating rebalance thread");
if (ret)
return ret;
}

get_task_struct(p);
rcu_assign_pointer(c->rebalance.thread, p);

@ -145,7 +145,6 @@ static int bch2_journal_replay(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
DARRAY(struct journal_key *) keys_sorted = { 0 };
struct journal_key **kp;
struct journal *j = &c->journal;
u64 start_seq = c->journal_replay_seq_start;
u64 end_seq = c->journal_replay_seq_start;
@ -344,14 +343,11 @@ static int journal_replay_entry_early(struct bch_fs *c,
static int journal_replay_early(struct bch_fs *c,
struct bch_sb_field_clean *clean)
{
struct jset_entry *entry;
int ret;

if (clean) {
for (entry = clean->start;
for (struct jset_entry *entry = clean->start;
entry != vstruct_end(&clean->field);
entry = vstruct_next(entry)) {
ret = journal_replay_entry_early(c, entry);
int ret = journal_replay_entry_early(c, entry);
if (ret)
return ret;
}
@ -366,7 +362,7 @@ static int journal_replay_early(struct bch_fs *c,
continue;

vstruct_for_each(&i->j, entry) {
ret = journal_replay_entry_early(c, entry);
int ret = journal_replay_entry_early(c, entry);
if (ret)
return ret;
}
@ -462,7 +458,6 @@ static int bch2_initialize_subvolumes(struct bch_fs *c)
ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees, &root_tree.k_i, NULL, 0) ?:
bch2_btree_insert(c, BTREE_ID_snapshots, &root_snapshot.k_i, NULL, 0) ?:
bch2_btree_insert(c, BTREE_ID_subvolumes, &root_volume.k_i, NULL, 0);
if (ret)
bch_err_fn(c, ret);
return ret;
}
@ -503,7 +498,6 @@ static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
{
int ret = bch2_trans_do(c, NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
__bch2_fs_upgrade_for_subvolumes(trans));
if (ret)
bch_err_fn(c, ret);
return ret;
}
@ -990,7 +984,6 @@ out:
bch2_delete_dead_snapshots_async(c);
}

if (ret)
bch_err_fn(c, ret);
return ret;
err:
@ -1004,8 +997,6 @@ int bch2_fs_initialize(struct bch_fs *c)
struct bch_inode_unpacked root_inode, lostfound_inode;
struct bkey_inode_buf packed_inode;
struct qstr lostfound = QSTR("lost+found");
struct bch_dev *ca;
unsigned i;
int ret;

bch_notice(c, "initializing new filesystem");
@ -1027,10 +1018,10 @@ int bch2_fs_initialize(struct bch_fs *c)
set_bit(BCH_FS_may_go_rw, &c->flags);
set_bit(BCH_FS_fsck_done, &c->flags);

for (i = 0; i < BTREE_ID_NR; i++)
for (unsigned i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc(c, i);

for_each_member_device(ca, c, i)
for_each_member_device(c, ca)
bch2_dev_usage_init(ca);

ret = bch2_fs_journal_alloc(c);
@ -1058,7 +1049,7 @@ int bch2_fs_initialize(struct bch_fs *c)
if (ret)
goto err;

for_each_online_member(ca, c, i)
for_each_online_member(c, ca)
ca->new_fs_bucket_idx = 0;

ret = bch2_fs_freespace_init(c);
@ -1082,10 +1073,9 @@ int bch2_fs_initialize(struct bch_fs *c)
packed_inode.inode.k.p.snapshot = U32_MAX;

ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed_inode.inode.k_i, NULL, 0);
if (ret) {
bch_err_msg(c, ret, "creating root directory");
if (ret)
goto err;
}

bch2_inode_init_early(c, &lostfound_inode);

@ -1096,10 +1086,9 @@ int bch2_fs_initialize(struct bch_fs *c)
&lostfound,
0, 0, S_IFDIR|0700, 0,
NULL, NULL, (subvol_inum) { 0 }, 0));
if (ret) {
bch_err_msg(c, ret, "creating lost+found");
if (ret)
goto err;
}

if (enabled_qtypes(c)) {
ret = bch2_fs_quota_read(c);
@ -1108,10 +1097,9 @@ int bch2_fs_initialize(struct bch_fs *c)
}

ret = bch2_journal_flush(&c->journal);
if (ret) {
bch_err_msg(c, ret, "writing first journal entry");
if (ret)
goto err;
}

mutex_lock(&c->sb_lock);
SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
@ -1122,6 +1110,6 @@ int bch2_fs_initialize(struct bch_fs *c)

return 0;
err:
bch_err_fn(ca, ret);
bch_err_fn(c, ret);
return ret;
}

@ -10,6 +10,9 @@ extern const char * const bch2_recovery_passes[];
static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c,
enum bch_recovery_pass pass)
{
if (c->recovery_passes_explicit & BIT_ULL(pass))
return 0;

bch_info(c, "running explicit recovery pass %s (%u), currently at %s (%u)",
bch2_recovery_passes[pass], pass,
bch2_recovery_passes[c->curr_recovery_pass], c->curr_recovery_pass);

@ -191,13 +191,10 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
struct jset_entry **end,
u64 journal_seq)
{
struct bch_dev *ca;
unsigned i, dev;

percpu_down_read(&c->mark_lock);

if (!journal_seq) {
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
for (unsigned i = 0; i < ARRAY_SIZE(c->usage); i++)
bch2_fs_usage_acc_to_base(c, i);
} else {
bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
@ -223,7 +220,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
u->v = cpu_to_le64(atomic64_read(&c->key_version));
}

for (i = 0; i < BCH_REPLICAS_MAX; i++) {
for (unsigned i = 0; i < BCH_REPLICAS_MAX; i++) {
struct jset_entry_usage *u =
container_of(jset_entry_init(end, sizeof(*u)),
struct jset_entry_usage, entry);
@ -234,7 +231,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
u->v = cpu_to_le64(c->usage_base->persistent_reserved[i]);
}

for (i = 0; i < c->replicas.nr; i++) {
for (unsigned i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry_v1 *e =
cpu_replicas_entry(&c->replicas, i);
struct jset_entry_data_usage *u =
@ -247,7 +244,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
"embedded variable length struct");
}

for_each_member_device(ca, c, dev) {
for_each_member_device(c, ca) {
unsigned b = sizeof(struct jset_entry_dev_usage) +
sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR;
struct jset_entry_dev_usage *u =
@ -255,9 +252,9 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,
struct jset_entry_dev_usage, entry);

u->entry.type = BCH_JSET_ENTRY_dev_usage;
u->dev = cpu_to_le32(dev);
u->dev = cpu_to_le32(ca->dev_idx);

for (i = 0; i < BCH_DATA_NR; i++) {
for (unsigned i = 0; i < BCH_DATA_NR; i++) {
u->d[i].buckets = cpu_to_le64(ca->usage_base->d[i].buckets);
u->d[i].sectors = cpu_to_le64(ca->usage_base->d[i].sectors);
u->d[i].fragmented = cpu_to_le64(ca->usage_base->d[i].fragmented);
@ -266,7 +263,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *c,

percpu_up_read(&c->mark_lock);

for (i = 0; i < 2; i++) {
for (unsigned i = 0; i < 2; i++) {
struct jset_entry_clock *clock =
container_of(jset_entry_init(end, sizeof(*clock)),
struct jset_entry_clock, entry);

@ -358,14 +358,12 @@ const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = {
void bch2_sb_members_from_cpu(struct bch_fs *c)
{
struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);
struct bch_dev *ca;
unsigned i, e;

rcu_read_lock();
for_each_member_device_rcu(ca, c, i, NULL) {
struct bch_member *m = __bch2_members_v2_get_mut(mi, i);
for_each_member_device_rcu(c, ca, NULL) {
struct bch_member *m = __bch2_members_v2_get_mut(mi, ca->dev_idx);

for (e = 0; e < BCH_MEMBER_ERROR_NR; e++)
for (unsigned e = 0; e < BCH_MEMBER_ERROR_NR; e++)
m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
}
rcu_read_unlock();

@ -82,30 +82,38 @@ static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
const struct bch_devs_mask *mask)
{
struct bch_dev *ca = NULL;

while ((*iter = mask
? find_next_bit(mask->d, c->sb.nr_devices, *iter)
: *iter) < c->sb.nr_devices &&
!(ca = rcu_dereference_check(c->devs[*iter],
while ((idx = mask
? find_next_bit(mask->d, c->sb.nr_devices, idx)
: idx) < c->sb.nr_devices &&
!(ca = rcu_dereference_check(c->devs[idx],
lockdep_is_held(&c->state_lock))))
(*iter)++;
idx++;

return ca;
}

#define for_each_member_device_rcu(ca, c, iter, mask) \
for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
const struct bch_devs_mask *mask)
{
struct bch_dev *ca;
return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

#define for_each_member_device_rcu(_c, _ca, _mask) \
for (struct bch_dev *_ca = NULL; \
(_ca = __bch2_next_dev((_c), _ca, (_mask)));)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
if (ca)
percpu_ref_put(&ca->ref);

rcu_read_lock();
if ((ca = __bch2_next_dev(c, iter, NULL)))
if ((ca = __bch2_next_dev(c, ca, NULL)))
percpu_ref_get(&ca->ref);
rcu_read_unlock();

@ -115,41 +123,42 @@ static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter
/*
* If you break early, you must drop your ref on the current device
*/
#define for_each_member_device(ca, c, iter) \
for ((iter) = 0; \
(ca = bch2_get_next_dev(c, &(iter))); \
percpu_ref_put(&ca->ref), (iter)++)
#define __for_each_member_device(_c, _ca) \
for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca) \
for (struct bch_dev *_ca = NULL; \
(_ca = bch2_get_next_dev(_c, _ca));)

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
unsigned *iter,
int state_mask)
struct bch_dev *ca,
unsigned state_mask)
{
struct bch_dev *ca;
if (ca)
percpu_ref_put(&ca->io_ref);

rcu_read_lock();
while ((ca = __bch2_next_dev(c, iter, NULL)) &&
while ((ca = __bch2_next_dev(c, ca, NULL)) &&
(!((1 << ca->mi.state) & state_mask) ||
!percpu_ref_tryget(&ca->io_ref)))
(*iter)++;
;
rcu_read_unlock();

return ca;
}

#define __for_each_online_member(ca, c, iter, state_mask) \
for ((iter) = 0; \
(ca = bch2_get_next_online_dev(c, &(iter), state_mask)); \
percpu_ref_put(&ca->io_ref), (iter)++)
#define __for_each_online_member(_c, _ca, state_mask) \
for (struct bch_dev *_ca = NULL; \
(_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

#define for_each_online_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, ~0)
#define for_each_online_member(c, ca) \
__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)
#define for_each_rw_member(c, ca) \
__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, \
(1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
#define for_each_readable_member(c, ca) \
__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))

/*
* If a key exists that references a device, the device won't be going away and
@ -175,11 +184,9 @@ static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
struct bch_devs_mask devs;
struct bch_dev *ca;
unsigned i;

memset(&devs, 0, sizeof(devs));
for_each_online_member(ca, c, i)
for_each_online_member(c, ca)
__set_bit(ca->dev_idx, devs.d);
return devs;
}

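This sb-members.h hunk is the heart of the commit: the device iterators become self-contained. __bch2_next_dev() resumes from ca->dev_idx + 1 rather than an external counter, and each for_each_*() macro declares its own struct bch_dev *, taking and dropping the appropriate percpu ref per step. The caveat in the old comment still applies: breaking out early leaves you holding the current device's ref. A usage sketch (some_condition is hypothetical):

for_each_online_member(c, ca) {
	if (some_condition(ca)) {
		/* required on early exit: the macro can't drop it for us */
		percpu_ref_put(&ca->io_ref);
		break;
	}
}
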
@ -459,7 +459,6 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
struct bkey_s_c_subvolume s;
|
||||
bool found = false;
|
||||
int ret;
|
||||
|
||||
@ -468,7 +467,7 @@ static int bch2_snapshot_tree_master_subvol(struct btree_trans *trans,
|
||||
if (k.k->type != KEY_TYPE_subvolume)
|
||||
continue;
|
||||
|
||||
s = bkey_s_c_to_subvolume(k);
|
||||
struct bkey_s_c_subvolume s = bkey_s_c_to_subvolume(k);
|
||||
if (!bch2_snapshot_is_ancestor(c, le32_to_cpu(s.v->snapshot), snapshot_root))
|
||||
continue;
|
||||
if (!BCH_SUBVOLUME_SNAP(s.v)) {
|
||||
@ -582,19 +581,13 @@ fsck_err:
|
||||
*/
|
||||
int bch2_check_snapshot_trees(struct bch_fs *c)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
int ret;
|
||||
|
||||
ret = bch2_trans_run(c,
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
BTREE_ID_snapshot_trees, POS_MIN,
|
||||
BTREE_ITER_PREFETCH, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
check_snapshot_tree(trans, &iter, k)));
|
||||
|
||||
if (ret)
|
||||
bch_err(c, "error %i checking snapshot trees", ret);
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -856,21 +849,16 @@ fsck_err:
|
||||
|
||||
int bch2_check_snapshots(struct bch_fs *c)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* We iterate backwards as checking/fixing the depth field requires that
|
||||
* the parent's depth already be correct:
|
||||
*/
|
||||
ret = bch2_trans_run(c,
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key_reverse_commit(trans, iter,
|
||||
BTREE_ID_snapshots, POS_MAX,
|
||||
BTREE_ITER_PREFETCH, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
check_snapshot(trans, &iter, k)));
|
||||
if (ret)
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
}

@@ -1315,7 +1303,6 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
	struct bch_fs *c = trans->c;
	u32 nr_deleted_ancestors = 0;
	struct bkey_i_snapshot *s;
	u32 *i;
	int ret;

	if (k.k->type != KEY_TYPE_snapshot)
@@ -1368,12 +1355,9 @@ static int bch2_fix_child_of_deleted_snapshot(struct btree_trans *trans,
int bch2_delete_dead_snapshots(struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_s_c_snapshot snap;
	snapshot_id_list deleted = { 0 };
	snapshot_id_list deleted_interior = { 0 };
	u32 *i, id;
	u32 id;
	int ret = 0;

	if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
@@ -1381,11 +1365,10 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)

	if (!test_bit(BCH_FS_started, &c->flags)) {
		ret = bch2_fs_read_write_early(c);
		if (ret) {
			bch_err_msg(c, ret, "deleting dead snapshots: error going rw");
		if (ret)
			return ret;
		}
	}

	trans = bch2_trans_get(c);

@@ -1397,34 +1380,29 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
			POS_MIN, 0, k,
			NULL, NULL, 0,
		bch2_delete_redundant_snapshot(trans, k));
	if (ret) {
		bch_err_msg(c, ret, "deleting redundant snapshots");
	if (ret)
		goto err;
	}

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k,
		bch2_snapshot_set_equiv(trans, k));
	if (ret) {
		bch_err_msg(c, ret, "in bch2_snapshots_set_equiv");
	if (ret)
		goto err;
	}

	ret = for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k, ({
		if (k.k->type != KEY_TYPE_snapshot)
			continue;

		snap = bkey_s_c_to_snapshot(k);
		BCH_SNAPSHOT_DELETED(snap.v)
		BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v)
			? snapshot_list_add(c, &deleted, k.k->p.offset)
			: 0;
	}));

	if (ret) {
		bch_err_msg(c, ret, "walking snapshots");
	if (ret)
		goto err;
	}

	for (id = 0; id < BTREE_ID_NR; id++) {
		struct bpos last_pos = POS_MIN;
@@ -1457,11 +1435,10 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
		bch2_disk_reservation_put(c, &res);
		darray_exit(&equiv_seen);

		if (ret) {
			bch_err_msg(c, ret, "deleting keys from dying snapshots");
		if (ret)
			goto err;
		}
	}

	bch2_trans_unlock(trans);
	down_write(&c->snapshot_create_lock);
@@ -1476,10 +1453,9 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
			: 0;
	}));

	if (ret) {
		bch_err_msg(c, ret, "walking snapshots");
	if (ret)
		goto err_create_lock;
	}

	/*
	 * Fixing children of deleted snapshots can't be done completely
@@ -1496,27 +1472,24 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
	darray_for_each(deleted, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		if (ret) {
			bch_err_msg(c, ret, "deleting snapshot %u", *i);
		if (ret)
			goto err_create_lock;
		}
	}

	darray_for_each(deleted_interior, i) {
		ret = commit_do(trans, NULL, NULL, 0,
			bch2_snapshot_node_delete(trans, *i));
		if (ret) {
			bch_err_msg(c, ret, "deleting snapshot %u", *i);
		if (ret)
			goto err_create_lock;
		}
	}
err_create_lock:
	up_write(&c->snapshot_create_lock);
err:
	darray_exit(&deleted_interior);
	darray_exit(&deleted);
	bch2_trans_put(trans);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
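The other recurring cleanup in this function and the device hunks further down: `if (ret) { bch_err_msg(...); goto err; }` becomes an unconditional `bch_err_msg(...)` followed by a bare `if (ret) goto err;`. That is only safe if the logging helper is itself a no-op on success, which appears to be how `bch_err_msg()` behaves in this source drop. A hedged sketch of a helper with that contract (my own macro, not the real definition):

#include <stdio.h>

/* Sketch: an error-logging helper that is a no-op when err == 0, so call
 * sites may log unconditionally and keep control flow on its own line.
 * Illustrative only -- not the actual bch_err_msg() definition. */
#define log_err_msg(_err, _fmt, ...)					\
do {									\
	if (_err)							\
		fprintf(stderr, _fmt ": error %d\n",			\
			##__VA_ARGS__, (int) (_err));			\
} while (0)

static int flush_journal(void)
{
	return 0;	/* pretend the flush succeeded */
}

int main(void)
{
	int ret = flush_journal();

	log_err_msg(ret, "flushing journal");	/* silent when ret == 0 */
	if (ret)
		return 1;
	return 0;
}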

@@ -1688,11 +1661,7 @@ static int bch2_check_snapshot_needs_deletion(struct btree_trans *trans, struct

int bch2_snapshots_read(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret = 0;

	ret = bch2_trans_run(c,
	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k,
			bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
@@ -1701,7 +1670,6 @@ int bch2_snapshots_read(struct bch_fs *c)
		for_each_btree_key(trans, iter, BTREE_ID_snapshots,
			POS_MIN, 0, k,
			(set_is_ancestor_bitmap(c, k.k->p.offset), 0)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

@@ -202,8 +202,6 @@ static inline bool bch2_snapshot_has_children(struct bch_fs *c, u32 id)

static inline bool snapshot_list_has_id(snapshot_id_list *s, u32 id)
{
	u32 *i;

	darray_for_each(*s, i)
		if (*i == id)
			return true;
@@ -212,8 +210,6 @@ static inline bool snapshot_list_has_id(snapshot_id_list *s, u32 id)

static inline bool snapshot_list_has_ancestor(struct bch_fs *c, snapshot_id_list *s, u32 id)
{
	u32 *i;

	darray_for_each(*s, i)
		if (bch2_snapshot_is_ancestor(c, id, *i))
			return true;

@@ -38,7 +38,6 @@ static int check_subvol(struct btree_trans *trans,

	if (BCH_SUBVOLUME_UNLINKED(subvol.v)) {
		ret = bch2_subvolume_delete(trans, iter->pos.offset);
		if (ret)
			bch_err_msg(c, ret, "deleting subvolume %llu", iter->pos.offset);
		return ret ?: -BCH_ERR_transaction_restart_nested;
	}
@@ -80,16 +79,11 @@ fsck_err:

int bch2_check_subvols(struct bch_fs *c)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	ret = bch2_trans_run(c,
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_subvol(trans, &iter, k)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}
@@ -208,8 +202,6 @@ static int bch2_subvolume_reparent(struct btree_trans *trans,
 */
static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_delete)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bch_subvolume s;

	return lockrestart_do(trans,
@@ -279,11 +271,10 @@ static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *wor

	for (id = s.data; id < s.data + s.nr; id++) {
		ret = bch2_trans_run(c, bch2_subvolume_delete(trans, *id));
		if (ret) {
			bch_err_msg(c, ret, "deleting subvolume %u", *id);
		if (ret)
			break;
		}
	}

	darray_exit(&s);
}
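Here and in several later hunks, loop-cursor declarations such as `u32 *i;` vanish because `darray_for_each()` now declares the cursor itself via `typeof`. Roughly the shape being relied on; this is a simplified stand-in, not the tree's actual darray:

#include <stddef.h>
#include <stdio.h>

typedef unsigned int u32;

/* A simplified stand-in for the tree's darray: data pointer plus count. */
typedef struct { u32 *data; size_t nr; } u32_darray;

/* The cursor _i is declared by the macro itself (C99 for-loop scope plus
 * GNU typeof), mirroring the darray_for_each() change these hunks rely on. */
#define darray_for_each_sketch(_d, _i)				\
	for (typeof(&(_d).data[0]) _i = (_d).data;		\
	     _i < (_d).data + (_d).nr; _i++)

int main(void)
{
	u32 ids[] = { 1, 4, 9 };
	u32_darray d = { ids, 3 };

	darray_for_each_sketch(d, i)
		printf("%u\n", *i);
	return 0;
}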

@@ -100,8 +100,6 @@ static int bch2_sb_field_validate(struct bch_sb *, struct bch_sb_field *,

struct bch_sb_field *bch2_sb_field_get_id(struct bch_sb *sb,
				      enum bch_sb_field_type type)
{
	struct bch_sb_field *f;

	/* XXX: need locking around superblock to access optional fields */

	vstruct_for_each(sb, f)
@@ -240,14 +238,12 @@ struct bch_sb_field *bch2_sb_field_resize_id(struct bch_sb_handle *sb,

	if (sb->fs_sb) {
		struct bch_fs *c = container_of(sb, struct bch_fs, disk_sb);
		struct bch_dev *ca;
		unsigned i;

		lockdep_assert_held(&c->sb_lock);

		/* XXX: we're not checking that offline device have enough space */

		for_each_online_member(ca, c, i) {
		for_each_online_member(c, ca) {
			struct bch_sb_handle *dev_sb = &ca->disk_sb;

			if (bch2_sb_realloc(dev_sb, le32_to_cpu(dev_sb->sb->u64s) + d)) {
@@ -356,7 +352,6 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
			    int rw)
{
	struct bch_sb *sb = disk_sb->sb;
	struct bch_sb_field *f;
	struct bch_sb_field_members_v1 *mi;
	enum bch_opt_id opt_id;
	u16 block_size;
@@ -487,8 +482,6 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
static void bch2_sb_update(struct bch_fs *c)
{
	struct bch_sb *src = c->disk_sb.sb;
	struct bch_dev *ca;
	unsigned i;

	lockdep_assert_held(&c->sb_lock);

@@ -512,8 +505,8 @@ static void bch2_sb_update(struct bch_fs *c)
	c->sb.features = le64_to_cpu(src->features[0]);
	c->sb.compat = le64_to_cpu(src->compat[0]);

	for_each_member_device(ca, c, i) {
		struct bch_member m = bch2_sb_member_get(src, i);
	for_each_member_device(c, ca) {
		struct bch_member m = bch2_sb_member_get(src, ca->dev_idx);
		ca->mi = bch2_mi_to_cpu(&m);
	}
}
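The `for_each_member_device(ca, c, i)` to `for_each_member_device(c, ca)` conversion running through these hunks drops the caller-maintained index: the device already knows its slot, so call sites such as `bch2_sb_member_get(src, ca->dev_idx)` read it from the device instead. A hedged sketch of a `(c, ca)`-style iterator that declares its own cursor and skips empty slots; types and names here are invented for the example:

#include <stdio.h>

struct dev { unsigned dev_idx; const char *name; };

struct fs { struct dev *devs[4]; unsigned nr_slots; };

/* Sketch: the macro declares the cursor itself and walks (c, ca) style,
 * skipping empty slots; the index lives in ca->dev_idx, not in the caller.
 * (Simplified: a `break` here would only exit the inner loop.) */
#define for_each_member_dev_sketch(_c, _ca)				\
	for (unsigned _slot = 0; _slot < (_c)->nr_slots; _slot++)	\
		for (struct dev *_ca = (_c)->devs[_slot]; _ca; _ca = NULL)

int main(void)
{
	struct dev a = { 0, "sda" }, b = { 2, "sdc" };
	struct fs c = { { &a, NULL, &b, NULL }, 4 };

	for_each_member_dev_sketch(&c, ca)
		printf("%u %s\n", ca->dev_idx, ca->name);
	return 0;
}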

@@ -892,9 +885,8 @@ static void write_one_super(struct bch_fs *c, struct bch_dev *ca, unsigned idx)
int bch2_write_super(struct bch_fs *c)
{
	struct closure *cl = &c->sb_write;
	struct bch_dev *ca;
	struct printbuf err = PRINTBUF;
	unsigned i, sb = 0, nr_wrote;
	unsigned sb = 0, nr_wrote;
	struct bch_devs_mask sb_written;
	bool wrote, can_mount_without_written, can_mount_with_written;
	unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
@@ -928,10 +920,10 @@ int bch2_write_super(struct bch_fs *c)
	bch2_sb_members_cpy_v2_v1(&c->disk_sb);
	bch2_sb_errors_from_cpu(c);

	for_each_online_member(ca, c, i)
	for_each_online_member(c, ca)
		bch2_sb_from_fs(c, ca);

	for_each_online_member(ca, c, i) {
	for_each_online_member(c, ca) {
		printbuf_reset(&err);

		ret = bch2_sb_validate(&ca->disk_sb, &err, WRITE);
@@ -952,16 +944,16 @@ int bch2_write_super(struct bch_fs *c)
	if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
		goto out;

	for_each_online_member(ca, c, i) {
	for_each_online_member(c, ca) {
		__set_bit(ca->dev_idx, sb_written.d);
		ca->sb_write_error = 0;
	}

	for_each_online_member(ca, c, i)
	for_each_online_member(c, ca)
		read_back_super(c, ca);
	closure_sync(cl);

	for_each_online_member(ca, c, i) {
	for_each_online_member(c, ca) {
		if (ca->sb_write_error)
			continue;

@@ -988,7 +980,7 @@ int bch2_write_super(struct bch_fs *c)

	do {
		wrote = false;
		for_each_online_member(ca, c, i)
		for_each_online_member(c, ca)
			if (!ca->sb_write_error &&
			    sb < ca->disk_sb.sb->layout.nr_superblocks) {
				write_one_super(c, ca, sb);
@@ -998,7 +990,7 @@ int bch2_write_super(struct bch_fs *c)
		sb++;
	} while (wrote);

	for_each_online_member(ca, c, i) {
	for_each_online_member(c, ca) {
		if (ca->sb_write_error)
			__clear_bit(ca->dev_idx, sb_written.d);
		else
@@ -1010,7 +1002,7 @@ int bch2_write_super(struct bch_fs *c)
	can_mount_with_written =
		bch2_have_enough_devs(c, sb_written, degraded_flags, false);

	for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
	for (unsigned i = 0; i < ARRAY_SIZE(sb_written.d); i++)
		sb_written.d[i] = ~sb_written.d[i];

	can_mount_without_written =
@@ -1161,7 +1153,6 @@ void bch2_sb_layout_to_text(struct printbuf *out, struct bch_sb_layout *l)
void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
		     bool print_layout, unsigned fields)
{
	struct bch_sb_field *f;
	u64 fields_have = 0;
	unsigned nr_devices = 0;

@@ -167,14 +167,12 @@ static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
	struct bch_fs *c;
	struct bch_dev *ca;
	unsigned i;

	mutex_lock(&bch_fs_list_lock);
	rcu_read_lock();

	list_for_each_entry(c, &bch_fs_list, list)
		for_each_member_device_rcu(ca, c, i, NULL)
		for_each_member_device_rcu(c, ca, NULL)
			if (ca->disk_sb.bdev && ca->disk_sb.bdev->bd_dev == dev) {
				closure_get(&c->cl);
				goto found;
@@ -215,14 +213,13 @@ struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)

static void bch2_dev_usage_journal_reserve(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, nr = 0, u64s =
	unsigned nr = 0, u64s =
		((sizeof(struct jset_entry_dev_usage) +
		  sizeof(struct jset_entry_dev_usage_type) * BCH_DATA_NR)) /
		sizeof(u64);

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
	for_each_member_device_rcu(c, ca, NULL)
		nr++;
	rcu_read_unlock();

@@ -249,8 +246,7 @@ static void bch2_dev_usage_journal_reserve(struct bch_fs *c)

static void __bch2_fs_read_only(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, clean_passes = 0;
	unsigned clean_passes = 0;
	u64 seq = 0;

	bch2_fs_ec_stop(c);
@@ -286,7 +282,7 @@ static void __bch2_fs_read_only(struct bch_fs *c)
	/*
	 * After stopping journal:
	 */
	for_each_member_device(ca, c, i)
	for_each_member_device(c, ca)
		bch2_dev_allocator_remove(c, ca);
}

@@ -427,8 +423,6 @@ static int bch2_fs_read_write_late(struct bch_fs *c)

static int __bch2_fs_read_write(struct bch_fs *c, bool early)
{
	struct bch_dev *ca;
	unsigned i;
	int ret;

	if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
@@ -469,7 +463,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
	 */
	set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);

	for_each_rw_member(ca, c, i)
	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

@@ -479,7 +473,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
#ifndef BCH_WRITE_REF_DEBUG
	percpu_ref_reinit(&c->writes);
#else
	for (i = 0; i < BCH_WRITE_REF_NR; i++) {
	for (unsigned i = 0; i < BCH_WRITE_REF_NR; i++) {
		BUG_ON(atomic_long_read(&c->writes[i]));
		atomic_long_inc(&c->writes[i]);
	}

@@ -602,9 +596,6 @@ static void bch2_fs_release(struct kobject *kobj)

void __bch2_fs_stop(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	bch_verbose(c, "shutting down");

	set_bit(BCH_FS_stopping, &c->flags);
@@ -615,7 +606,7 @@ void __bch2_fs_stop(struct bch_fs *c)
	bch2_fs_read_only(c);
	up_write(&c->state_lock);

	for_each_member_device(ca, c, i)
	for_each_member_device(c, ca)
		if (ca->kobj.state_in_sysfs &&
		    ca->disk_sb.bdev)
			sysfs_remove_link(bdev_kobj(ca->disk_sb.bdev), "bcachefs");
@@ -637,7 +628,7 @@ void __bch2_fs_stop(struct bch_fs *c)
	/* btree prefetch might have kicked off reads in the background: */
	bch2_btree_flush_all_reads(c);

	for_each_member_device(ca, c, i)
	for_each_member_device(c, ca)
		cancel_work_sync(&ca->io_error_work);

	cancel_work_sync(&c->read_only_work);

@@ -676,8 +667,6 @@ void bch2_fs_stop(struct bch_fs *c)

static int bch2_fs_online(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;
	int ret = 0;

	lockdep_assert_held(&bch_fs_list_lock);
@@ -710,7 +699,7 @@ static int bch2_fs_online(struct bch_fs *c)

	down_write(&c->state_lock);

	for_each_member_device(ca, c, i) {
	for_each_member_device(c, ca) {
		ret = bch2_dev_sysfs_online(c, ca);
		if (ret) {
			bch_err(c, "error creating sysfs objects");
@@ -1000,9 +989,7 @@ static void print_mount_opts(struct bch_fs *c)

int bch2_fs_start(struct bch_fs *c)
{
	struct bch_dev *ca;
	time64_t now = ktime_get_real_seconds();
	unsigned i;
	int ret;

	print_mount_opts(c);
@@ -1019,12 +1006,12 @@ int bch2_fs_start(struct bch_fs *c)
		goto err;
	}

	for_each_online_member(ca, c, i)
		bch2_members_v2_get_mut(c->disk_sb.sb, i)->last_mount = cpu_to_le64(now);
	for_each_online_member(c, ca)
		bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = cpu_to_le64(now);

	mutex_unlock(&c->sb_lock);

	for_each_rw_member(ca, c, i)
	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);
	bch2_recalc_capacity(c);

@@ -1361,8 +1348,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
			    enum bch_member_state new_state, int flags)
{
	struct bch_devs_mask new_online_devs;
	struct bch_dev *ca2;
	int i, nr_rw = 0, required;
	int nr_rw = 0, required;

	lockdep_assert_held(&c->state_lock);

@@ -1374,7 +1360,7 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
		return true;

	/* do we have enough devices to write to? */
	for_each_member_device(ca2, c, i)
	for_each_member_device(c, ca2)
		if (ca2 != ca)
			nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;

@@ -1522,9 +1508,7 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
					BTREE_TRIGGER_NORUN, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
					BTREE_TRIGGER_NORUN, NULL);
	if (ret)
		bch_err_msg(c, ret, "removing dev alloc info");

	return ret;
}

@@ -1551,34 +1535,29 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
	__bch2_dev_read_only(c, ca);

	ret = bch2_dev_data_drop(c, ca->dev_idx, flags);
	if (ret) {
		bch_err_msg(ca, ret, "dropping data");
	if (ret)
		goto err;
	}

	ret = bch2_dev_remove_alloc(c, ca);
	if (ret) {
		bch_err_msg(ca, ret, "deleting alloc info");
	if (ret)
		goto err;
	}

	ret = bch2_journal_flush_device_pins(&c->journal, ca->dev_idx);
	if (ret) {
		bch_err_msg(ca, ret, "flushing journal");
	if (ret)
		goto err;
	}

	ret = bch2_journal_flush(&c->journal);
	if (ret) {
		bch_err(ca, "journal error");
	if (ret)
		goto err;
	}

	ret = bch2_replicas_gc2(c);
	if (ret) {
		bch_err_msg(ca, ret, "in replicas_gc2()");
	if (ret)
		goto err;
	}

	data = bch2_dev_has_data(c, ca);
	if (data) {
@@ -1650,10 +1629,9 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
	int ret;

	ret = bch2_read_super(path, &opts, &sb);
	if (ret) {
		bch_err_msg(c, ret, "reading super");
	if (ret)
		goto err;
	}

	dev_mi = bch2_sb_member_get(sb.sb, sb.sb->dev_idx);

@@ -1666,10 +1644,8 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
	}

	ret = bch2_dev_may_add(sb.sb, c);
	if (ret) {
		bch_err_fn(c, ret);
	if (ret)
		goto err;
	}

	ca = __bch2_dev_alloc(c, &dev_mi);
	if (!ca) {
@@ -1684,19 +1660,17 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
		goto err;

	ret = bch2_dev_journal_alloc(ca);
	if (ret) {
		bch_err_msg(c, ret, "allocating journal");
	if (ret)
		goto err;
	}

	down_write(&c->state_lock);
	mutex_lock(&c->sb_lock);

	ret = bch2_sb_from_fs(c, ca);
	if (ret) {
		bch_err_msg(c, ret, "setting up new superblock");
	if (ret)
		goto err_unlock;
	}

	if (dynamic_fault("bcachefs:add:no_slot"))
		goto no_slot;
@@ -1735,11 +1709,10 @@ have_slot:

	if (BCH_MEMBER_GROUP(&dev_mi)) {
		ret = __bch2_dev_group_set(c, ca, label.buf);
		if (ret) {
			bch_err_msg(c, ret, "creating new label");
		if (ret)
			goto err_unlock;
		}
	}

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);
@@ -1747,16 +1720,14 @@ have_slot:
	bch2_dev_usage_journal_reserve(c);

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret) {
		bch_err_msg(ca, ret, "marking new superblock");
	if (ret)
		goto err_late;
	}

	ret = bch2_fs_freespace_init(c);
	if (ret) {
		bch_err_msg(ca, ret, "initializing free space");
	if (ret)
		goto err_late;
	}

	ca->new_fs_bucket_idx = 0;

@@ -1775,6 +1746,7 @@ err:
	bch2_free_super(&sb);
	printbuf_exit(&label);
	printbuf_exit(&errbuf);
	bch_err_fn(c, ret);
	return ret;
err_late:
	up_write(&c->state_lock);
@@ -1802,10 +1774,9 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
	dev_idx = sb.sb->dev_idx;

	ret = bch2_dev_in_fs(c->disk_sb.sb, sb.sb);
	if (ret) {
		bch_err_msg(c, ret, "bringing %s online", path);
	if (ret)
		goto err;
	}

	ret = bch2_dev_attach_bdev(c, &sb);
	if (ret)
@@ -1814,10 +1785,9 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
	ca = bch_dev_locked(c, dev_idx);

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret) {
		bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
	if (ret)
		goto err;
	}

	if (ca->mi.state == BCH_MEMBER_STATE_rw)
		__bch2_dev_read_write(c, ca);
@@ -1896,10 +1866,9 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
	}

	ret = bch2_dev_buckets_resize(c, ca, nbuckets);
	if (ret) {
		bch_err_msg(ca, ret, "resizing buckets");
	if (ret)
		goto err;
	}

	ret = bch2_trans_mark_dev_sb(c, ca);
	if (ret)
@@ -1933,18 +1902,14 @@ err:
/* return with ref on ca->ref: */
struct bch_dev *bch2_dev_lookup(struct bch_fs *c, const char *name)
{
	struct bch_dev *ca;
	unsigned i;

	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL)
		if (!strcmp(name, ca->name))
			goto found;
	ca = ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
found:
	for_each_member_device_rcu(c, ca, NULL)
		if (!strcmp(name, ca->name)) {
			rcu_read_unlock();

			return ca;
		}
	rcu_read_unlock();
	return ERR_PTR(-BCH_ERR_ENOENT_dev_not_found);
}
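The `bch2_dev_lookup()` rewrite trades the `goto found` label for an unlock-and-return directly inside the loop, so the match path and the not-found path each perform exactly one unlock. The control-flow shape, sketched with stand-in lock functions (the real code runs under `rcu_read_lock()` and returns an `ERR_PTR()` code on failure):

#include <stdio.h>
#include <string.h>

/* Stand-ins for rcu_read_lock()/rcu_read_unlock() in this sketch. */
static void read_lock(void)   { }
static void read_unlock(void) { }

struct dev { const char *name; };

/* Sketch of the restructured lookup: no label, one unlock per path.
 * Mirrors the bch2_dev_lookup() rewrite above in shape only. */
static struct dev *dev_lookup(struct dev **devs, unsigned nr, const char *name)
{
	read_lock();
	for (unsigned i = 0; i < nr; i++)
		if (devs[i] && !strcmp(name, devs[i]->name)) {
			read_unlock();
			return devs[i];
		}
	read_unlock();
	return NULL;	/* the real code: ERR_PTR(-BCH_ERR_ENOENT_dev_not_found) */
}

int main(void)
{
	struct dev a = { "sda" };
	struct dev *devs[] = { &a };

	printf("%s\n", dev_lookup(devs, 1, "sda") ? "found" : "missing");
	return 0;
}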

/* Filesystem open: */

@@ -1954,7 +1919,7 @@ struct bch_fs *bch2_fs_open(char * const *devices, unsigned nr_devices,
{
	DARRAY(struct bch_sb_handle) sbs = { 0 };
	struct bch_fs *c = NULL;
	struct bch_sb_handle *sb, *best = NULL;
	struct bch_sb_handle *best = NULL;
	struct printbuf errbuf = PRINTBUF;
	int ret = 0;

@@ -256,8 +256,6 @@ static size_t bch2_btree_cache_size(struct bch_fs *c)

static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
{
	struct btree_trans *trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	enum btree_id id;
	struct compression_type_stats {
		u64 nr_extents;

@@ -107,9 +107,6 @@ err:

static int test_iterate(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

@@ -127,49 +124,43 @@ static int test_iterate(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
			return ret;
	}

	pr_info("iterating forwards");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
				SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				0, k, ({
			BUG_ON(k.k->p.offset != i++);
			0;
		}));
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
				SPOS(0, U64_MAX, U32_MAX), 0, k,
		({
	ret = bch2_trans_run(c,
		for_each_btree_key_reverse(trans, iter, BTREE_ID_xattrs,
				SPOS(0, U64_MAX, U32_MAX), 0, k, ({
			BUG_ON(k.k->p.offset != --i);
			0;
		}));
		})));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		goto err;
		return ret;

	BUG_ON(i);
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
	return 0;
}
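In these test hunks, a long-lived transaction plus a shared `err:` label gives way to wrapping each iteration in `bch2_trans_run()`, which owns the transaction's lifetime and yields the loop's result, so failed steps can simply `return ret`. The acquire/evaluate/release shape, sketched with dummy types of my own:

#include <stdio.h>
#include <stdlib.h>

struct trans { int dummy; };

static struct trans *trans_get(void)	{ return calloc(1, sizeof(struct trans)); }
static void trans_put(struct trans *t)	{ free(t); }

/* Sketch of the bch2_trans_run() shape: acquire, evaluate _do with `trans`
 * in scope, release, and yield _do's value -- no err: label needed. */
#define trans_run_sketch(_do)				\
({							\
	struct trans *trans = trans_get();		\
	int _ret = (_do);				\
	trans_put(trans);				\
	_ret;						\
})

static int work(struct trans *trans)
{
	(void) trans;
	return 0;	/* pretend the btree walk succeeded */
}

int main(void)
{
	int ret = trans_run_sketch(work(trans));

	printf("ret = %d\n", ret);
	return 0;
}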
static int test_iterate_extents(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

@@ -188,51 +179,45 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
			return ret;
	}

	pr_info("iterating forwards");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
				SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				0, k, ({
			BUG_ON(bkey_start_offset(k.k) != i);
			i = k.k->p.offset;
			0;
		}));
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating backwards");

	ret = for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
				SPOS(0, U64_MAX, U32_MAX), 0, k,
		({
	ret = bch2_trans_run(c,
		for_each_btree_key_reverse(trans, iter, BTREE_ID_extents,
				SPOS(0, U64_MAX, U32_MAX), 0, k, ({
			BUG_ON(k.k->p.offset != i);
			i = bkey_start_offset(k.k);
			0;
		}));
		})));
	bch_err_msg(c, ret, "error iterating backwards");
	if (ret)
		goto err;
		return ret;

	BUG_ON(i);
err:
	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);
	return ret;
	return 0;
}

static int test_iterate_slots(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

@@ -250,31 +235,31 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
			return ret;
	}

	pr_info("iterating forwards");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
				SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				0, k, ({
			BUG_ON(k.k->p.offset != i);
			i += 2;
			0;
		}));
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;
		return ret;

	BUG_ON(i != nr * 2);

	pr_info("iterating forwards by slots");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
				SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				BTREE_ITER_SLOTS, k, ({
			if (i >= nr * 2)
@@ -285,22 +270,13 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)

			i++;
			0;
		}));
	if (ret < 0) {
		})));
	bch_err_msg(c, ret, "error iterating forwards by slots");
		goto err;
	}
	ret = 0;
err:
	bch2_trans_put(trans);
	return ret;
}

static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	u64 i;
	int ret = 0;

@@ -319,32 +295,32 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_extents, &ck.k_i, NULL, 0);
		bch_err_msg(c, ret, "insert error");
		if (ret)
			goto err;
			return ret;
	}

	pr_info("iterating forwards");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
				SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				0, k, ({
			BUG_ON(bkey_start_offset(k.k) != i + 8);
			BUG_ON(k.k->size != 8);
			i += 16;
			0;
		}));
		})));
	bch_err_msg(c, ret, "error iterating forwards");
	if (ret)
		goto err;
		return ret;

	BUG_ON(i != nr);

	pr_info("iterating forwards by slots");

	i = 0;

	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_extents,
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
				SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				BTREE_ITER_SLOTS, k, ({
			if (i == nr)
@@ -355,14 +331,9 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
			BUG_ON(k.k->size != 8);
			i = k.k->p.offset;
			0;
		}));
		})));
	bch_err_msg(c, ret, "error iterating forwards by slots");
	if (ret)
		goto err;
	ret = 0;
err:
	bch2_trans_put(trans);
	return 0;
	return ret;
}

/*
@@ -736,8 +707,6 @@ static int rand_delete(struct bch_fs *c, u64 nr)

static int seq_insert(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey_i_cookie insert;

	bkey_cookie_init(&insert.k_i);
@@ -756,11 +725,8 @@ static int seq_insert(struct bch_fs *c, u64 nr)

static int seq_lookup(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key2_upto(trans, iter, BTREE_ID_xattrs,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
				SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				0, k,
			0));
@@ -768,9 +734,6 @@ static int seq_lookup(struct bch_fs *c, u64 nr)

static int seq_overwrite(struct bch_fs *c, u64 nr)
{
	struct btree_iter iter;
	struct bkey_s_c k;

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
				SPOS(0, 0, U32_MAX),

@@ -297,8 +297,6 @@ int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task)

void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
{
	unsigned long *i;

	darray_for_each(*stack, i) {
		prt_printf(out, "[<0>] %pB", (void *) *i);
		prt_newline(out);

@@ -855,4 +855,11 @@ static inline int cmp_le32(__le32 l, __le32 r)

#include <linux/uuid.h>

#define QSTR(n) { { { .len = strlen(n) } }, .name = n }

static inline bool qstr_eq(const struct qstr l, const struct qstr r)
{
	return l.len == r.len && !memcmp(l.name, r.name, l.len);
}

#endif /* _BCACHEFS_UTIL_H */

@@ -48,14 +48,14 @@
	((void *) ((u64 *) (_s)->_data + __vstruct_u64s(_s)))

#define vstruct_for_each(_s, _i)					\
	for (_i = (_s)->start;						\
	for (typeof(&(_s)->start[0]) _i = (_s)->start;			\
	     _i < vstruct_last(_s);					\
	     _i = vstruct_next(_i))

#define vstruct_for_each_safe(_s, _i, _t)				\
	for (_i = (_s)->start;						\
	     _i < vstruct_last(_s) && (_t = vstruct_next(_i), true);	\
	     _i = _t)
#define vstruct_for_each_safe(_s, _i)					\
	for (typeof(&(_s)->start[0]) _next, _i = (_s)->start;		\
	     _i < vstruct_last(_s) && (_next = vstruct_next(_i), true);	\
	     _i = _next)

#define vstruct_idx(_s, _idx)						\
	((typeof(&(_s)->start[0])) ((_s)->_data + (_idx)))
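The new `vstruct_for_each_safe()` drops the caller-supplied temporary `_t` by declaring both the cursor and the lookahead pointer in the for-initializer; `typeof` gives both declarators the full element-pointer type. The same trick in miniature over a linked list (all names here are mine):

#include <stdio.h>

struct node { int v; struct node *next; };

/* Sketch of the "safe" iteration trick: _next is captured before the body
 * runs, so the body may free or unlink the current node. Both variables
 * are declared by the macro, as in the new vstruct_for_each_safe(). */
#define for_each_node_safe(_head, _i)					\
	for (typeof(_head) _next, _i = (_head);				\
	     _i && (_next = _i->next, 1);				\
	     _i = _next)

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

	for_each_node_safe(&a, i)
		printf("%d\n", i->v);	/* body could safely unlink i here */
	return 0;
}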

1
qcow2.c
@@ -84,7 +84,6 @@ void qcow2_write_image(int infd, int outfd, ranges *data,
		.l1_index	= -1,
		.offset		= round_up(sizeof(hdr), block_size),
	};
	struct range *r;
	char *buf = xmalloc(block_size);
	u64 src_offset, dst_offset;

@@ -293,14 +293,13 @@ static int range_cmp(const void *_l, const void *_r)

void ranges_sort_merge(ranges *r)
{
	struct range *t, *i;
	ranges tmp = { 0 };

	sort(r->data, r->nr, sizeof(r->data[0]), range_cmp, NULL);

	/* Merge contiguous ranges: */
	darray_for_each(*r, i) {
		t = tmp.nr ? &tmp.data[tmp.nr - 1] : NULL;
		struct range *t = tmp.nr ? &tmp.data[tmp.nr - 1] : NULL;

		if (t && t->end >= i->start)
			t->end = max(t->end, i->end);
@@ -314,8 +313,6 @@ void ranges_sort_merge(ranges *r)

void ranges_roundup(ranges *r, unsigned block_size)
{
	struct range *i;

	darray_for_each(*r, i) {
		i->start = round_down(i->start, block_size);
		i->end = round_up(i->end, block_size);
@@ -324,8 +321,6 @@ void ranges_roundup(ranges *r, unsigned block_size)

void ranges_rounddown(ranges *r, unsigned block_size)
{
	struct range *i;

	darray_for_each(*r, i) {
		i->start = round_up(i->start, block_size);
		i->end = round_down(i->end, block_size);