Update bcachefs sources to ff6fd3ef0cd0 bcachefs: Check for writing btree_ptr_v2.sectors_written == 0
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent c824bbe69d
commit a0da3c91d4

Changed files:
.bcachefs_revision
libbcachefs: acl.c, alloc_background.c, alloc_background.h, alloc_foreground.c, alloc_foreground.h, backpointers.c, backpointers.h, bcachefs.h, bcachefs_format.h, bkey_methods.h, btree_gc.c, btree_gc.h, btree_io.c, btree_iter.c, btree_iter.h, btree_journal_iter.c, btree_journal_iter.h, btree_key_cache.c, btree_locking.c, btree_locking.h, btree_trans_commit.c, btree_types.h, btree_update.c, btree_update.h, btree_update_interior.c, btree_update_interior.h, btree_write_buffer.c, buckets.c, buckets.h, chardev.c, checksum.c, data_update.c, debug.c, dirent.c, dirent.h, disk_groups.c, ec.c, ec.h, error.c, extent_update.c, extents.c, eytzinger.c, fs-common.c, fs-io-buffered.c, fs-io-direct.c, fs-io-pagecache.c, fs-io.c, fs.c, fsck.c, inode.c, inode.h, io_misc.c, io_read.c, io_write.c, journal.c, journal.h, journal_io.c, journal_reclaim.c, journal_seq_blacklist.c, journal_types.h, logged_ops.c, lru.c, migrate.c, move.c, movinggc.c, opts.h, printbuf.c, printbuf.h, quota.c, rebalance.c, recovery.c, recovery_passes.c, reflink.c, reflink.h, replicas.c, sb-clean.c, sb-counters.c, sb-downgrade.c, sb-errors_types.h, sb-members.c, sb-members.h, sb-members_types.h, snapshot.c, snapshot.h, str_hash.h, subvolume.c, super-io.c, super.c, super_types.h, sysfs.c, tests.c, util.c, xattr.c
.bcachefs_revision
@@ -1 +1 @@
-10ca1f99f8c99a3d992b686cdc29d427807070e5
+ff6fd3ef0cd00288c3bb8f5b798bd58391e97f21
libbcachefs/acl.c
@@ -282,18 +282,12 @@ struct posix_acl *bch2_get_acl(struct mnt_idmap *idmap,
 	struct btree_trans *trans = bch2_trans_get(c);
 	struct btree_iter iter = { NULL };
 	struct posix_acl *acl = NULL;
-	struct bkey_s_c k;
-	int ret;
retry:
 	bch2_trans_begin(trans);
 
-	ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
-			&hash, inode_inum(inode), &search, 0);
-	if (ret)
-		goto err;
-
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
+	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
+			&hash, inode_inum(inode), &search, 0);
+	int ret = bkey_err(k);
 	if (ret)
 		goto err;
 
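The refactor above works because in bcachefs a hash lookup can return its error through the key itself: a struct bkey_s_c may carry an errno encoded in its key pointer, and bkey_err() extracts it, so the separate peek_slot/error-check step disappears. A minimal userspace sketch of that error-in-pointer convention follows; the err_ptr/ptr_err/is_err helpers are simplified stand-ins for the kernel's ERR_PTR machinery, not the real implementation (which also treats NULL specially via IS_ERR_OR_NULL).

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

/* Simplified userspace model of the kernel's ERR_PTR convention:
 * errnos are encoded as pointers in the top page of the address space. */
static inline void *err_ptr(long err)      { return (void *)(intptr_t)err; }
static inline long  ptr_err(const void *p) { return (long)(intptr_t)p; }
static inline int   is_err(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-4095;
}

struct bkey { int dummy; };
struct bkey_s_c { const struct bkey *k; };

/* Model of bkey_err(): 0 for a valid key, the encoded errno otherwise */
static inline int bkey_err(struct bkey_s_c k)
{
	return !is_err(k.k) ? 0 : (int)ptr_err(k.k);
}

int main(void)
{
	struct bkey good_key = { 0 };
	struct bkey_s_c ok  = { .k = &good_key };
	struct bkey_s_c bad = { .k = err_ptr(-ENOENT) };

	printf("ok:  err=%d\n", bkey_err(ok));  /* 0 */
	printf("bad: err=%d\n", bkey_err(bad)); /* -ENOENT (-2) */
	return 0;
}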
@@ -366,7 +360,7 @@ retry:
 
 	ret   = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
 		bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
-				BTREE_ITER_INTENT);
+				BTREE_ITER_intent);
 	if (ret)
 		goto btree_err;
 
@@ -414,39 +408,30 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
 	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
 	struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
 	struct btree_iter iter;
-	struct bkey_s_c_xattr xattr;
-	struct bkey_i_xattr *new;
 	struct posix_acl *acl = NULL;
-	struct bkey_s_c k;
-	int ret;
 
-	ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
-			&hash_info, inum, &search, BTREE_ITER_INTENT);
+	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
+			&hash_info, inum, &search, BTREE_ITER_intent);
+	int ret = bkey_err(k);
 	if (ret)
 		return bch2_err_matches(ret, ENOENT) ? 0 : ret;
 
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
-	xattr = bkey_s_c_to_xattr(k);
+	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
 
 	acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
			le16_to_cpu(xattr.v->x_val_len));
 	ret = PTR_ERR_OR_ZERO(acl);
 	if (IS_ERR_OR_NULL(acl))
 		goto err;
 
-	ret = allocate_dropping_locks_errcode(trans,
-			__posix_acl_chmod(&acl, _gfp, mode));
-	if (ret)
-		goto err;
-
-	new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
-	if (IS_ERR(new)) {
-		ret = PTR_ERR(new);
+	ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode));
+	if (ret)
+		goto err;
+
+	struct bkey_i_xattr *new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
+	ret = PTR_ERR_OR_ZERO(new);
+	if (ret)
 		goto err;
-	}
 
 	new->k.p = iter.pos;
 	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
libbcachefs/alloc_background.c
@@ -330,27 +330,17 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
 	bch2_prt_data_type(out, a->data_type);
 	prt_newline(out);
-	prt_printf(out, "journal_seq       %llu", a->journal_seq);
-	prt_newline(out);
-	prt_printf(out, "need_discard      %llu", BCH_ALLOC_V4_NEED_DISCARD(a));
-	prt_newline(out);
-	prt_printf(out, "need_inc_gen      %llu", BCH_ALLOC_V4_NEED_INC_GEN(a));
-	prt_newline(out);
-	prt_printf(out, "dirty_sectors     %u", a->dirty_sectors);
-	prt_newline(out);
-	prt_printf(out, "cached_sectors    %u", a->cached_sectors);
-	prt_newline(out);
-	prt_printf(out, "stripe            %u", a->stripe);
-	prt_newline(out);
-	prt_printf(out, "stripe_redundancy %u", a->stripe_redundancy);
-	prt_newline(out);
-	prt_printf(out, "io_time[READ]     %llu", a->io_time[READ]);
-	prt_newline(out);
-	prt_printf(out, "io_time[WRITE]    %llu", a->io_time[WRITE]);
-	prt_newline(out);
-	prt_printf(out, "fragmentation     %llu", a->fragmentation_lru);
-	prt_newline(out);
-	prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
+	prt_printf(out, "journal_seq       %llu\n", a->journal_seq);
+	prt_printf(out, "need_discard      %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
+	prt_printf(out, "need_inc_gen      %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
+	prt_printf(out, "dirty_sectors     %u\n", a->dirty_sectors);
+	prt_printf(out, "cached_sectors    %u\n", a->cached_sectors);
+	prt_printf(out, "stripe            %u\n", a->stripe);
+	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
+	prt_printf(out, "io_time[READ]     %llu\n", a->io_time[READ]);
+	prt_printf(out, "io_time[WRITE]    %llu\n", a->io_time[WRITE]);
+	prt_printf(out, "fragmentation     %llu\n", a->fragmentation_lru);
+	prt_printf(out, "bp_start          %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
 	printbuf_indent_sub(out, 2);
 }
 
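The hunk above folds each prt_printf()/prt_newline() pair into a single prt_printf() whose format ends in "\n", halving the calls without changing the rendered output. A standalone sketch of why the two forms are equivalent for simple appends; this toy printbuf is an assumption for the demo (the real struct printbuf also tracks indent levels and heap allocation):

#include <stdio.h>
#include <stdarg.h>
#include <string.h>

/* Toy append-only printbuf: just enough to compare the two call styles */
struct printbuf { char buf[256]; size_t pos; };

static void prt_printf(struct printbuf *out, const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	out->pos += vsnprintf(out->buf + out->pos,
			      sizeof(out->buf) - out->pos, fmt, args);
	va_end(args);
}

static void prt_newline(struct printbuf *out)
{
	prt_printf(out, "\n");
}

int main(void)
{
	struct printbuf a = {0}, b = {0};

	/* old style: two calls per field */
	prt_printf(&a, "journal_seq %llu", 42ULL);
	prt_newline(&a);

	/* new style: one call, newline folded into the format string */
	prt_printf(&b, "journal_seq %llu\n", 42ULL);

	printf("identical: %d\n", !strcmp(a.buf, b.buf)); /* prints 1 */
	return 0;
}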
@@ -447,9 +437,9 @@ bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter
 	int ret;
 
 	k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
-			       BTREE_ITER_WITH_UPDATES|
-			       BTREE_ITER_CACHED|
-			       BTREE_ITER_INTENT);
+			       BTREE_ITER_with_updates|
+			       BTREE_ITER_cached|
+			       BTREE_ITER_intent);
 	ret = bkey_err(k);
 	if (unlikely(ret))
 		return ERR_PTR(ret);
@@ -520,7 +510,7 @@ int bch2_bucket_gens_init(struct bch_fs *c)
 	int ret;
 
 	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-				 BTREE_ITER_PREFETCH, k, ({
+				 BTREE_ITER_prefetch, k, ({
 		/*
 		 * Not a fsck error because this is checked/repaired by
 		 * bch2_check_alloc_key() which runs later:
@@ -573,7 +563,7 @@ int bch2_alloc_read(struct bch_fs *c)
 
 	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
 		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
-					 BTREE_ITER_PREFETCH, k, ({
+					 BTREE_ITER_prefetch, k, ({
 			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
 			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
 
@@ -586,10 +576,10 @@ int bch2_alloc_read(struct bch_fs *c)
 			 * Not a fsck error because this is checked/repaired by
 			 * bch2_check_alloc_key() which runs later:
 			 */
-			if (!bch2_dev_exists2(c, k.k->p.inode))
+			if (!bch2_dev_exists(c, k.k->p.inode))
 				continue;
 
-			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, k.k->p.inode);
 
 			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
 			     b < min_t(u64, ca->mi.nbuckets, end);
@@ -599,7 +589,7 @@ int bch2_alloc_read(struct bch_fs *c)
 		}));
 	} else {
 		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
-					 BTREE_ITER_PREFETCH, k, ({
+					 BTREE_ITER_prefetch, k, ({
 			/*
 			 * Not a fsck error because this is checked/repaired by
 			 * bch2_check_alloc_key() which runs later:
@@ -607,7 +597,7 @@ int bch2_alloc_read(struct bch_fs *c)
 			if (!bch2_dev_bucket_exists(c, k.k->p))
 				continue;
 
-			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, k.k->p.inode);
 
 			struct bch_alloc_v4 a;
 			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
@@ -630,7 +620,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
 			       bool set)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, alloc_k.k->p.inode);
 	struct btree_iter iter;
 	struct bkey_s_c old;
 	struct bkey_i *k;
@@ -667,7 +657,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
 
 	old = bch2_bkey_get_iter(trans, &iter, btree,
 			     bkey_start_pos(&k->k),
-			     BTREE_ITER_INTENT);
+			     BTREE_ITER_intent);
 	ret = bkey_err(old);
 	if (ret)
 		return ret;
@@ -711,8 +701,8 @@ static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
 		return ret;
 
 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
-			       BTREE_ITER_INTENT|
-			       BTREE_ITER_WITH_UPDATES);
+			       BTREE_ITER_intent|
+			       BTREE_ITER_with_updates);
 	ret = bkey_err(k);
 	if (ret)
 		return ret;
@@ -743,12 +733,12 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 		      "alloc key for invalid device or bucket"))
 		return -EIO;
 
-	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, new.k->p.inode);
 
 	struct bch_alloc_v4 old_a_convert;
 	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
 
-	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
+	if (flags & BTREE_TRIGGER_transactional) {
 		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
 
 		new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
@@ -791,7 +781,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 		}
 
 		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
-						bch_dev_bkey_exists(c, new.k->p.inode));
+						bch2_dev_bkey_exists(c, new.k->p.inode));
 		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
 			ret = bch2_lru_change(trans,
 					BCH_LRU_FRAGMENTATION_START,
@@ -812,7 +802,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 		 * not:
 		 */
 
-		if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
+		if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
 		    old_a->cached_sectors) {
 			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
 							      -((s64) old_a->cached_sectors));
@@ -821,12 +811,12 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 		}
 	}
 
-	if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
+	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
 		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
 		u64 journal_seq = trans->journal_res.seq;
 		u64 bucket_journal_seq = new_a->journal_seq;
 
-		if ((flags & BTREE_TRIGGER_INSERT) &&
+		if ((flags & BTREE_TRIGGER_insert) &&
 		    data_type_is_empty(old_a->data_type) !=
 		    data_type_is_empty(new_a->data_type) &&
 		    new.k->type == KEY_TYPE_alloc_v4) {
@@ -887,8 +877,8 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 			bch2_do_gc_gens(c);
 	}
 
-	if ((flags & BTREE_TRIGGER_GC) &&
-	    (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
+	if ((flags & BTREE_TRIGGER_gc) &&
+	    (flags & BTREE_TRIGGER_bucket_invalidate)) {
 		struct bch_alloc_v4 new_a_convert;
 		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
 
@@ -913,7 +903,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
 }
 
 /*
- * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
+ * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
 * extents style btrees, but works on non-extents btrees:
 */
 static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
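Per the comment above, bch2_get_key_or_hole() returns either the next real key or a synthesized "deleted" key covering the gap before it, so callers see a fully tiled keyspace. A minimal userspace sketch of that idea over a sorted array of integer positions; every name and type here is invented for illustration only:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Either a real key at .pos, or a synthesized hole [pos, end). */
struct key_or_hole {
	uint64_t pos, end;
	bool     is_hole;
};

/* Report the key at cur if one exists, otherwise synthesize a hole
 * stretching to the next key (or to max if there are no more keys). */
static struct key_or_hole get_key_or_hole(const uint64_t *keys, size_t nr,
					  size_t *idx, uint64_t cur, uint64_t max)
{
	if (*idx < nr && keys[*idx] == cur)
		return (struct key_or_hole){ .pos = keys[(*idx)++] };

	uint64_t end = *idx < nr ? keys[*idx] : max;
	return (struct key_or_hole){ .pos = cur, .end = end, .is_hole = true };
}

int main(void)
{
	const uint64_t keys[] = { 1, 2, 5, 6 };
	size_t idx = 0;
	uint64_t cur = 0, max = 8;

	while (cur < max) {
		struct key_or_hole k = get_key_or_hole(keys, 4, &idx, cur, max);
		if (k.is_hole) {
			printf("hole [%llu, %llu)\n",
			       (unsigned long long)k.pos,
			       (unsigned long long)k.end);
			cur = k.end;
		} else {
			printf("key %llu\n", (unsigned long long)k.pos);
			cur = k.pos + 1;
		}
	}
	return 0;
}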
@@ -965,8 +955,8 @@ static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
 	if (bch2_dev_bucket_exists(c, *bucket))
 		return true;
 
-	if (bch2_dev_exists2(c, bucket->inode)) {
-		ca = bch_dev_bkey_exists(c, bucket->inode);
+	if (bch2_dev_exists(c, bucket->inode)) {
+		ca = bch2_dev_bkey_exists(c, bucket->inode);
 
 		if (bucket->offset < ca->mi.first_bucket) {
 			bucket->offset = ca->mi.first_bucket;
@@ -1007,7 +997,7 @@ again:
 	}
 
 	if (!bch2_dev_bucket_exists(c, k.k->p)) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, bucket.inode);
 
 		bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
 	}
@@ -1040,7 +1030,7 @@ int bch2_check_alloc_key(struct btree_trans *trans,
 			alloc_k.k->p.inode, alloc_k.k->p.offset))
 		return bch2_btree_delete_at(trans, alloc_iter, 0);
 
-	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
+	ca = bch2_dev_bkey_exists(c, alloc_k.k->p.inode);
 	if (!ca->mi.freespace_initialized)
 		return 0;
 
@@ -1159,7 +1149,7 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
 	struct printbuf buf = PRINTBUF;
 	int ret;
 
-	ca = bch_dev_bkey_exists(c, start.inode);
+	ca = bch2_dev_bkey_exists(c, start.inode);
 	if (!ca->mi.freespace_initialized)
 		return 0;
 
@@ -1349,7 +1339,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 	bkey_reassemble(&g.k_i, k);
 
 	/* if no bch_dev, skip out whether we repair or not */
-	dev_exists = bch2_dev_exists2(c, k.k->p.inode);
+	dev_exists = bch2_dev_exists(c, k.k->p.inode);
 	if (!dev_exists) {
 		if (fsck_err_on(!dev_exists, c,
 				bucket_gens_to_invalid_dev,
@@ -1360,7 +1350,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
 			goto out;
 	}
 
-	ca = bch_dev_bkey_exists(c, k.k->p.inode);
+	ca = bch2_dev_bkey_exists(c, k.k->p.inode);
 	if (fsck_err_on(end <= ca->mi.first_bucket ||
 			start >= ca->mi.nbuckets, c,
 			bucket_gens_to_invalid_buckets,
@@ -1411,13 +1401,13 @@ int bch2_check_alloc_info(struct bch_fs *c)
 	int ret = 0;
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
-			     BTREE_ITER_PREFETCH);
+			     BTREE_ITER_prefetch);
 	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
-			     BTREE_ITER_PREFETCH);
+			     BTREE_ITER_prefetch);
 	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
-			     BTREE_ITER_PREFETCH);
+			     BTREE_ITER_prefetch);
 	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
-			     BTREE_ITER_PREFETCH);
+			     BTREE_ITER_prefetch);
 
 	while (1) {
 		struct bpos next;
@@ -1479,13 +1469,13 @@ bkey_err:
 
 	ret = for_each_btree_key(trans, iter,
 			BTREE_ID_need_discard, POS_MIN,
-			BTREE_ITER_PREFETCH, k,
+			BTREE_ITER_prefetch, k,
 		bch2_check_discard_freespace_key(trans, &iter));
 	if (ret)
 		goto err;
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
-			     BTREE_ITER_PREFETCH);
+			     BTREE_ITER_prefetch);
 	while (1) {
 		bch2_trans_begin(trans);
 		k = bch2_btree_iter_peek(&iter);
@@ -1515,7 +1505,7 @@ bkey_err:
 
 	ret = for_each_btree_key_commit(trans, iter,
 			BTREE_ID_bucket_gens, POS_MIN,
-			BTREE_ITER_PREFETCH, k,
+			BTREE_ITER_prefetch, k,
 			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 		bch2_check_bucket_gens_key(trans, &iter, k));
err:
@@ -1562,7 +1552,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 
 		a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
 		ret = bch2_trans_update(trans, alloc_iter,
-					&a_mut->k_i, BTREE_TRIGGER_NORUN);
+					&a_mut->k_i, BTREE_TRIGGER_norun);
 		if (ret)
 			goto err;
 
@@ -1601,7 +1591,7 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
 {
 	int ret = bch2_trans_run(c,
 		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
-				POS_MIN, BTREE_ITER_PREFETCH, k,
+				POS_MIN, BTREE_ITER_prefetch, k,
 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 			bch2_check_alloc_to_lru_ref(trans, &iter)));
 	bch_err_fn(c, ret);
@@ -1679,7 +1669,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 	bool discard_locked = false;
 	int ret = 0;
 
-	ca = bch_dev_bkey_exists(c, pos.inode);
+	ca = bch2_dev_bkey_exists(c, pos.inode);
 
 	if (!percpu_ref_tryget(&ca->io_ref)) {
 		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
@@ -1703,7 +1693,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
 
 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
 			       need_discard_iter->pos,
-			       BTREE_ITER_CACHED);
+			       BTREE_ITER_cached);
 	ret = bkey_err(k);
 	if (ret)
 		goto out;
@@ -1827,7 +1817,7 @@ void bch2_do_discards(struct bch_fs *c)
 static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
 {
 	struct btree_iter iter;
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_INTENT);
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
 	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 	int ret = bkey_err(k);
 	if (ret)
@@ -1862,7 +1852,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
 		if (i->snapshot)
 			continue;
 
-		ca = bch_dev_bkey_exists(c, i->inode);
+		ca = bch2_dev_bkey_exists(c, i->inode);
 
 		if (!percpu_ref_tryget(&ca->io_ref)) {
 			darray_remove_item(&c->discard_buckets_in_flight, i);
@@ -1903,7 +1893,7 @@ static void bch2_do_discards_fast_work(struct work_struct *work)
 
 static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bucket.inode);
 
 	if (!percpu_ref_is_dying(&ca->io_ref) &&
 	    !discard_in_flight_add(c, bucket) &&
@@ -1962,7 +1952,7 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 	a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
 
 	ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
-				BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
+				BTREE_TRIGGER_bucket_invalidate) ?:
 		bch2_trans_commit(trans, NULL, NULL,
 				  BCH_WATERMARK_btree|
 				  BCH_TRANS_COMMIT_no_enospc);
@@ -2014,7 +2004,7 @@ static void bch2_do_invalidates_work(struct work_struct *work)
 		ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
 				lru_pos(ca->dev_idx, 0, 0),
 				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
-				BTREE_ITER_INTENT, k,
+				BTREE_ITER_intent, k,
 			invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
 
 		if (ret < 0) {
@@ -2051,7 +2041,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
 		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
-		BTREE_ITER_PREFETCH);
+		BTREE_ITER_prefetch);
 	/*
 	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
 	 * freespace/need_discard/need_gc_gens btrees as needed:
@@ -2182,6 +2172,9 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
 	u64 now;
 	int ret = 0;
 
+	if (bch2_trans_relock(trans))
+		bch2_trans_begin(trans);
+
 	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
 	ret = PTR_ERR_OR_ZERO(a);
 	if (ret)
libbcachefs/alloc_background.h
@@ -17,12 +17,11 @@ static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
 {
 	struct bch_dev *ca;
 
-	if (!bch2_dev_exists2(c, pos.inode))
+	if (!bch2_dev_exists(c, pos.inode))
 		return false;
 
-	ca = bch_dev_bkey_exists(c, pos.inode);
-	return pos.offset >= ca->mi.first_bucket &&
-		pos.offset < ca->mi.nbuckets;
+	ca = bch2_dev_bkey_exists(c, pos.inode);
+	return bucket_valid(ca, pos.offset);
 }
 
 static inline u64 bucket_to_u64(struct bpos bucket)
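bucket_valid() above is simply the range check the old open-coded version spelled out: a bucket index is usable iff it lies in [first_bucket, nbuckets). A tiny self-contained version of the check; the struct layout and sample numbers are illustrative simplifications of struct bch_dev's member info, not the real definitions:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Simplified slice of a device's member info */
struct dev_mi { uint64_t first_bucket, nbuckets; };

/* Valid iff b is in the half-open range [first_bucket, nbuckets) */
static inline bool bucket_valid(const struct dev_mi *mi, uint64_t b)
{
	return b >= mi->first_bucket && b < mi->nbuckets;
}

int main(void)
{
	struct dev_mi mi = { .first_bucket = 16, .nbuckets = 1024 };

	printf("%d %d %d\n",
	       bucket_valid(&mi, 15),    /* 0: before first_bucket      */
	       bucket_valid(&mi, 16),    /* 1: first usable bucket      */
	       bucket_valid(&mi, 1024)); /* 0: one past the last bucket */
	return 0;
}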
libbcachefs/alloc_foreground.c
@@ -100,7 +100,7 @@ static void bch2_open_bucket_hash_remove(struct bch_fs *c, struct open_bucket *o
 
 void __bch2_open_bucket_put(struct bch_fs *c, struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 
 	if (ob->ec) {
 		ec_stripe_new_put(c, ob->ec, STRIPE_REF_io);
@@ -300,7 +300,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 
 	k = bch2_bkey_get_iter(trans, &iter,
 			       BTREE_ID_alloc, POS(ca->dev_idx, b),
-			       BTREE_ITER_CACHED);
+			       BTREE_ITER_cached);
 	ret = bkey_err(k);
 	if (ret) {
 		ob = ERR_PTR(ret);
@@ -344,7 +344,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 
 		ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
 						&bp_pos, &bp,
-						BTREE_ITER_NOPRESERVE);
+						BTREE_ITER_nopreserve);
 		if (ret) {
 			ob = ERR_PTR(ret);
 			goto err;
@@ -404,7 +404,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 	 */
again:
 	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, alloc_cursor),
-			   BTREE_ITER_SLOTS, k, ret) {
+			   BTREE_ITER_slots, k, ret) {
 		struct bch_alloc_v4 a_convert;
 		const struct bch_alloc_v4 *a;
 
@@ -420,7 +420,7 @@ again:
 			continue;
 
 		/* now check the cached key to serialize concurrent allocs of the bucket */
-		ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_CACHED);
+		ck = bch2_bkey_get_iter(trans, &citer, BTREE_ID_alloc, k.k->p, BTREE_ITER_cached);
 		ret = bkey_err(ck);
 		if (ret)
 			break;
@@ -679,7 +679,7 @@ static int add_new_bucket(struct bch_fs *c,
 			  struct open_bucket *ob)
 {
 	unsigned durability =
-		bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+		bch2_dev_bkey_exists(c, ob->dev)->mi.durability;
 
 	BUG_ON(*nr_effective >= nr_replicas);
 
@@ -836,7 +836,7 @@ static bool want_bucket(struct bch_fs *c,
 			bool *have_cache, bool ec,
 			struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 
 	if (!test_bit(ob->dev, devs_may_alloc->d))
 		return false;
@@ -906,7 +906,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 		struct open_bucket *ob = c->open_buckets + c->open_buckets_partial[i];
 
 		if (want_bucket(c, wp, devs_may_alloc, have_cache, ec, ob)) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 			struct bch_dev_usage usage;
 			u64 avail;
 
@@ -1291,7 +1291,7 @@ deallocate_extra_replicas(struct bch_fs *c,
 	unsigned i;
 
 	open_bucket_for_each(c, ptrs, ob, i) {
-		unsigned d = bch_dev_bkey_exists(c, ob->dev)->mi.durability;
+		unsigned d = bch2_dev_bkey_exists(c, ob->dev)->mi.durability;
 
 		if (d && d <= extra_replicas) {
 			extra_replicas -= d;
@@ -1342,6 +1342,10 @@ retry:
 
 	*wp_ret = wp = writepoint_find(trans, write_point.v);
 
+	ret = bch2_trans_relock(trans);
+	if (ret)
+		goto err;
+
 	/* metadata may not allocate on cache devices: */
 	if (wp->data_type != BCH_DATA_user)
 		have_cache = true;
@@ -1444,7 +1448,7 @@ err:
 
 struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 
 	return (struct bch_extent_ptr) {
 		.type	= 1 << BCH_EXTENT_ENTRY_ptr,
@@ -1520,7 +1524,7 @@ void bch2_fs_allocator_foreground_init(struct bch_fs *c)
 
 static void bch2_open_bucket_to_text(struct printbuf *out, struct bch_fs *c, struct open_bucket *ob)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 	unsigned data_type = ob->data_type;
 	barrier(); /* READ_ONCE() doesn't work on bitfields */
 
libbcachefs/alloc_foreground.h
@@ -184,7 +184,7 @@ bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
 	wp->sectors_allocated += sectors;
 
 	open_bucket_for_each(c, &wp->ptrs, ob, i) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
 		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
 
 		ptr.cached = cached ||
libbcachefs/backpointers.c
@@ -46,10 +46,10 @@ int bch2_backpointer_invalid(struct bch_fs *c, struct bkey_s_c k,
 	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
 
 	/* these will be caught by fsck */
-	if (!bch2_dev_exists2(c, bp.k->p.inode))
+	if (!bch2_dev_exists(c, bp.k->p.inode))
 		return 0;
 
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bp.k->p.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bp.k->p.inode);
 	struct bpos bucket = bp_pos_to_bucket(c, bp.k->p);
 	int ret = 0;
 
@@ -75,7 +75,7 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
 
 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
-	if (bch2_dev_exists2(c, k.k->p.inode)) {
+	if (bch2_dev_exists(c, k.k->p.inode)) {
 		prt_str(out, "bucket=");
 		bch2_bpos_to_text(out, bp_pos_to_bucket(c, k.k->p));
 		prt_str(out, " ");
@@ -117,8 +117,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
 
 		bch_err(c, "%s", buf.buf);
 	} else if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_extents_to_backpointers) {
-		prt_printf(&buf, "backpointer not found when deleting");
-		prt_newline(&buf);
+		prt_printf(&buf, "backpointer not found when deleting\n");
 		printbuf_indent_add(&buf, 2);
 
 		prt_printf(&buf, "searching for ");
@@ -171,9 +170,9 @@ int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
 
 	k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
 			       bp_k->k.p,
-			       BTREE_ITER_INTENT|
-			       BTREE_ITER_SLOTS|
-			       BTREE_ITER_WITH_UPDATES);
+			       BTREE_ITER_intent|
+			       BTREE_ITER_slots|
+			       BTREE_ITER_with_updates);
 	ret = bkey_err(k);
 	if (ret)
 		goto err;
@@ -213,7 +212,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,
 
 	if (gen >= 0) {
 		k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc,
-				       bucket, BTREE_ITER_CACHED|iter_flags);
+				       bucket, BTREE_ITER_cached|iter_flags);
 		ret = bkey_err(k);
 		if (ret)
 			goto out;
@@ -367,7 +366,7 @@ static int bch2_check_btree_backpointer(struct btree_trans *trans, struct btree_
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
 
-	if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
+	if (fsck_err_on(!bch2_dev_exists(c, k.k->p.inode), c,
 			backpointer_to_missing_device,
 			"backpointer for missing device:\n%s",
 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -460,7 +459,7 @@ found:
 
 	bytes = p.crc.compressed_size << 9;
 
-	struct bch_dev *ca = bch_dev_bkey_exists(c, dev);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, dev);
 	if (!bch2_dev_get_ioref(ca, READ))
 		return false;
 
@@ -760,7 +759,7 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
 
 		__for_each_btree_node(trans, iter, btree,
 				      btree == start.btree ? start.pos : POS_MIN,
-				      0, depth, BTREE_ITER_PREFETCH, b, ret) {
+				      0, depth, BTREE_ITER_prefetch, b, ret) {
 			mem_may_pin -= btree_buf_bytes(b);
 			if (mem_may_pin <= 0) {
 				c->btree_cache.pinned_nodes_end = *end =
@@ -794,31 +793,13 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
 
 	while (level >= depth) {
 		struct btree_iter iter;
-		bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0,
-					  level,
-					  BTREE_ITER_PREFETCH);
-		while (1) {
-			bch2_trans_begin(trans);
-
-			struct bkey_s_c k = bch2_btree_iter_peek(&iter);
-			if (!k.k)
-				break;
-			ret = bkey_err(k) ?:
-				check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
-				bch2_trans_commit(trans, NULL, NULL,
-						  BCH_TRANS_COMMIT_no_enospc);
-			if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
-				ret = 0;
-				continue;
-			}
-			if (ret)
-				break;
-			if (bpos_eq(iter.pos, SPOS_MAX))
-				break;
-			bch2_btree_iter_advance(&iter);
-		}
-		bch2_trans_iter_exit(trans, &iter);
+		bch2_trans_node_iter_init(trans, &iter, btree_id, POS_MIN, 0, level,
+					  BTREE_ITER_prefetch);
+
+		ret = for_each_btree_key_continue(trans, iter, 0, k, ({
+			check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
+			bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
+		}));
 		if (ret)
 			return ret;
 
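The refactor above replaces the open-coded restart loop (begin the transaction, peek, treat BCH_ERR_transaction_restart as "retry the same position", advance) with the for_each_btree_key_continue() helper that packages the pattern. A hedged userspace sketch of that control-flow shape, with a callback and a synthetic RESTART code standing in for lock contention; none of the names below are the real bcachefs macros:

#include <stdio.h>

#define RESTART (-100)	/* stand-in for -BCH_ERR_transaction_restart */

/* Body callback: process the key at pos; may fail transiently */
static int process_key(int pos)
{
	static int faked;	/* fail once, at pos 2, to force a retry */
	if (pos == 2 && !faked++) {
		printf("  pos %d: restart, retrying\n", pos);
		return RESTART;
	}
	printf("  pos %d: committed\n", pos);
	return 0;
}

/* Shape of the for_each helper: iterate keys, and on a transaction
 * restart redo the same position instead of giving up. */
static int for_each_key(int nr, int (*fn)(int))
{
	for (int pos = 0; pos < nr;) {
		int ret = fn(pos);
		if (ret == RESTART)
			continue;	/* re-run this position */
		if (ret)
			return ret;	/* hard error: stop */
		pos++;
	}
	return 0;
}

int main(void)
{
	return for_each_key(5, process_key);
}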
@@ -936,7 +917,7 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
 	struct bpos last_flushed_pos = SPOS_MAX;
 
 	return for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
-				  POS_MIN, BTREE_ITER_PREFETCH, k,
+				  POS_MIN, BTREE_ITER_prefetch, k,
 				  NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
 		check_one_backpointer(trans, start, end,
 				      bkey_s_c_to_backpointer(k),
libbcachefs/backpointers.h
@@ -39,7 +39,7 @@ void bch2_backpointer_swab(struct bkey_s);
 static inline struct bpos bp_pos_to_bucket(const struct bch_fs *c,
					   struct bpos bp_pos)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bp_pos.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bp_pos.inode);
 	u64 bucket_sector = bp_pos.offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT;
 
 	return POS(bp_pos.inode, sector_to_bucket(ca, bucket_sector));
@@ -52,7 +52,7 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
					   struct bpos bucket,
					   u64 bucket_offset)
 {
-	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, bucket.inode);
 	struct bpos ret = POS(bucket.inode,
			      (bucket_to_sector(ca, bucket.offset) <<
			       MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
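Both helpers above shift by MAX_EXTENT_COMPRESS_RATIO_SHIFT so that each bucket owns a contiguous window of backpointer offsets wide enough for one backpointer per compressed sector. A standalone round-trip of the arithmetic; the shift value 10, the bucket size, and the sample numbers are assumptions chosen for the demo, not the filesystem's real geometry:

#include <stdio.h>
#include <stdint.h>

#define MAX_EXTENT_COMPRESS_RATIO_SHIFT 10	/* value assumed for the demo */

static const uint64_t bucket_size = 128;	/* sectors per bucket (made up) */

static uint64_t sector_to_bucket(uint64_t s) { return s / bucket_size; }
static uint64_t bucket_to_sector(uint64_t b) { return b * bucket_size; }

/* bucket_pos_to_bp(): bucket + intra-bucket offset -> backpointer offset */
static uint64_t bucket_pos_to_bp(uint64_t bucket, uint64_t bucket_offset)
{
	return (bucket_to_sector(bucket) << MAX_EXTENT_COMPRESS_RATIO_SHIFT)
		+ bucket_offset;
}

/* bp_pos_to_bucket(): backpointer offset -> owning bucket */
static uint64_t bp_pos_to_bucket(uint64_t bp_offset)
{
	return sector_to_bucket(bp_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT);
}

int main(void)
{
	uint64_t bucket = 7, off = 42;
	uint64_t bp = bucket_pos_to_bp(bucket, off);

	/* round-trips as long as off < bucket_size << SHIFT */
	printf("bp offset %llu -> bucket %llu (expect 7)\n",
	       (unsigned long long)bp,
	       (unsigned long long)bp_pos_to_bucket(bp));
	return 0;
}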
libbcachefs/bcachefs.h
@@ -359,6 +359,8 @@ do {									\
 #define BCH_DEBUG_PARAMS_ALWAYS()					\
	BCH_DEBUG_PARAM(key_merging_disabled,				\
		"Disables merging of extents")				\
+	BCH_DEBUG_PARAM(btree_node_merging_disabled,			\
+		"Disables merging of btree nodes")			\
	BCH_DEBUG_PARAM(btree_gc_always_rewrite,			\
		"Causes mark and sweep to compact and rewrite every "	\
		"btree node it traverses")				\
@@ -468,6 +470,7 @@ enum bch_time_stats {
 #include "quota_types.h"
 #include "rebalance_types.h"
 #include "replicas_types.h"
+#include "sb-members_types.h"
 #include "subvolume_types.h"
 #include "super_types.h"
 #include "thread_with_file_types.h"
@@ -1250,11 +1253,6 @@ static inline s64 bch2_current_time(const struct bch_fs *c)
 	return timespec_to_bch2_time(c, now);
 }
 
-static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
-{
-	return dev < c->sb.nr_devices && c->devs[dev];
-}
-
 static inline struct stdio_redirect *bch2_fs_stdio_redirect(struct bch_fs *c)
 {
 	struct stdio_redirect *stdio = c->stdio;
libbcachefs/bcachefs_format.h
@@ -1504,7 +1504,8 @@ enum btree_id_flags {
	  BIT_ULL(KEY_TYPE_stripe))					\
	x(reflink,		7,	BTREE_ID_EXTENTS|BTREE_ID_DATA,	\
	  BIT_ULL(KEY_TYPE_reflink_v)|					\
-	  BIT_ULL(KEY_TYPE_indirect_inline_data))			\
+	  BIT_ULL(KEY_TYPE_indirect_inline_data)|			\
+	  BIT_ULL(KEY_TYPE_error))					\
	x(subvolumes,		8,	0,				\
	  BIT_ULL(KEY_TYPE_subvolume))					\
	x(snapshots,		9,	0,				\
libbcachefs/bkey_methods.h
@@ -29,7 +29,8 @@ struct bkey_ops {
	bool		(*key_normalize)(struct bch_fs *, struct bkey_s);
	bool		(*key_merge)(struct bch_fs *, struct bkey_s, struct bkey_s_c);
	int		(*trigger)(struct btree_trans *, enum btree_id, unsigned,
-				   struct bkey_s_c, struct bkey_s, unsigned);
+				   struct bkey_s_c, struct bkey_s,
+				   enum btree_iter_update_trigger_flags);
	void		(*compat)(enum btree_id id, unsigned version,
				  unsigned big_endian, int write,
				  struct bkey_s);
@@ -76,56 +77,10 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b
 
 bool bch2_bkey_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
 
-enum btree_update_flags {
-	__BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
-	__BTREE_UPDATE_NOJOURNAL,
-	__BTREE_UPDATE_KEY_CACHE_RECLAIM,
-
-	__BTREE_TRIGGER_NORUN,
-	__BTREE_TRIGGER_TRANSACTIONAL,
-	__BTREE_TRIGGER_ATOMIC,
-	__BTREE_TRIGGER_GC,
-	__BTREE_TRIGGER_INSERT,
-	__BTREE_TRIGGER_OVERWRITE,
-	__BTREE_TRIGGER_BUCKET_INVALIDATE,
-};
-
-#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
-#define BTREE_UPDATE_NOJOURNAL		(1U << __BTREE_UPDATE_NOJOURNAL)
-#define BTREE_UPDATE_KEY_CACHE_RECLAIM	(1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
-
-/* Don't run triggers at all */
-#define BTREE_TRIGGER_NORUN		(1U << __BTREE_TRIGGER_NORUN)
-
-/*
- * If set, we're running transactional triggers as part of a transaction commit:
- * triggers may generate new updates
- *
- * If cleared, and either BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE are set,
- * we're running atomic triggers during a transaction commit: we have our
- * journal reservation, we're holding btree node write locks, and we know the
- * transaction is going to commit (returning an error here is a fatal error,
- * causing us to go emergency read-only)
- */
-#define BTREE_TRIGGER_TRANSACTIONAL	(1U << __BTREE_TRIGGER_TRANSACTIONAL)
-#define BTREE_TRIGGER_ATOMIC		(1U << __BTREE_TRIGGER_ATOMIC)
-
-/* We're in gc/fsck: running triggers to recalculate e.g. disk usage */
-#define BTREE_TRIGGER_GC		(1U << __BTREE_TRIGGER_GC)
-
-/* @new is entering the btree */
-#define BTREE_TRIGGER_INSERT		(1U << __BTREE_TRIGGER_INSERT)
-
-/* @old is leaving the btree */
-#define BTREE_TRIGGER_OVERWRITE	(1U << __BTREE_TRIGGER_OVERWRITE)
-
-/* signal from bucket invalidate path to alloc trigger */
-#define BTREE_TRIGGER_BUCKET_INVALIDATE	(1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
-
 static inline int bch2_key_trigger(struct btree_trans *trans,
				   enum btree_id btree, unsigned level,
				   struct bkey_s_c old, struct bkey_s new,
-				   unsigned flags)
+				   enum btree_iter_update_trigger_flags flags)
 {
	const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new.k->type);
 
@@ -135,8 +90,9 @@ static inline int bch2_key_trigger(struct btree_trans *trans,
 }
 
 static inline int bch2_key_trigger_old(struct btree_trans *trans,
-				       enum btree_id btree_id, unsigned level,
-				       struct bkey_s_c old, unsigned flags)
+				enum btree_id btree_id, unsigned level,
+				struct bkey_s_c old,
+				enum btree_iter_update_trigger_flags flags)
 {
	struct bkey_i deleted;
 
@@ -144,12 +100,13 @@ static inline int bch2_key_trigger_old(struct btree_trans *trans,
	deleted.k.p = old.k->p;
 
	return bch2_key_trigger(trans, btree_id, level, old, bkey_i_to_s(&deleted),
-				BTREE_TRIGGER_OVERWRITE|flags);
+				BTREE_TRIGGER_overwrite|flags);
 }
 
 static inline int bch2_key_trigger_new(struct btree_trans *trans,
-				       enum btree_id btree_id, unsigned level,
-				       struct bkey_s new, unsigned flags)
+				enum btree_id btree_id, unsigned level,
+				struct bkey_s new,
+				enum btree_iter_update_trigger_flags flags)
 {
	struct bkey_i deleted;
 
@@ -157,7 +114,7 @@ static inline int bch2_key_trigger_new(struct btree_trans *trans,
	deleted.k.p = new.k->p;
 
	return bch2_key_trigger(trans, btree_id, level, bkey_i_to_s_c(&deleted), new,
-				BTREE_TRIGGER_INSERT|flags);
+				BTREE_TRIGGER_insert|flags);
 }
 
 void bch2_bkey_renumber(enum btree_node_type, struct bkey_packed *, int);
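Across these hunks the old anonymous #define flag bits give way to one typed enum, btree_iter_update_trigger_flags, so trigger signatures now document which flag namespace they accept. A compilable sketch of that macros-to-enum pattern; the bit positions below are placeholders mirroring the idea, not the values in the real header:

#include <stdio.h>

/* Old style: anonymous bits behind #defines, e.g.
 *   #define BTREE_TRIGGER_INSERT    (1U << 4)
 * New style: one named enum, so function signatures can say exactly
 * which flag family they expect. */
enum btree_iter_update_trigger_flags {
	BTREE_TRIGGER_norun		= 1U << 0,
	BTREE_TRIGGER_transactional	= 1U << 1,
	BTREE_TRIGGER_atomic		= 1U << 2,
	BTREE_TRIGGER_gc		= 1U << 3,
	BTREE_TRIGGER_insert		= 1U << 4,
	BTREE_TRIGGER_overwrite		= 1U << 5,
	BTREE_TRIGGER_bucket_invalidate	= 1U << 6,
};

/* Taking the enum instead of a bare 'unsigned' documents the contract */
static int run_trigger(enum btree_iter_update_trigger_flags flags)
{
	if (flags & BTREE_TRIGGER_norun)
		return 0;
	if (flags & BTREE_TRIGGER_insert)
		printf("new key entering the btree\n");
	if (flags & BTREE_TRIGGER_overwrite)
		printf("old key leaving the btree\n");
	return 0;
}

int main(void)
{
	return run_trigger(BTREE_TRIGGER_insert | BTREE_TRIGGER_atomic);
}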
libbcachefs/btree_gc.c
@@ -52,12 +52,6 @@ static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
	}}};
 }
 
-static bool should_restart_for_topology_repair(struct bch_fs *c)
-{
-	return c->opts.fix_errors != FSCK_FIX_no &&
-		!(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
-}
-
 static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
 {
	preempt_disable();
@@ -546,9 +540,9 @@ reconstruct_root:
		if (!bch2_btree_has_scanned_nodes(c, i)) {
			mustfix_fsck_err(c, btree_root_unreadable_and_scan_found_nothing,
					 "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
-			bch2_btree_root_alloc_fake(c, i, 0);
+			bch2_btree_root_alloc_fake_trans(trans, i, 0);
		} else {
-			bch2_btree_root_alloc_fake(c, i, 1);
+			bch2_btree_root_alloc_fake_trans(trans, i, 1);
			bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
			ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
			if (ret)
@@ -576,7 +570,7 @@ reconstruct_root:
				goto reconstruct_root;
 
			bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
-			bch2_btree_root_alloc_fake(c, i, 0);
+			bch2_btree_root_alloc_fake_trans(trans, i, 0);
			r->alive = false;
			ret = 0;
		}
@@ -603,7 +597,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
	 * use check_bucket_ref here
	 */
	bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+		struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
		enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, p, entry_c);
 
@@ -736,7 +730,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
		 */
		struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
		bkey_for_each_ptr(ptrs, ptr) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
			struct bucket *g = PTR_GC_BUCKET(ca, ptr);
 
			ptr->gen = g->gen;
@@ -747,7 +741,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
restart_drop_ptrs:
		ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
		bkey_for_each_ptr_decode(bkey_i_to_s(new).k, ptrs, p, entry) {
-			struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
+			struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
			struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
			enum bch_data_type data_type = bch2_bkey_ptr_data_type(bkey_i_to_s_c(new), p, entry);
 
@@ -862,7 +856,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
 
		ret = commit_do(trans, NULL, NULL, 0,
				bch2_key_trigger(trans, btree_id, level, old,
-						 unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
+						 unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_gc));
fsck_err:
err:
	printbuf_exit(&buf);
@@ -872,8 +866,7 @@ err:
 
 static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
 {
-	struct btree_node_iter iter;
-	struct bkey unpacked;
+	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	int ret = 0;
 
@@ -881,36 +874,33 @@ static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
	if (ret)
		return ret;
 
-	if (!btree_node_type_needs_gc(btree_node_type(b)))
-		return 0;
+	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
 
-	bch2_btree_node_iter_init_from_start(&iter, b);
-
-	while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
-		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
-				       &k, initial);
+	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
+		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false, &k, initial);
		if (ret)
-			return ret;
+			break;
 
-		bch2_btree_node_iter_advance(&iter, b);
+		bch2_btree_and_journal_iter_advance(&iter);
	}
 
-	return 0;
+	bch2_btree_and_journal_iter_exit(&iter);
+	return ret;
 }
 
 static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
-			 bool initial, bool metadata_only)
+			 bool initial)
 {
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct btree *b;
-	unsigned depth = metadata_only ? 1 : 0;
+	unsigned target_depth = btree_type_has_ptrs(btree_id) ? 0 : 1;
	int ret = 0;
 
	gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));
 
	__for_each_btree_node(trans, iter, btree_id, POS_MIN,
-			      0, depth, BTREE_ITER_PREFETCH, b, ret) {
+			      0, target_depth, BTREE_ITER_prefetch, b, ret) {
		bch2_verify_btree_nr_keys(b);
 
		gc_pos_set(c, gc_pos_btree_node(b));
@@ -941,126 +931,61 @@ static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
 static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
				      unsigned target_depth)
 {
-	struct bch_fs *c = trans->c;
-	struct btree_and_journal_iter iter;
-	struct bkey_s_c k;
-	struct bkey_buf cur;
-	struct printbuf buf = PRINTBUF;
-	int ret = 0;
-
-	ret = bch2_btree_node_check_topology(trans, b);
+	int ret = btree_gc_mark_node(trans, b, true);
	if (ret)
		return ret;
 
-	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
-	bch2_bkey_buf_init(&cur);
-
-	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
-		BUG_ON(bpos_gt(k.k->p, b->data->max_key));
-
-		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
-				       false, &k, true);
-		if (ret)
-			goto fsck_err;
-
-		bch2_btree_and_journal_iter_advance(&iter);
-	}
-
	if (b->c.level > target_depth) {
-		bch2_btree_and_journal_iter_exit(&iter);
+		struct bch_fs *c = trans->c;
+		struct btree_and_journal_iter iter;
+		struct bkey_s_c k;
+		struct bkey_buf cur;
+
+		bch2_bkey_buf_init(&cur);
		bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
		iter.prefetch = true;
 
		while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
-			struct btree *child;
-
			bch2_bkey_buf_reassemble(&cur, c, k);
			bch2_btree_and_journal_iter_advance(&iter);
 
-			child = bch2_btree_node_get_noiter(trans, cur.k,
+			struct btree *child =
+				bch2_btree_node_get_noiter(trans, cur.k,
						b->c.btree_id, b->c.level - 1,
						false);
			ret = PTR_ERR_OR_ZERO(child);
 
-			if (bch2_err_matches(ret, EIO)) {
-				bch2_topology_error(c);
-
-				if (__fsck_err(c,
-					  FSCK_CAN_FIX|
-					  FSCK_CAN_IGNORE|
-					  FSCK_NO_RATELIMIT,
-					  btree_node_read_error,
-					  "Unreadable btree node at btree %s level %u:\n"
-					  "  %s",
-					  bch2_btree_id_str(b->c.btree_id),
-					  b->c.level - 1,
-					  (printbuf_reset(&buf),
-					   bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
-				    should_restart_for_topology_repair(c)) {
-					bch_info(c, "Halting mark and sweep to start topology repair pass");
-					ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
-					goto fsck_err;
-				} else {
-					/* Continue marking when opted to not
-					 * fix the error: */
-					ret = 0;
-					set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
-					continue;
-				}
-			} else if (ret) {
-				bch_err_msg(c, ret, "getting btree node");
+			bch_err_msg(c, ret, "getting btree node");
+			if (ret)
				break;
-			}
 
-			ret = bch2_gc_btree_init_recurse(trans, child,
-							 target_depth);
+			ret = bch2_gc_btree_init_recurse(trans, child, target_depth);
			six_unlock_read(&child->c.lock);
 
			if (ret)
				break;
		}
-	}
-fsck_err:
-	bch2_bkey_buf_exit(&cur, c);
-	bch2_btree_and_journal_iter_exit(&iter);
-	printbuf_exit(&buf);
+
+		bch2_bkey_buf_exit(&cur, c);
+		bch2_btree_and_journal_iter_exit(&iter);
+	}
 
	return ret;
 }
 
 static int bch2_gc_btree_init(struct btree_trans *trans,
-			      enum btree_id btree_id,
-			      bool metadata_only)
+			      enum btree_id btree_id)
 {
	struct bch_fs *c = trans->c;
-	struct btree *b;
-	unsigned target_depth = metadata_only ? 1 : 0;
-	struct printbuf buf = PRINTBUF;
+	/*
+	 * We need to make sure every leaf node is readable before going RW
+	unsigned target_depth = btree_node_type_needs_gc(__btree_node_type(0, btree_id)) ? 0 : 1;
+	 */
+	unsigned target_depth = 0;
	int ret = 0;
 
-	b = bch2_btree_id_root(c, btree_id)->b;
+	struct btree *b = bch2_btree_id_root(c, btree_id)->b;
 
	six_lock_read(&b->c.lock, NULL, NULL);
-	printbuf_reset(&buf);
-	bch2_bpos_to_text(&buf, b->data->min_key);
-	if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
-				btree_root_bad_min_key,
-			"btree root with incorrect min_key: %s", buf.buf)) {
-		bch_err(c, "repair unimplemented");
-		ret = -BCH_ERR_fsck_repair_unimplemented;
-		goto fsck_err;
-	}
-
-	printbuf_reset(&buf);
-	bch2_bpos_to_text(&buf, b->data->max_key);
-	if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
-				btree_root_bad_max_key,
-			"btree root with incorrect max_key: %s", buf.buf)) {
-		bch_err(c, "repair unimplemented");
-		ret = -BCH_ERR_fsck_repair_unimplemented;
-		goto fsck_err;
-	}
-
	if (b->c.level >= target_depth)
		ret = bch2_gc_btree_init_recurse(trans, b, target_depth);
 
@@ -1070,11 +995,9 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, true,
				       &k, true);
	}
-fsck_err:
	six_unlock_read(&b->c.lock);
 
	bch_err_fn(c, ret);
-	printbuf_exit(&buf);
	return ret;
 }
 
@@ -1084,7 +1007,7 @@ static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
		(int) btree_id_to_gc_phase(r);
 }
 
-static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
+static int bch2_gc_btrees(struct bch_fs *c, bool initial)
 {
	struct btree_trans *trans = bch2_trans_get(c);
	enum btree_id ids[BTREE_ID_NR];
@@ -1095,98 +1018,38 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
		ids[i] = i;
	bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
 
-	for (i = 0; i < BTREE_ID_NR && !ret; i++)
-		ret = initial
-			? bch2_gc_btree_init(trans, ids[i], metadata_only)
-			: bch2_gc_btree(trans, ids[i], initial, metadata_only);
+	for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
+		unsigned btree = i < BTREE_ID_NR ? ids[i] : i;
 
-	for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
-		if (!bch2_btree_id_root(c, i)->alive)
+		if (IS_ERR_OR_NULL(bch2_btree_id_root(c, btree)->b))
			continue;
 
		ret = initial
-			? bch2_gc_btree_init(trans, i, metadata_only)
-			: bch2_gc_btree(trans, i, initial, metadata_only);
-	}
+			? bch2_gc_btree_init(trans, btree)
+			: bch2_gc_btree(trans, btree, initial);
+
+		if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
+					c, btree_node_read_error,
+					"btree node read error for %s",
+					bch2_btree_id_str(btree)))
+			ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
+	}
+fsck_err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
 }
 
-static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
-				  u64 start, u64 end,
-				  enum bch_data_type type,
-				  unsigned flags)
-{
-	u64 b = sector_to_bucket(ca, start);
-
-	do {
-		unsigned sectors =
-			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
-
-		bch2_mark_metadata_bucket(c, ca, b, type, sectors,
-					  gc_phase(GC_PHASE_SB), flags);
-		b++;
-		start += sectors;
-	} while (start < end);
-}
-
-static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
-				     unsigned flags)
-{
-	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
-	unsigned i;
-	u64 b;
-
-	for (i = 0; i < layout->nr_superblocks; i++) {
-		u64 offset = le64_to_cpu(layout->sb_offset[i]);
-
-		if (offset == BCH_SB_SECTOR)
-			mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
-					      BCH_DATA_sb, flags);
-
-		mark_metadata_sectors(c, ca, offset,
-				      offset + (1 << layout->sb_max_size_bits),
-				      BCH_DATA_sb, flags);
-	}
-
-	for (i = 0; i < ca->journal.nr; i++) {
-		b = ca->journal.buckets[i];
-		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
-					  ca->mi.bucket_size,
-					  gc_phase(GC_PHASE_SB), flags);
-	}
-}
-
-static void bch2_mark_superblocks(struct bch_fs *c)
+static int bch2_mark_superblocks(struct bch_fs *c)
 {
	mutex_lock(&c->sb_lock);
	gc_pos_set(c, gc_phase(GC_PHASE_SB));
 
-	for_each_online_member(c, ca)
-		bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
+	int ret = bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_gc);
	mutex_unlock(&c->sb_lock);
+	return ret;
 }
 
-#if 0
-/* Also see bch2_pending_btree_node_free_insert_done() */
-static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
-{
-	struct btree_update *as;
-	struct pending_btree_node_free *d;
-
-	mutex_lock(&c->btree_interior_update_lock);
-	gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));
-
-	for_each_pending_btree_node_free(c, as, d)
-		if (d->index_update_done)
-			bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);
-
-	mutex_unlock(&c->btree_interior_update_lock);
-}
-#endif
-
 static void bch2_gc_free(struct bch_fs *c)
 {
	genradix_free(&c->reflink_gc_table);
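The deleted mark_metadata_sectors() helper (its job now goes through the transactional path via bch2_trans_mark_dev_sbs_flags()) split a sector range into per-bucket chunks: clamp each chunk at the next bucket boundary, account it, advance. A self-contained replay of that loop with toy geometry; the 8-sector bucket size and the sample range are made-up values for the demo:

#include <stdio.h>
#include <stdint.h>

static const uint64_t bucket_size = 8;	/* sectors per bucket (made up) */

static uint64_t sector_to_bucket(uint64_t s) { return s / bucket_size; }
static uint64_t bucket_to_sector(uint64_t b) { return b * bucket_size; }

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }

/* Same walk as the deleted helper: chop [start, end) at bucket
 * boundaries and account each chunk to its owning bucket. */
static void mark_metadata_sectors(uint64_t start, uint64_t end)
{
	uint64_t b = sector_to_bucket(start);

	do {
		uint64_t sectors =
			min_u64(bucket_to_sector(b + 1), end) - start;

		printf("bucket %llu: %llu sectors\n",
		       (unsigned long long)b, (unsigned long long)sectors);
		b++;
		start += sectors;
	} while (start < end);
}

int main(void)
{
	/* a region spanning sectors [3, 21): partial, full, partial bucket */
	mark_metadata_sectors(3, 21);
	return 0;
}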
@ -1204,28 +1067,23 @@ static void bch2_gc_free(struct bch_fs *c)
|
||||
c->usage_gc = NULL;
|
||||
}
|
||||
|
||||
static int bch2_gc_done(struct bch_fs *c,
|
||||
bool initial, bool metadata_only)
|
||||
static int bch2_gc_done(struct bch_fs *c)
|
||||
{
|
||||
struct bch_dev *ca = NULL;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
bool verify = !metadata_only &&
|
||||
!c->opts.reconstruct_alloc &&
|
||||
(!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
|
||||
unsigned i;
|
||||
int ret = 0;
|
||||
|
||||
percpu_down_write(&c->mark_lock);
|
||||
|
||||
#define copy_field(_err, _f, _msg, ...) \
|
||||
if (dst->_f != src->_f && \
|
||||
(!verify || \
|
||||
fsck_err(c, _err, _msg ": got %llu, should be %llu" \
|
||||
, ##__VA_ARGS__, dst->_f, src->_f))) \
|
||||
#define copy_field(_err, _f, _msg, ...) \
|
||||
if (fsck_err_on(dst->_f != src->_f, c, _err, \
|
||||
_msg ": got %llu, should be %llu" , ##__VA_ARGS__, \
|
||||
dst->_f, src->_f)) \
|
||||
dst->_f = src->_f
|
||||
#define copy_dev_field(_err, _f, _msg, ...) \
|
||||
#define copy_dev_field(_err, _f, _msg, ...) \
|
||||
copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
|
||||
#define copy_fs_field(_err, _f, _msg, ...) \
|
||||
#define copy_fs_field(_err, _f, _msg, ...) \
|
||||
copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
|
||||
@@ -1258,31 +1116,24 @@ static int bch2_gc_done(struct bch_fs *c,
		copy_fs_field(fs_usage_btree_wrong,
			      b.btree,	"btree");
 
-		if (!metadata_only) {
-			copy_fs_field(fs_usage_data_wrong,
-				      b.data,	"data");
-			copy_fs_field(fs_usage_cached_wrong,
-				      b.cached,	"cached");
-			copy_fs_field(fs_usage_reserved_wrong,
-				      b.reserved,	"reserved");
-			copy_fs_field(fs_usage_nr_inodes_wrong,
-				      b.nr_inodes,"nr_inodes");
+		copy_fs_field(fs_usage_data_wrong,
+			      b.data,	"data");
+		copy_fs_field(fs_usage_cached_wrong,
+			      b.cached,	"cached");
+		copy_fs_field(fs_usage_reserved_wrong,
+			      b.reserved,	"reserved");
+		copy_fs_field(fs_usage_nr_inodes_wrong,
+			      b.nr_inodes,"nr_inodes");
 
-			for (i = 0; i < BCH_REPLICAS_MAX; i++)
-				copy_fs_field(fs_usage_persistent_reserved_wrong,
-					      persistent_reserved[i],
-					      "persistent_reserved[%i]", i);
-		}
+		for (i = 0; i < BCH_REPLICAS_MAX; i++)
+			copy_fs_field(fs_usage_persistent_reserved_wrong,
+				      persistent_reserved[i],
+				      "persistent_reserved[%i]", i);
 
		for (i = 0; i < c->replicas.nr; i++) {
			struct bch_replicas_entry_v1 *e =
				cpu_replicas_entry(&c->replicas, i);
 
-			if (metadata_only &&
-			    (e->data_type == BCH_DATA_user ||
-			     e->data_type == BCH_DATA_cached))
-				continue;
-
			printbuf_reset(&buf);
			bch2_replicas_entry_to_text(&buf, e);
 
@@ -1361,11 +1212,10 @@ static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
 
 static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
-				struct bkey_s_c k,
-				bool metadata_only)
+				struct bkey_s_c k)
 {
	struct bch_fs *c = trans->c;
-	struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
+	struct bch_dev *ca = bch2_dev_bkey_exists(c, iter->pos.inode);
	struct bucket old_gc, gc, *b;
	struct bkey_i_alloc_v4 *a;
	struct bch_alloc_v4 old_convert, new;
@@ -1402,12 +1252,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
	bch2_dev_usage_update_m(c, ca, &old_gc, &gc);
	percpu_up_read(&c->mark_lock);
 
-	if (metadata_only &&
-	    gc.data_type != BCH_DATA_sb &&
-	    gc.data_type != BCH_DATA_journal &&
-	    gc.data_type != BCH_DATA_btree)
-		return 0;
-
	if (gen_after(old->gen, gc.gen))
		return 0;
 
@ -1460,12 +1304,12 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
|
||||
if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
|
||||
a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
|
||||
|
||||
ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
|
||||
ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
|
||||
fsck_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
|
||||
static int bch2_gc_alloc_done(struct bch_fs *c)
|
||||
{
|
||||
int ret = 0;
|
||||
|
||||
@ -1474,9 +1318,9 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
|
||||
for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
|
||||
POS(ca->dev_idx, ca->mi.first_bucket),
|
||||
POS(ca->dev_idx, ca->mi.nbuckets - 1),
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ITER_slots|BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
|
||||
bch2_alloc_write_key(trans, &iter, k, metadata_only)));
|
||||
bch2_alloc_write_key(trans, &iter, k)));
|
||||
if (ret) {
|
||||
percpu_ref_put(&ca->ref);
|
||||
break;
|
||||
@ -1487,7 +1331,7 @@ static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
|
||||
static int bch2_gc_alloc_start(struct bch_fs *c)
|
||||
{
|
||||
for_each_member_device(c, ca) {
|
||||
struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
|
||||
@ -1506,8 +1350,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
|
||||
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
|
||||
BTREE_ITER_PREFETCH, k, ({
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
|
||||
BTREE_ITER_prefetch, k, ({
|
||||
struct bch_dev *ca = bch2_dev_bkey_exists(c, k.k->p.inode);
|
||||
struct bucket *g = gc_bucket(ca, k.k->p.offset);
|
||||
|
||||
struct bch_alloc_v4 a_convert;
|
||||
@ -1515,36 +1359,19 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
|
||||
|
||||
g->gen_valid = 1;
|
||||
g->gen = a->gen;
|
||||
|
||||
if (metadata_only &&
|
||||
(a->data_type == BCH_DATA_user ||
|
||||
a->data_type == BCH_DATA_cached ||
|
||||
a->data_type == BCH_DATA_parity)) {
|
||||
g->data_type = a->data_type;
|
||||
g->dirty_sectors = a->dirty_sectors;
|
||||
g->cached_sectors = a->cached_sectors;
|
||||
g->stripe = a->stripe;
|
||||
g->stripe_redundancy = a->stripe_redundancy;
|
||||
}
|
||||
|
||||
0;
|
||||
})));
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
|
||||
static void bch2_gc_alloc_reset(struct bch_fs *c)
|
||||
{
|
||||
for_each_member_device(c, ca) {
|
||||
struct bucket_array *buckets = gc_bucket_array(ca);
|
||||
struct bucket *g;
|
||||
|
||||
for_each_bucket(g, buckets) {
|
||||
if (metadata_only &&
|
||||
(g->data_type == BCH_DATA_user ||
|
||||
g->data_type == BCH_DATA_cached ||
|
||||
g->data_type == BCH_DATA_parity))
|
||||
continue;
|
||||
g->data_type = 0;
|
||||
g->dirty_sectors = 0;
|
||||
g->cached_sectors = 0;
|
||||
@ -1600,35 +1427,27 @@ fsck_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
|
||||
static int bch2_gc_reflink_done(struct bch_fs *c)
|
||||
{
|
||||
size_t idx = 0;
|
||||
|
||||
if (metadata_only)
|
||||
return 0;
|
||||
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
BTREE_ID_reflink, POS_MIN,
|
||||
BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
|
||||
c->reflink_gc_nr = 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_gc_reflink_start(struct bch_fs *c,
|
||||
bool metadata_only)
|
||||
static int bch2_gc_reflink_start(struct bch_fs *c)
|
||||
{
|
||||
|
||||
if (metadata_only)
|
||||
return 0;
|
||||
|
||||
c->reflink_gc_nr = 0;
|
||||
|
||||
int ret = bch2_trans_run(c,
|
||||
for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
|
||||
BTREE_ITER_PREFETCH, k, ({
|
||||
BTREE_ITER_prefetch, k, ({
|
||||
const __le64 *refcount = bkey_refcount_c(k);
|
||||
|
||||
if (!refcount)
|
||||
@ -1651,7 +1470,7 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
|
||||
static void bch2_gc_reflink_reset(struct bch_fs *c)
|
||||
{
|
||||
struct genradix_iter iter;
|
||||
struct reflink_gc *r;
|
||||
@ -1713,20 +1532,17 @@ fsck_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
|
||||
static int bch2_gc_stripes_done(struct bch_fs *c)
|
||||
{
|
||||
if (metadata_only)
|
||||
return 0;
|
||||
|
||||
return bch2_trans_run(c,
|
||||
for_each_btree_key_commit(trans, iter,
|
||||
BTREE_ID_stripes, POS_MIN,
|
||||
BTREE_ITER_PREFETCH, k,
|
||||
BTREE_ITER_prefetch, k,
|
||||
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
|
||||
bch2_gc_write_stripes_key(trans, &iter, k)));
|
||||
}
|
||||
|
||||
static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
|
||||
static void bch2_gc_stripes_reset(struct bch_fs *c)
|
||||
{
|
||||
genradix_free(&c->gc_stripes);
|
||||
}
|
||||
@ -1736,7 +1552,6 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
|
||||
*
|
||||
* @c: filesystem object
|
||||
* @initial: are we in recovery?
|
||||
* @metadata_only: are we just checking metadata references, or everything?
|
||||
*
|
||||
* Returns: 0 on success, or standard errcode on failure
|
||||
*
|
||||
@ -1755,7 +1570,7 @@ static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
|
||||
* move around - if references move backwards in the ordering GC
|
||||
* uses, GC could skip past them
|
||||
*/
|
||||
int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
|
||||
static int bch2_gc(struct bch_fs *c, bool initial)
|
||||
{
|
||||
unsigned iter = 0;
|
||||
int ret;
|
||||
@ -1767,23 +1582,20 @@ int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
|
||||
bch2_btree_interior_updates_flush(c);
|
||||
|
||||
ret = bch2_gc_start(c) ?:
|
||||
bch2_gc_alloc_start(c, metadata_only) ?:
|
||||
bch2_gc_reflink_start(c, metadata_only);
|
||||
bch2_gc_alloc_start(c) ?:
|
||||
bch2_gc_reflink_start(c);
|
||||
if (ret)
|
||||
goto out;
|
||||
again:
|
||||
gc_pos_set(c, gc_phase(GC_PHASE_START));
|
||||
|
||||
bch2_mark_superblocks(c);
|
||||
|
||||
ret = bch2_gc_btrees(c, initial, metadata_only);
|
||||
ret = bch2_mark_superblocks(c);
|
||||
BUG_ON(ret);
|
||||
|
||||
ret = bch2_gc_btrees(c, initial);
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
#if 0
|
||||
bch2_mark_pending_btree_node_frees(c);
|
||||
#endif
|
||||
c->gc_count++;
|
||||
|
||||
if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
|
||||
@ -1801,9 +1613,9 @@ again:
|
||||
clear_bit(BCH_FS_need_another_gc, &c->flags);
|
||||
__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));
|
||||
|
||||
bch2_gc_stripes_reset(c, metadata_only);
|
||||
bch2_gc_alloc_reset(c, metadata_only);
|
||||
bch2_gc_reflink_reset(c, metadata_only);
|
||||
bch2_gc_stripes_reset(c);
|
||||
bch2_gc_alloc_reset(c);
|
||||
bch2_gc_reflink_reset(c);
|
||||
ret = bch2_gc_reset(c);
|
||||
if (ret)
|
||||
goto out;
|
||||
@ -1816,10 +1628,10 @@ out:
|
||||
if (!ret) {
|
||||
bch2_journal_block(&c->journal);
|
||||
|
||||
ret = bch2_gc_alloc_done(c, metadata_only) ?:
|
||||
bch2_gc_done(c, initial, metadata_only) ?:
|
||||
bch2_gc_stripes_done(c, metadata_only) ?:
|
||||
bch2_gc_reflink_done(c, metadata_only);
|
||||
ret = bch2_gc_alloc_done(c) ?:
|
||||
bch2_gc_done(c) ?:
|
||||
bch2_gc_stripes_done(c) ?:
|
||||
bch2_gc_reflink_done(c);
|
||||
|
||||
bch2_journal_unblock(&c->journal);
|
||||
}
|
||||
@ -1842,6 +1654,11 @@ out:
|
||||
return ret;
|
||||
}
|
||||
|
||||
int bch2_check_allocations(struct bch_fs *c)
|
||||
{
|
||||
return bch2_gc(c, true);
|
||||
}
|
||||
|
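With bch2_gc() now static, out-of-file callers reach a full mark-and-sweep pass only through the wrapper above. A hedged sketch of the expected call site (the recovery-pass wiring itself is outside this hunk, so the surrounding code is assumed):

	/* assumed caller; error reporting mirrors bch_err_fn() usage elsewhere in this diff */
	int ret = bch2_check_allocations(c);
	bch_err_fn(c, ret);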
static int gc_btree_gens_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
@ -1853,7 +1670,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,

percpu_down_read(&c->mark_lock);
bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);

if (ptr_stale(ca, ptr) > 16) {
percpu_up_read(&c->mark_lock);
@ -1862,7 +1679,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,
}

bkey_for_each_ptr(ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];

if (gen_after(*gen, ptr->gen))
@ -1883,7 +1700,7 @@ update:
static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k)
{
struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
struct bch_dev *ca = bch2_dev_bkey_exists(trans->c, iter->pos.inode);
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
struct bkey_i_alloc_v4 *a_mut;
@ -1944,7 +1761,7 @@ int bch2_gc_gens(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, i,
POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@ -1956,7 +1773,7 @@ int bch2_gc_gens(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
POS_MIN,
BTREE_ITER_PREFETCH,
BTREE_ITER_prefetch,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@ -2023,14 +1840,7 @@ static int bch2_gc_thread(void *arg)
last = atomic64_read(&clock->now);
last_kick = atomic_read(&c->kick_gc);

/*
* Full gc is currently incompatible with btree key cache:
*/
#if 0
ret = bch2_gc(c, false, false);
#else
bch2_gc_gens(c);
#endif
debug_check_no_locks_held();
}

@ -6,7 +6,7 @@
#include "btree_types.h"

int bch2_check_topology(struct bch_fs *);
int bch2_gc(struct bch_fs *, bool, bool);
int bch2_check_allocations(struct bch_fs *);
int bch2_gc_gens(struct bch_fs *);
void bch2_gc_thread_stop(struct bch_fs *);
int bch2_gc_thread_start(struct bch_fs *);

@ -23,6 +23,18 @@

#include <linux/sched/mm.h>

static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
prt_printf(out, "btree=%s l=%u seq %llux\n",
bch2_btree_id_str(BTREE_NODE_ID(bn)),
(unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
prt_str(out, "min: ");
bch2_bpos_to_text(out, bn->min_key);
prt_newline(out);
prt_str(out, "max: ");
bch2_bpos_to_text(out, bn->max_key);
}
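The new helper gives btree-node read errors a single, consistent header dump. A minimal standalone sketch of how it composes with a printbuf, where b is assumed to be a valid struct btree and the PRINTBUF/printbuf_exit() pairing follows the pattern used throughout this file:

	struct printbuf buf = PRINTBUF;
	bch2_btree_node_header_to_text(&buf, b->data);	/* btree id, level, seq, min/max keys */
	bch_info(c, "%s", buf.buf);
	printbuf_exit(&buf);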

void bch2_btree_node_io_unlock(struct btree *b)
{
EBUG_ON(!btree_node_write_in_flight(b));
@ -524,7 +536,9 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
prt_printf(out, "at btree ");
bch2_btree_pos_to_text(out, c, b);

prt_printf(out, "\n node offset %u/%u",
printbuf_indent_add(out, 2);

prt_printf(out, "\nnode offset %u/%u",
b->written, btree_ptr_sectors_written(&b->key));
if (i)
prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
@ -1021,18 +1035,19 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
-BCH_ERR_btree_node_read_err_must_retry,
c, ca, b, NULL,
btree_node_bad_seq,
"got wrong btree node (want %llx got %llx)\n"
"got btree %s level %llu pos %s",
bp->seq, b->data->keys.seq,
bch2_btree_id_str(BTREE_NODE_ID(b->data)),
BTREE_NODE_LEVEL(b->data),
buf.buf);
"got wrong btree node: got\n%s",
(printbuf_reset(&buf),
bch2_btree_node_header_to_text(&buf, b->data),
buf.buf));
} else {
btree_err_on(!b->data->keys.seq,
-BCH_ERR_btree_node_read_err_must_retry,
c, ca, b, NULL,
btree_node_bad_seq,
"bad btree header: seq 0");
"bad btree header: seq 0\n%s",
(printbuf_reset(&buf),
bch2_btree_node_header_to_text(&buf, b->data),
buf.buf));
}

while (b->written < (ptr_written ?: btree_sectors(c))) {
@ -1250,7 +1265,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_node_reset_sib_u64s(b);

bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
struct bch_dev *ca2 = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca2 = bch2_dev_bkey_exists(c, ptr->dev);

if (ca2->mi.state != BCH_MEMBER_STATE_rw)
set_btree_node_need_rewrite(b);
@ -1280,7 +1295,7 @@ static void btree_node_read_work(struct work_struct *work)
container_of(work, struct btree_read_bio, work);
struct bch_fs *c = rb->c;
struct btree *b = rb->b;
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);
struct bio *bio = &rb->bio;
struct bch_io_failures failed = { .nr = 0 };
struct printbuf buf = PRINTBUF;
@ -1292,7 +1307,7 @@ static void btree_node_read_work(struct work_struct *work)
while (1) {
retry = true;
bch_info(c, "retrying read");
ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);
rb->have_ioref = bch2_dev_get_ioref(ca, READ);
bio_reset(bio, NULL, REQ_OP_READ|REQ_SYNC|REQ_META);
bio->bi_iter.bi_sector = rb->pick.ptr.offset;
@ -1363,7 +1378,7 @@ static void btree_node_read_endio(struct bio *bio)
struct bch_fs *c = rb->c;

if (rb->have_ioref) {
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);

bch2_latency_acct(ca, rb->start_time, READ);
}
@ -1560,7 +1575,7 @@ static void btree_node_read_all_replicas_endio(struct bio *bio)
struct btree_node_read_all *ra = rb->ra;

if (rb->have_ioref) {
struct bch_dev *ca = bch_dev_bkey_exists(c, rb->pick.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, rb->pick.ptr.dev);

bch2_latency_acct(ca, rb->start_time, READ);
}
@ -1602,7 +1617,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool

i = 0;
bkey_for_each_ptr_decode(k.k, ptrs, pick, entry) {
struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
struct btree_read_bio *rb =
container_of(ra->bio[i], struct btree_read_bio, bio);
rb->c = c;
@ -1679,7 +1694,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
return;
}

ca = bch_dev_bkey_exists(c, pick.ptr.dev);
ca = bch2_dev_bkey_exists(c, pick.ptr.dev);

bio = bio_alloc_bioset(NULL,
buf_pages(b->data, btree_buf_bytes(b)),
@ -1896,7 +1911,7 @@ static void btree_node_write_endio(struct bio *bio)
struct btree_write_bio *wb = container_of(orig, struct btree_write_bio, wbio);
struct bch_fs *c = wbio->c;
struct btree *b = wbio->bio.bi_private;
struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, wbio->dev);
unsigned long flags;

if (wbio->have_ioref)
@ -2346,20 +2361,13 @@ void bch2_btree_write_stats_to_text(struct printbuf *out, struct bch_fs *c)
printbuf_tabstop_push(out, 20);
printbuf_tabstop_push(out, 10);

prt_tab(out);
prt_str(out, "nr");
prt_tab(out);
prt_str(out, "size");
prt_newline(out);
prt_printf(out, "\tnr\tsize\n");

for (unsigned i = 0; i < BTREE_WRITE_TYPE_NR; i++) {
u64 nr = atomic64_read(&c->btree_write_stats[i].nr);
u64 bytes = atomic64_read(&c->btree_write_stats[i].bytes);

prt_printf(out, "%s:", bch2_btree_write_types[i]);
prt_tab(out);
prt_u64(out, nr);
prt_tab(out);
prt_printf(out, "%s:\t%llu\t", bch2_btree_write_types[i], nr);
prt_human_readable_u64(out, nr ? div64_u64(bytes, nr) : 0);
prt_newline(out);
}

@ -61,7 +61,7 @@ static inline int btree_path_cmp(const struct btree_path *l,
static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
{
/* Are we iterating over keys in all snapshots? */
if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
if (iter->flags & BTREE_ITER_all_snapshots) {
p = bpos_successor(p);
} else {
p = bpos_nosnap_successor(p);
@ -74,7 +74,7 @@ static inline struct bpos bkey_successor(struct btree_iter *iter, struct bpos p)
static inline struct bpos bkey_predecessor(struct btree_iter *iter, struct bpos p)
{
/* Are we iterating over keys in all snapshots? */
if (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) {
if (iter->flags & BTREE_ITER_all_snapshots) {
p = bpos_predecessor(p);
} else {
p = bpos_nosnap_predecessor(p);
@ -88,7 +88,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
{
struct bpos pos = iter->pos;

if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
if ((iter->flags & BTREE_ITER_is_extents) &&
!bkey_eq(pos, POS_MAX))
pos = bkey_successor(iter, pos);
return pos;
@ -253,13 +253,13 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)

BUG_ON(iter->btree_id >= BTREE_ID_NR);

BUG_ON(!!(iter->flags & BTREE_ITER_CACHED) != btree_iter_path(trans, iter)->cached);
BUG_ON(!!(iter->flags & BTREE_ITER_cached) != btree_iter_path(trans, iter)->cached);

BUG_ON((iter->flags & BTREE_ITER_IS_EXTENTS) &&
(iter->flags & BTREE_ITER_ALL_SNAPSHOTS));
BUG_ON((iter->flags & BTREE_ITER_is_extents) &&
(iter->flags & BTREE_ITER_all_snapshots));

BUG_ON(!(iter->flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
BUG_ON(!(iter->flags & BTREE_ITER_snapshot_field) &&
(iter->flags & BTREE_ITER_all_snapshots) &&
!btree_type_has_snapshot_field(iter->btree_id));

if (iter->update_path)
@ -269,10 +269,10 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)

static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
{
BUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
BUG_ON((iter->flags & BTREE_ITER_filter_snapshots) &&
!iter->pos.snapshot);

BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
BUG_ON(!(iter->flags & BTREE_ITER_all_snapshots) &&
iter->pos.snapshot != iter->snapshot);

BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
@ -289,7 +289,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
if (!bch2_debug_check_iterators)
return 0;

if (!(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS))
if (!(iter->flags & BTREE_ITER_filter_snapshots))
return 0;

if (bkey_err(k) || !k.k)
@ -300,8 +300,8 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
k.k->p.snapshot));

bch2_trans_iter_init(trans, &copy, iter->btree_id, iter->pos,
BTREE_ITER_NOPRESERVE|
BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_nopreserve|
BTREE_ITER_all_snapshots);
prev = bch2_btree_iter_prev(&copy);
if (!prev.k)
goto out;
@ -897,7 +897,7 @@ static noinline int btree_node_iter_and_journal_peek(struct btree_trans *trans,

bch2_bkey_buf_reassemble(out, c, k);

if ((flags & BTREE_ITER_PREFETCH) &&
if ((flags & BTREE_ITER_prefetch) &&
c->opts.btree_node_prefetch)
ret = btree_path_prefetch_j(trans, path, &jiter);

@ -944,7 +944,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,

bch2_bkey_buf_unpack(&tmp, c, l->b, k);

if ((flags & BTREE_ITER_PREFETCH) &&
if ((flags & BTREE_ITER_prefetch) &&
c->opts.btree_node_prefetch) {
ret = btree_path_prefetch(trans, path);
if (ret)
@ -999,6 +999,7 @@ retry_all:

bch2_trans_unlock(trans);
cond_resched();
trans->locked = true;

if (unlikely(trans->memory_allocation_failure)) {
struct closure cl;
@ -1162,6 +1163,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
goto out_uptodate;

path->level = btree_path_up_until_good_node(trans, path, 0);
unsigned max_level = path->level;

EBUG_ON(btree_path_node(path, path->level) &&
!btree_node_locked(path, path->level));
@ -1192,6 +1194,16 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
goto out;
}
}

if (unlikely(max_level > path->level)) {
struct btree_path *linked;
unsigned iter;

trans_for_each_path_with_node(trans, path_l(path)->b, linked, iter)
for (unsigned j = path->level + 1; j < max_level; j++)
linked->l[j] = path->l[j];
}

out_uptodate:
path->uptodate = BTREE_ITER_UPTODATE;
out:
@ -1334,6 +1346,26 @@ static inline void __bch2_path_free(struct btree_trans *trans, btree_path_idx_t
__clear_bit(path, trans->paths_allocated);
}

static bool bch2_btree_path_can_relock(struct btree_trans *trans, struct btree_path *path)
{
unsigned l = path->level;

do {
if (!btree_path_node(path, l))
break;

if (!is_btree_node(path, l))
return false;

if (path->l[l].lock_seq != path->l[l].b->c.lock.seq)
return false;

l++;
} while (l < path->locks_want);

return true;
}

void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool intent)
{
struct btree_path *path = trans->paths + path_idx, *dup;
@ -1348,10 +1380,15 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
return;

if (path->should_be_locked &&
!trans->restarted &&
(!dup || !bch2_btree_path_relock_norestart(trans, dup)))
return;
if (path->should_be_locked && !trans->restarted) {
if (!dup)
return;

if (!(trans->locked
? bch2_btree_path_relock_norestart(trans, dup)
: bch2_btree_path_can_relock(trans, dup)))
return;
}

if (dup) {
dup->preserve |= path->preserve;
@ -1384,22 +1421,26 @@ void __noreturn bch2_trans_in_restart_error(struct btree_trans *trans)
(void *) trans->last_restarted_ip);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
{
panic("trans should be locked, unlocked by %pS\n",
(void *) trans->last_unlock_ip);
}

noinline __cold
void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
{
prt_printf(buf, "transaction updates for %s journal seq %llu",
prt_printf(buf, "transaction updates for %s journal seq %llu\n",
trans->fn, trans->journal_res.seq);
prt_newline(buf);
printbuf_indent_add(buf, 2);

trans_for_each_update(trans, i) {
struct bkey_s_c old = { &i->old_k, i->old_v };

prt_printf(buf, "update: btree=%s cached=%u %pS",
prt_printf(buf, "update: btree=%s cached=%u %pS\n",
bch2_btree_id_str(i->btree_id),
i->cached,
(void *) i->ip_allocated);
prt_newline(buf);

prt_printf(buf, " old ");
bch2_bkey_val_to_text(buf, trans->c, old);
@ -1428,7 +1469,7 @@ void bch2_dump_trans_updates(struct btree_trans *trans)
printbuf_exit(&buf);
}

static void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
struct btree_path *path = trans->paths + path_idx;

@ -1440,11 +1481,50 @@ static void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *tr
path->level);
bch2_bpos_to_text(out, path->pos);

prt_printf(out, " locks %u", path->nodes_locked);
#ifdef TRACK_PATH_ALLOCATED
prt_printf(out, " %pS", (void *) path->ip_allocated);
#endif
}

static const char *btree_node_locked_str(enum btree_node_locked_type t)
{
switch (t) {
case BTREE_NODE_UNLOCKED:
return "unlocked";
case BTREE_NODE_READ_LOCKED:
return "read";
case BTREE_NODE_INTENT_LOCKED:
return "intent";
case BTREE_NODE_WRITE_LOCKED:
return "write";
default:
return NULL;
}
}

void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
{
bch2_btree_path_to_text_short(out, trans, path_idx);

struct btree_path *path = trans->paths + path_idx;

prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
prt_newline(out);

printbuf_indent_add(out, 2);
for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
prt_printf(out, "l=%u locks %s seq %u node ", l,
btree_node_locked_str(btree_node_locked_type(path, l)),
path->l[l].lock_seq);

int ret = PTR_ERR_OR_ZERO(path->l[l].b);
if (ret)
prt_str(out, bch2_err_str(ret));
else
prt_printf(out, "%px", path->l[l].b);
prt_newline(out);
}
printbuf_indent_sub(out, 2);
}

static noinline __cold
@ -1457,7 +1537,7 @@ void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
btree_trans_sort_paths(trans);

trans_for_each_path_idx_inorder(trans, iter)
bch2_btree_path_to_text(out, trans, iter.path_idx);
bch2_btree_path_to_text_short(out, trans, iter.path_idx);
}

noinline __cold
@ -1608,11 +1688,12 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
unsigned flags, unsigned long ip)
{
struct btree_path *path;
bool cached = flags & BTREE_ITER_CACHED;
bool intent = flags & BTREE_ITER_INTENT;
bool cached = flags & BTREE_ITER_cached;
bool intent = flags & BTREE_ITER_intent;
struct trans_for_each_path_inorder_iter iter;
btree_path_idx_t path_pos = 0, path_idx;

bch2_trans_verify_not_unlocked(trans);
bch2_trans_verify_not_in_restart(trans);
bch2_trans_verify_locks(trans);

@ -1657,7 +1738,7 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
trans->paths_sorted = false;
}

if (!(flags & BTREE_ITER_NOPRESERVE))
if (!(flags & BTREE_ITER_nopreserve))
path->preserve = true;

if (path->intent_ref)
@ -1678,6 +1759,22 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
return path_idx;
}

btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *trans,
enum btree_id btree_id,
unsigned level,
struct bpos pos)
{
btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
BTREE_ITER_nopreserve|
BTREE_ITER_intent, _RET_IP_);
path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);

struct btree_path *path = trans->paths + path_idx;
bch2_btree_path_downgrade(trans, path);
__bch2_btree_path_unlock(trans, path);
return path_idx;
}
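A hedged sketch of how the new helper is meant to be used: it hands back a private, non-preserved intent path that has already been downgraded and unlocked, for callers that want to remember a position now and traverse it later. The call site below is hypothetical (dev_idx and bucket are placeholder values):

	/* hypothetical caller: remember where an update will land, lock it later */
	btree_path_idx_t idx = bch2_path_get_unlocked_mut(trans, BTREE_ID_alloc,
							  0, POS(dev_idx, bucket));
	/* ... later, when the path is actually needed: */
	int ret = bch2_btree_path_traverse(trans, idx, BTREE_ITER_intent);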

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
{

@ -1733,9 +1830,11 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
struct btree_trans *trans = iter->trans;
int ret;

bch2_trans_verify_not_unlocked(trans);

iter->path = bch2_btree_path_set_pos(trans, iter->path,
btree_iter_search_key(iter),
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
@ -1774,7 +1873,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
iter->k.p = iter->pos = b->key.k.p;

iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
out:
@ -1835,13 +1934,16 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
if (bpos_eq(iter->pos, b->key.k.p)) {
__btree_path_set_level_up(trans, path, path->level++);
} else {
if (btree_lock_want(path, path->level + 1) == BTREE_NODE_UNLOCKED)
btree_node_unlock(trans, path, path->level + 1);

/*
* Haven't gotten to the end of the parent node: go back down to
* the next child node
*/
iter->path = bch2_btree_path_set_pos(trans, iter->path,
bpos_successor(iter->pos),
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

path = btree_iter_path(trans, iter);
@ -1859,7 +1961,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
iter->k.p = iter->pos = b->key.k.p;

iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(btree_iter_path(trans, iter));
EBUG_ON(btree_iter_path(trans, iter)->uptodate);
@ -1878,11 +1980,11 @@ err:
inline bool bch2_btree_iter_advance(struct btree_iter *iter)
{
struct bpos pos = iter->k.p;
bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
? bpos_eq(pos, SPOS_MAX)
: bkey_eq(pos, SPOS_MAX));

if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_successor(iter, pos);
bch2_btree_iter_set_pos(iter, pos);
return ret;
@ -1891,11 +1993,11 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
{
struct bpos pos = bkey_start_pos(&iter->k);
bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
bool ret = !(iter->flags & BTREE_ITER_all_snapshots
? bpos_eq(pos, POS_MIN)
: bkey_eq(pos, POS_MIN));

if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
if (ret && !(iter->flags & BTREE_ITER_is_extents))
pos = bkey_predecessor(iter, pos);
bch2_btree_iter_set_pos(iter, pos);
return ret;
@ -2006,7 +2108,10 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
struct bkey_s_c k;
int ret;

if ((iter->flags & BTREE_ITER_KEY_CACHE_FILL) &&
bch2_trans_verify_not_in_restart(trans);
bch2_trans_verify_not_unlocked(trans);

if ((iter->flags & BTREE_ITER_key_cache_fill) &&
bpos_eq(iter->pos, pos))
return bkey_s_c_null;

@ -2015,17 +2120,17 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos

if (!iter->key_cache_path)
iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
iter->flags & BTREE_ITER_INTENT, 0,
iter->flags|BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL,
iter->flags & BTREE_ITER_intent, 0,
iter->flags|BTREE_ITER_cached|
BTREE_ITER_cached_nofill,
_THIS_IP_);

iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
iter->flags|BTREE_ITER_CACHED) ?:
iter->flags|BTREE_ITER_cached) ?:
bch2_btree_path_relock(trans, btree_iter_path(trans, iter), _THIS_IP_);
if (unlikely(ret))
return bkey_s_c_err(ret);
@ -2053,7 +2158,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
struct btree_path_level *l;

iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@ -2078,7 +2183,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp

k = btree_path_level_peek_all(trans->c, l, &iter->k);

if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
k.k &&
(k2 = btree_trans_peek_key_cache(iter, k.k->p)).k) {
k = k2;
@ -2089,10 +2194,10 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
}
}

if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
if (unlikely(iter->flags & BTREE_ITER_with_journal))
k = btree_trans_peek_journal(trans, iter, k);

if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
trans->nr_updates))
bch2_btree_trans_peek_updates(trans, iter, &k);

@ -2144,11 +2249,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
struct bpos iter_pos;
int ret;

EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
bch2_trans_verify_not_unlocked(trans);
EBUG_ON((iter->flags & BTREE_ITER_filter_snapshots) && bkey_eq(end, POS_MAX));

if (iter->update_path) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
iter->update_path = 0;
}

@ -2171,7 +2277,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* isn't monotonically increasing before FILTER_SNAPSHOTS, and
* that's what we check against in extents mode:
*/
if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
? bkey_gt(k.k->p, end)
: k.k->p.inode > end.inode))
goto end;
@ -2179,13 +2285,13 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
if (iter->update_path &&
!bkey_eq(trans->paths[iter->update_path].pos, k.k->p)) {
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
iter->update_path = 0;
}

if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
(iter->flags & BTREE_ITER_INTENT) &&
!(iter->flags & BTREE_ITER_IS_EXTENTS) &&
if ((iter->flags & BTREE_ITER_filter_snapshots) &&
(iter->flags & BTREE_ITER_intent) &&
!(iter->flags & BTREE_ITER_is_extents) &&
!iter->update_path) {
struct bpos pos = k.k->p;

@ -2200,12 +2306,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* advance, same as on exit for iter->path, but only up
* to snapshot
*/
__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_INTENT);
__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
iter->update_path = iter->path;

iter->update_path = bch2_btree_path_set_pos(trans,
iter->update_path, pos,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
_THIS_IP_);
ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
if (unlikely(ret)) {
@ -2218,7 +2324,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* We can never have a key in a leaf node at POS_MAX, so
* we don't have to check these successor() calls:
*/
if ((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) &&
if ((iter->flags & BTREE_ITER_filter_snapshots) &&
!bch2_snapshot_is_ancestor(trans->c,
iter->snapshot,
k.k->p.snapshot)) {
@ -2227,7 +2333,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
}

if (bkey_whiteout(k.k) &&
!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
!(iter->flags & BTREE_ITER_all_snapshots)) {
search_key = bkey_successor(iter, k.k->p);
continue;
}
@ -2237,12 +2343,12 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
* equal to the key we just returned - except extents can
* straddle iter->pos:
*/
if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
if (!(iter->flags & BTREE_ITER_is_extents))
iter_pos = k.k->p;
else
iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));

if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
if (unlikely(!(iter->flags & BTREE_ITER_is_extents)
? bkey_gt(iter_pos, end)
: bkey_ge(iter_pos, end)))
goto end;
@ -2253,7 +2359,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
iter->pos = iter_pos;

iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

btree_path_set_should_be_locked(btree_iter_path(trans, iter));
@ -2266,7 +2372,7 @@ out_no_locked:
btree_path_set_should_be_locked(trans->paths + iter->update_path);
}

if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
if (!(iter->flags & BTREE_ITER_all_snapshots))
iter->pos.snapshot = iter->snapshot;

ret = bch2_btree_iter_verify_ret(iter, k);
@ -2316,21 +2422,22 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
btree_path_idx_t saved_path = 0;
int ret;

bch2_trans_verify_not_unlocked(trans);
EBUG_ON(btree_iter_path(trans, iter)->cached ||
btree_iter_path(trans, iter)->level);

if (iter->flags & BTREE_ITER_WITH_JOURNAL)
if (iter->flags & BTREE_ITER_with_journal)
return bkey_s_c_err(-BCH_ERR_btree_iter_with_journal_not_supported);

bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);

if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
if (iter->flags & BTREE_ITER_filter_snapshots)
search_key.snapshot = U32_MAX;

while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@ -2345,17 +2452,17 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)

k = btree_path_level_peek(trans, path, &path->l[0], &iter->k);
if (!k.k ||
((iter->flags & BTREE_ITER_IS_EXTENTS)
((iter->flags & BTREE_ITER_is_extents)
? bpos_ge(bkey_start_pos(k.k), search_key)
: bpos_gt(k.k->p, search_key)))
k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);

if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
trans->nr_updates))
bch2_btree_trans_peek_prev_updates(trans, iter, &k);

if (likely(k.k)) {
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
if (iter->flags & BTREE_ITER_filter_snapshots) {
if (k.k->p.snapshot == iter->snapshot)
goto got_key;

@ -2366,7 +2473,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
*/
if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
bch2_path_put_nokeep(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
iter->path = saved_path;
saved_path = 0;
iter->k = saved_k;
@ -2379,9 +2486,9 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
k.k->p.snapshot)) {
if (saved_path)
bch2_path_put_nokeep(trans, saved_path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
saved_path = btree_path_clone(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
path = btree_iter_path(trans, iter);
saved_k = *k.k;
saved_v = k.v;
@ -2392,9 +2499,9 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
}
got_key:
if (bkey_whiteout(k.k) &&
!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS)) {
!(iter->flags & BTREE_ITER_all_snapshots)) {
search_key = bkey_predecessor(iter, k.k->p);
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
if (iter->flags & BTREE_ITER_filter_snapshots)
search_key.snapshot = U32_MAX;
continue;
}
@ -2418,11 +2525,11 @@ got_key:
if (bkey_lt(k.k->p, iter->pos))
iter->pos = k.k->p;

if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
if (iter->flags & BTREE_ITER_filter_snapshots)
iter->pos.snapshot = iter->snapshot;
out_no_locked:
if (saved_path)
bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_INTENT);
bch2_path_put_nokeep(trans, saved_path, iter->flags & BTREE_ITER_intent);

bch2_btree_iter_verify_entry_exit(iter);
bch2_btree_iter_verify(iter);
@ -2452,12 +2559,13 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
struct bkey_s_c k;
int ret;

bch2_trans_verify_not_unlocked(trans);
bch2_btree_iter_verify(iter);
bch2_btree_iter_verify_entry_exit(iter);
EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_WITH_KEY_CACHE));
EBUG_ON(btree_iter_path(trans, iter)->level && (iter->flags & BTREE_ITER_with_key_cache));

/* extents can't span inode numbers: */
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
if ((iter->flags & BTREE_ITER_is_extents) &&
unlikely(iter->pos.offset == KEY_OFFSET_MAX)) {
if (iter->pos.inode == KEY_INODE_MAX)
return bkey_s_c_null;
@ -2467,7 +2575,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)

search_key = btree_iter_search_key(iter);
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
btree_iter_ip_allocated(iter));

ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
@ -2476,22 +2584,22 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
goto out_no_locked;
}

if ((iter->flags & BTREE_ITER_CACHED) ||
!(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
if ((iter->flags & BTREE_ITER_cached) ||
!(iter->flags & (BTREE_ITER_is_extents|BTREE_ITER_filter_snapshots))) {
k = bkey_s_c_null;

if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
if (unlikely((iter->flags & BTREE_ITER_with_updates) &&
trans->nr_updates)) {
bch2_btree_trans_peek_slot_updates(trans, iter, &k);
if (k.k)
goto out;
}

if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&
if (unlikely(iter->flags & BTREE_ITER_with_journal) &&
(k = btree_trans_peek_slot_journal(trans, iter)).k)
goto out;

if (unlikely(iter->flags & BTREE_ITER_WITH_KEY_CACHE) &&
if (unlikely(iter->flags & BTREE_ITER_with_key_cache) &&
(k = btree_trans_peek_key_cache(iter, iter->pos)).k) {
if (!bkey_err(k))
iter->k = *k.k;
@ -2506,12 +2614,12 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
struct bpos next;
struct bpos end = iter->pos;

if (iter->flags & BTREE_ITER_IS_EXTENTS)
if (iter->flags & BTREE_ITER_is_extents)
end.offset = U64_MAX;

EBUG_ON(btree_iter_path(trans, iter)->level);

if (iter->flags & BTREE_ITER_INTENT) {
if (iter->flags & BTREE_ITER_intent) {
struct btree_iter iter2;

bch2_trans_copy_iter(&iter2, iter);
@ -2542,7 +2650,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
bkey_init(&iter->k);
iter->k.p = iter->pos;

if (iter->flags & BTREE_ITER_IS_EXTENTS) {
if (iter->flags & BTREE_ITER_is_extents) {
bch2_key_resize(&iter->k,
min_t(u64, KEY_SIZE_MAX,
(next.inode == iter->pos.inode
@ -2726,13 +2834,13 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
{
if (iter->update_path)
bch2_path_put_nokeep(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
if (iter->path)
bch2_path_put(trans, iter->path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
if (iter->key_cache_path)
bch2_path_put(trans, iter->key_cache_path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
iter->path = 0;
iter->update_path = 0;
iter->key_cache_path = 0;
@ -2757,9 +2865,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
unsigned depth,
unsigned flags)
{
flags |= BTREE_ITER_NOT_EXTENTS;
flags |= __BTREE_ITER_ALL_SNAPSHOTS;
flags |= BTREE_ITER_ALL_SNAPSHOTS;
flags |= BTREE_ITER_not_extents;
flags |= BTREE_ITER_snapshot_field;
flags |= BTREE_ITER_all_snapshots;

bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
__bch2_btree_iter_flags(trans, btree_id, flags),
@ -2782,9 +2890,9 @@ void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
dst->ip_allocated = _RET_IP_;
#endif
if (src->path)
__btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_INTENT);
__btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
if (src->update_path)
__btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_INTENT);
__btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
dst->key_cache_path = 0;
}

@ -2953,7 +3061,8 @@ u32 bch2_trans_begin(struct btree_trans *trans)
if (!trans->restarted &&
(need_resched() ||
time_after64(now, trans->last_begin_time + BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))) {
drop_locks_do(trans, (cond_resched(), 0));
bch2_trans_unlock(trans);
cond_resched();
now = local_clock();
}
trans->last_begin_time = now;
@ -2963,11 +3072,14 @@ u32 bch2_trans_begin(struct btree_trans *trans)
bch2_trans_srcu_unlock(trans);

trans->last_begin_ip = _RET_IP_;
trans->locked = true;

if (trans->restarted) {
bch2_btree_path_traverse_all(trans);
trans->notrace_relock_fail = false;
}

bch2_trans_verify_not_unlocked(trans);
return trans->restart_count;
}

@ -3020,7 +3132,7 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
*/
BUG_ON(pos_task &&
pid == pos_task->pid &&
bch2_trans_locked(pos));
pos->locked);

if (pos_task && pid < pos_task->pid) {
list_add_tail(&trans->list, &pos->list);
@ -3036,6 +3148,7 @@ got_trans:
trans->last_begin_time = local_clock();
trans->fn_idx = fn_idx;
trans->locking_wait.task = current;
trans->locked = true;
trans->journal_replay_not_finished =
unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)) &&
atomic_inc_not_zero(&c->journal_keys.ref);
@ -3166,13 +3279,11 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
pid = owner ? owner->pid : 0;
rcu_read_unlock();

prt_tab(out);
prt_printf(out, "%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
b->level, bch2_btree_id_str(b->btree_id));
bch2_bpos_to_text(out, btree_node_pos(b));

prt_tab(out);
prt_printf(out, " locks %u:%u:%u held by pid %u",
prt_printf(out, "\t locks %u:%u:%u held by pid %u",
c.n[0], c.n[1], c.n[2], pid);
}

@ -3229,10 +3340,8 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)

b = READ_ONCE(trans->locking);
if (b) {
prt_printf(out, " blocked for %lluus on",
div_u64(local_clock() - trans->locking_wait.start_time,
1000));
prt_newline(out);
prt_printf(out, " blocked for %lluus on\n",
div_u64(local_clock() - trans->locking_wait.start_time, 1000));
prt_printf(out, " %c", lock_types[trans->locking_wait.lock_want]);
bch2_btree_bkey_cached_common_to_text(out, b);
prt_newline(out);

@ -216,9 +216,13 @@ int __must_check bch2_btree_path_traverse_one(struct btree_trans *,
btree_path_idx_t,
unsigned, unsigned long);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *);

static inline int __must_check bch2_btree_path_traverse(struct btree_trans *trans,
btree_path_idx_t path, unsigned flags)
{
bch2_trans_verify_not_unlocked(trans);

if (trans->paths[path].uptodate < BTREE_ITER_NEED_RELOCK)
return 0;

@ -227,6 +231,9 @@ static inline int __must_check bch2_btree_path_traverse(struct btree_trans *tran

btree_path_idx_t bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
unsigned, unsigned, unsigned, unsigned long);
btree_path_idx_t bch2_path_get_unlocked_mut(struct btree_trans *, enum btree_id,
unsigned, struct bpos);

struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);

/*
@ -283,7 +290,6 @@ int bch2_trans_relock(struct btree_trans *);
int bch2_trans_relock_notrace(struct btree_trans *);
void bch2_trans_unlock(struct btree_trans *);
void bch2_trans_unlock_long(struct btree_trans *);
bool bch2_trans_locked(struct btree_trans *);

static inline int trans_was_restarted(struct btree_trans *trans, u32 restart_count)
{
@ -309,6 +315,14 @@ static inline void bch2_trans_verify_not_in_restart(struct btree_trans *trans)
bch2_trans_in_restart_error(trans);
}

void __noreturn bch2_trans_unlocked_error(struct btree_trans *);

static inline void bch2_trans_verify_not_unlocked(struct btree_trans *trans)
{
if (!trans->locked)
bch2_trans_unlocked_error(trans);
}
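Together with the trans->locked flag set in bch2_trans_begin()/__bch2_trans_get() and cleared on unlock, this pair encodes the commit's new invariant check. A sketch of the intended pattern in a hypothetical helper that requires btree locks to be held:

	/* hypothetical helper; only the assertion call is from this diff */
	static int my_locked_op(struct btree_trans *trans)
	{
		bch2_trans_verify_not_unlocked(trans);	/* panics via bch2_trans_unlocked_error() if not */
		/* ... safe to use btree paths here ... */
		return 0;
	}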
||||
|
||||
__always_inline
static int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
{
@ -386,10 +400,10 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos

if (unlikely(iter->update_path))
bch2_path_put(trans, iter->update_path,
iter->flags & BTREE_ITER_INTENT);
iter->flags & BTREE_ITER_intent);
iter->update_path = 0;

if (!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS))
if (!(iter->flags & BTREE_ITER_all_snapshots))
new_pos.snapshot = iter->snapshot;

__bch2_btree_iter_set_pos(iter, new_pos);
@ -397,7 +411,7 @@ static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos

static inline void bch2_btree_iter_set_pos_to_extent_start(struct btree_iter *iter)
{
BUG_ON(!(iter->flags & BTREE_ITER_IS_EXTENTS));
BUG_ON(!(iter->flags & BTREE_ITER_is_extents));
iter->pos = bkey_start_pos(&iter->k);
}

@ -416,20 +430,20 @@ static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
unsigned btree_id,
unsigned flags)
{
if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
if (!(flags & (BTREE_ITER_all_snapshots|BTREE_ITER_not_extents)) &&
btree_id_is_extents(btree_id))
flags |= BTREE_ITER_IS_EXTENTS;
flags |= BTREE_ITER_is_extents;

if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
if (!(flags & BTREE_ITER_snapshot_field) &&
!btree_type_has_snapshot_field(btree_id))
flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
flags &= ~BTREE_ITER_all_snapshots;

if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
if (!(flags & BTREE_ITER_all_snapshots) &&
btree_type_has_snapshots(btree_id))
flags |= BTREE_ITER_FILTER_SNAPSHOTS;
flags |= BTREE_ITER_filter_snapshots;

if (trans->journal_replay_not_finished)
flags |= BTREE_ITER_WITH_JOURNAL;
flags |= BTREE_ITER_with_journal;

return flags;
}
@ -439,10 +453,10 @@ static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
unsigned flags)
{
if (!btree_id_cached(trans->c, btree_id)) {
flags &= ~BTREE_ITER_CACHED;
flags &= ~BTREE_ITER_WITH_KEY_CACHE;
} else if (!(flags & BTREE_ITER_CACHED))
flags |= BTREE_ITER_WITH_KEY_CACHE;
flags &= ~BTREE_ITER_cached;
flags &= ~BTREE_ITER_with_key_cache;
} else if (!(flags & BTREE_ITER_cached))
flags |= BTREE_ITER_with_key_cache;

return __bch2_btree_iter_flags(trans, btree_id, flags);
}
@ -619,14 +633,14 @@ u32 bch2_trans_begin(struct btree_trans *);
static inline struct bkey_s_c bch2_btree_iter_peek_prev_type(struct btree_iter *iter,
unsigned flags)
{
return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
bch2_btree_iter_peek_prev(iter);
}

static inline struct bkey_s_c bch2_btree_iter_peek_type(struct btree_iter *iter,
unsigned flags)
{
return flags & BTREE_ITER_SLOTS ? bch2_btree_iter_peek_slot(iter) :
return flags & BTREE_ITER_slots ? bch2_btree_iter_peek_slot(iter) :
bch2_btree_iter_peek(iter);
}

@ -634,7 +648,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
struct bpos end,
unsigned flags)
{
if (!(flags & BTREE_ITER_SLOTS))
if (!(flags & BTREE_ITER_slots))
return bch2_btree_iter_peek_upto(iter, end);

if (bkey_gt(iter->pos, end))
@ -699,16 +713,12 @@ transaction_restart: \
_ret2 ?: trans_was_restarted(_trans, _restart_count); \
})

#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _do) \
#define for_each_btree_key_upto_continue(_trans, _iter, \
_end, _flags, _k, _do) \
({ \
struct btree_iter _iter; \
struct bkey_s_c _k; \
int _ret3 = 0; \
\
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
\
do { \
_ret3 = lockrestart_do(_trans, ({ \
(_k) = bch2_btree_iter_peek_upto_type(&(_iter), \
@ -724,6 +734,21 @@ transaction_restart: \
_ret3; \
})

#define for_each_btree_key_continue(_trans, _iter, _flags, _k, _do) \
for_each_btree_key_upto_continue(_trans, _iter, SPOS_MAX, _flags, _k, _do)

#define for_each_btree_key_upto(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _do) \
({ \
bch2_trans_begin(trans); \
\
struct btree_iter _iter; \
bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
\
for_each_btree_key_upto_continue(_trans, _iter, _end, _flags, _k, _do);\
})
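
The reworked for_each_btree_key_upto() now owns the bch2_trans_begin() call and delegates the actual loop to for_each_btree_key_upto_continue(). A minimal usage sketch (hypothetical caller, not part of this commit; note that `iter' and `k' are declared by the macros themselves and the loop body is an expression returning 0 or an error):

/* hypothetical: count xattr keys across the whole btree */
static int count_keys(struct btree_trans *trans, u64 *nr)
{
	return for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
				       POS_MIN, SPOS_MAX, 0, k, ({
		(*nr)++;
		0;
	}));
}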

#define for_each_btree_key(_trans, _iter, _btree_id, \
_start, _flags, _k, _do) \
for_each_btree_key_upto(_trans, _iter, _btree_id, _start, \
@ -794,14 +819,6 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
return k;
}

#define for_each_btree_key_old(_trans, _iter, _btree_id, \
_start, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
(_start), (_flags)); \
(_k) = __bch2_btree_iter_peek_and_restart((_trans), &(_iter), _flags),\
!((_ret) = bkey_err(_k)) && (_k).k; \
bch2_btree_iter_advance(&(_iter)))

#define for_each_btree_key_upto_norestart(_trans, _iter, _btree_id, \
_start, _end, _flags, _k, _ret) \
for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id), \
@ -861,6 +878,7 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
})

void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
void bch2_dump_trans_updates(struct btree_trans *);
void bch2_dump_trans_paths_updates(struct btree_trans *);

@ -623,3 +623,20 @@ void bch2_shoot_down_journal_keys(struct bch_fs *c, enum btree_id btree,
keys->data[dst++] = *i;
keys->nr = keys->gap = dst;
}

void bch2_journal_keys_dump(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;
struct printbuf buf = PRINTBUF;

pr_info("%zu keys:", keys->nr);

move_gap(keys, keys->nr);

darray_for_each(*keys, i) {
printbuf_reset(&buf);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
pr_err("%s l=%u %s", bch2_btree_id_str(i->btree_id), i->level, buf.buf);
}
printbuf_exit(&buf);
}
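
bch2_journal_keys_dump() works because journal_keys is kept as a gap buffer (note keys->nr = keys->gap = dst above): move_gap(keys, keys->nr) pushes the gap to the very end so darray_for_each() can walk the live entries contiguously. A generic sketch of that idea, with hypothetical types and names (this is an assumption about the layout, not code from this commit):

#include <string.h>

/* hypothetical gap buffer: live entries in [0, gap) and at the tail */
struct gap_buf { int *data; size_t nr, size, gap; };

/* close the gap so the nr live entries occupy [0, nr) contiguously */
static void move_gap_to_end(struct gap_buf *b)
{
	size_t tail = b->nr - b->gap;	/* entries stored after the gap */

	memmove(b->data + b->gap, b->data + b->size - tail,
		tail * sizeof(*b->data));
	b->gap = b->nr;
}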
@ -70,4 +70,6 @@ void bch2_shoot_down_journal_keys(struct bch_fs *, enum btree_id,
unsigned, unsigned,
struct bpos, struct bpos);

void bch2_journal_keys_dump(struct bch_fs *);

#endif /* _BCACHEFS_BTREE_JOURNAL_ITER_H */

@ -383,9 +383,9 @@ static int btree_key_cache_fill(struct btree_trans *trans,
int ret;

bch2_trans_iter_init(trans, &iter, ck->key.btree_id, ck->key.pos,
BTREE_ITER_KEY_CACHE_FILL|
BTREE_ITER_CACHED_NOFILL);
iter.flags &= ~BTREE_ITER_WITH_JOURNAL;
BTREE_ITER_key_cache_fill|
BTREE_ITER_cached_nofill);
iter.flags &= ~BTREE_ITER_with_journal;
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
@ -515,23 +515,10 @@ retry:
fill:
path->uptodate = BTREE_ITER_UPTODATE;

if (!ck->valid && !(flags & BTREE_ITER_CACHED_NOFILL)) {
/*
* Using the underscore version because we haven't set
* path->uptodate yet:
*/
if (!path->locks_want &&
!__bch2_btree_path_upgrade(trans, path, 1, NULL)) {
trace_and_count(trans->c, trans_restart_key_cache_upgrade, trans, _THIS_IP_);
ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_upgrade);
goto err;
}

ret = btree_key_cache_fill(trans, path, ck);
if (ret)
goto err;

ret = bch2_btree_path_relock(trans, path, _THIS_IP_);
if (!ck->valid && !(flags & BTREE_ITER_cached_nofill)) {
ret = bch2_btree_path_upgrade(trans, path, 1) ?:
btree_key_cache_fill(trans, path, ck) ?:
bch2_btree_path_relock(trans, path, _THIS_IP_);
if (ret)
goto err;

@ -622,13 +609,13 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
int ret;

bch2_trans_iter_init(trans, &b_iter, key.btree_id, key.pos,
BTREE_ITER_SLOTS|
BTREE_ITER_INTENT|
BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_slots|
BTREE_ITER_intent|
BTREE_ITER_all_snapshots);
bch2_trans_iter_init(trans, &c_iter, key.btree_id, key.pos,
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
b_iter.flags &= ~BTREE_ITER_WITH_KEY_CACHE;
BTREE_ITER_cached|
BTREE_ITER_intent);
b_iter.flags &= ~BTREE_ITER_with_key_cache;

ret = bch2_btree_iter_traverse(&c_iter);
if (ret)
@ -666,9 +653,9 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,

ret = bch2_btree_iter_traverse(&b_iter) ?:
bch2_trans_update(trans, &b_iter, ck->k,
BTREE_UPDATE_KEY_CACHE_RECLAIM|
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
BTREE_TRIGGER_NORUN) ?:
BTREE_UPDATE_key_cache_reclaim|
BTREE_UPDATE_internal_snapshot_node|
BTREE_TRIGGER_norun) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_check_rw|
BCH_TRANS_COMMIT_no_enospc|
@ -790,7 +777,7 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
* flushing. The flush callback will not proceed unless ->seq matches
* the latest pin, so make sure it starts with a consistent value.
*/
if (!(insert_entry->flags & BTREE_UPDATE_NOJOURNAL) ||
if (!(insert_entry->flags & BTREE_UPDATE_nojournal) ||
!journal_pin_active(&ck->journal)) {
ck->seq = trans->journal_res.seq;
}
@ -1048,12 +1035,9 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)

void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
{
prt_printf(out, "nr_freed:\t%lu", atomic_long_read(&c->nr_freed));
prt_newline(out);
prt_printf(out, "nr_keys:\t%lu", atomic_long_read(&c->nr_keys));
prt_newline(out);
prt_printf(out, "nr_dirty:\t%lu", atomic_long_read(&c->nr_dirty));
prt_newline(out);
prt_printf(out, "nr_freed:\t%lu\n", atomic_long_read(&c->nr_freed));
prt_printf(out, "nr_keys:\t%lu\n", atomic_long_read(&c->nr_keys));
prt_printf(out, "nr_dirty:\t%lu\n", atomic_long_read(&c->nr_dirty));
}
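
The change here folds prt_newline() into the preceding format string; with these printbufs a trailing \n in prt_printf() emits the same newline. Before/after, taken from the lines above:

/* before: two calls per line of output */
prt_printf(out, "nr_keys:\t%lu", atomic_long_read(&c->nr_keys));
prt_newline(out);

/* after: one call, the newline folded into the format string */
prt_printf(out, "nr_keys:\t%lu\n", atomic_long_read(&c->nr_keys));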

void bch2_btree_key_cache_exit(void)

@ -83,8 +83,7 @@ static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
{
struct trans_waiting_for_lock *i;

prt_printf(out, "Found lock cycle (%u entries):", g->nr);
prt_newline(out);
prt_printf(out, "Found lock cycle (%u entries):\n", g->nr);

for (i = g->g; i < g->g + g->nr; i++) {
struct task_struct *task = READ_ONCE(i->trans->locking_wait.task);
@ -224,8 +223,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)

bch2_btree_trans_to_text(&buf, trans);

prt_printf(&buf, "backtrace:");
prt_newline(&buf);
prt_printf(&buf, "backtrace:\n");
printbuf_indent_add(&buf, 2);
bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
printbuf_indent_sub(&buf, 2);
@ -492,8 +490,6 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
if (path->uptodate == BTREE_ITER_NEED_RELOCK)
path->uptodate = BTREE_ITER_UPTODATE;

bch2_trans_verify_locks(trans);

return path->uptodate < BTREE_ITER_NEED_RELOCK;
}

@ -609,7 +605,9 @@ bool bch2_btree_path_relock_norestart(struct btree_trans *trans, struct btree_pa
{
struct get_locks_fail f;

return btree_path_get_locks(trans, path, false, &f);
bool ret = btree_path_get_locks(trans, path, false, &f);
bch2_trans_verify_locks(trans);
return ret;
}

int __bch2_btree_path_relock(struct btree_trans *trans,
@ -632,7 +630,9 @@ bool bch2_btree_path_upgrade_noupgrade_sibs(struct btree_trans *trans,

path->locks_want = new_locks_want;

return btree_path_get_locks(trans, path, true, f);
bool ret = btree_path_get_locks(trans, path, true, f);
bch2_trans_verify_locks(trans);
return ret;
}

bool __bch2_btree_path_upgrade(struct btree_trans *trans,
@ -640,8 +640,9 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
unsigned new_locks_want,
struct get_locks_fail *f)
{
if (bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f))
return true;
bool ret = bch2_btree_path_upgrade_noupgrade_sibs(trans, path, new_locks_want, f);
if (ret)
goto out;

/*
* XXX: this is ugly - we'd prefer to not be mucking with other
@ -675,8 +676,9 @@ bool __bch2_btree_path_upgrade(struct btree_trans *trans,
btree_path_get_locks(trans, linked, true, NULL);
}
}

return false;
out:
bch2_trans_verify_locks(trans);
return ret;
}

void __bch2_btree_path_downgrade(struct btree_trans *trans,
@ -725,82 +727,100 @@ void bch2_trans_downgrade(struct btree_trans *trans)
bch2_btree_path_downgrade(trans, path);
}

int bch2_trans_relock(struct btree_trans *trans)
static inline void __bch2_trans_unlock(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;

trans_for_each_path(trans, path, i)
__bch2_btree_path_unlock(trans, path);
}

static noinline __cold int bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
struct get_locks_fail *f, bool trace)
{
if (!trace)
goto out;

if (trace_trans_restart_relock_enabled()) {
struct printbuf buf = PRINTBUF;

bch2_bpos_to_text(&buf, path->pos);
prt_printf(&buf, " l=%u seq=%u node seq=", f->l, path->l[f->l].lock_seq);
if (IS_ERR_OR_NULL(f->b)) {
prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
} else {
prt_printf(&buf, "%u", f->b->c.lock.seq);

struct six_lock_count c =
bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

c = six_lock_counts(&f->b->c.lock);
prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
}

trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
printbuf_exit(&buf);
}

count_event(trans->c, trans_restart_relock);
out:
__bch2_trans_unlock(trans);
bch2_trans_verify_locks(trans);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}

static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace)
{
bch2_trans_verify_locks(trans);

if (unlikely(trans->restarted))
return -((int) trans->restarted);
if (unlikely(trans->locked))
goto out;

struct btree_path *path;
unsigned i;

trans_for_each_path(trans, path, i) {
struct get_locks_fail f;

if (path->should_be_locked &&
!btree_path_get_locks(trans, path, false, &f)) {
if (trace_trans_restart_relock_enabled()) {
struct printbuf buf = PRINTBUF;

bch2_bpos_to_text(&buf, path->pos);
prt_printf(&buf, " l=%u seq=%u node seq=",
f.l, path->l[f.l].lock_seq);
if (IS_ERR_OR_NULL(f.b)) {
prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
} else {
prt_printf(&buf, "%u", f.b->c.lock.seq);

struct six_lock_count c =
bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);

c = six_lock_counts(&f.b->c.lock);
prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
}

trace_trans_restart_relock(trans, _RET_IP_, buf.buf);
printbuf_exit(&buf);
}

count_event(trans->c, trans_restart_relock);
return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}
!btree_path_get_locks(trans, path, false, &f))
return bch2_trans_relock_fail(trans, path, &f, trace);
}

trans->locked = true;
out:
bch2_trans_verify_locks(trans);
return 0;
}

int bch2_trans_relock(struct btree_trans *trans)
{
return __bch2_trans_relock(trans, true);
}

int bch2_trans_relock_notrace(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;

if (unlikely(trans->restarted))
return -((int) trans->restarted);

trans_for_each_path(trans, path, i)
if (path->should_be_locked &&
!bch2_btree_path_relock_norestart(trans, path)) {
return btree_trans_restart(trans, BCH_ERR_transaction_restart_relock);
}
return 0;
return __bch2_trans_relock(trans, false);
}
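
bch2_trans_relock() and bch2_trans_relock_notrace() now share __bch2_trans_relock(), with the cold, tracing-heavy failure path split out into the noinline bch2_trans_relock_fail() helper. The typical caller pairs relock with an unlock around a blocking operation; a hedged sketch of that pattern (the blocking call is illustrative):

/* drop btree locks around a blocking call, then retake them;
 * a failed relock returns a transaction-restart error for the
 * caller's bch2_trans_begin()/lockrestart_do() loop to handle */
static int wait_unlocked(struct btree_trans *trans, struct closure *cl)
{
	bch2_trans_unlock(trans);
	closure_sync(cl);	/* illustrative blocking operation */
	return bch2_trans_relock(trans);
}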

void bch2_trans_unlock_noassert(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;
__bch2_trans_unlock(trans);

trans_for_each_path(trans, path, i)
__bch2_btree_path_unlock(trans, path);
trans->locked = false;
trans->last_unlock_ip = _RET_IP_;
}

void bch2_trans_unlock(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;
__bch2_trans_unlock(trans);

trans_for_each_path(trans, path, i)
__bch2_btree_path_unlock(trans, path);
trans->locked = false;
trans->last_unlock_ip = _RET_IP_;
}

void bch2_trans_unlock_long(struct btree_trans *trans)
@ -809,17 +829,6 @@ void bch2_trans_unlock_long(struct btree_trans *trans)
bch2_trans_srcu_unlock(trans);
}

bool bch2_trans_locked(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;

trans_for_each_path(trans, path, i)
if (path->nodes_locked)
return true;
return false;
}

int __bch2_trans_mutex_lock(struct btree_trans *trans,
struct mutex *lock)
{
@ -836,15 +845,19 @@ int __bch2_trans_mutex_lock(struct btree_trans *trans,

void bch2_btree_path_verify_locks(struct btree_path *path)
{
unsigned l;
/*
* A path may be uptodate and yet have nothing locked if and only if
* there is no node at path->level, which generally means we were
* iterating over all nodes and got to the end of the btree
*/
BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
btree_path_node(path, path->level) &&
!path->nodes_locked);

if (!path->nodes_locked) {
BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
btree_path_node(path, path->level));
if (!path->nodes_locked)
return;
}

for (l = 0; l < BTREE_MAX_DEPTH; l++) {
for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
int want = btree_lock_want(path, l);
int have = btree_node_locked_type(path, l);

@ -857,8 +870,24 @@ void bch2_btree_path_verify_locks(struct btree_path *path)
}
}

static bool bch2_trans_locked(struct btree_trans *trans)
{
struct btree_path *path;
unsigned i;

trans_for_each_path(trans, path, i)
if (path->nodes_locked)
return true;
return false;
}

void bch2_trans_verify_locks(struct btree_trans *trans)
{
if (!trans->locked) {
BUG_ON(bch2_trans_locked(trans));
return;
}

struct btree_path *path;
unsigned i;

@ -364,14 +364,14 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
struct btree_path *path,
unsigned new_locks_want)
{
struct get_locks_fail f;
struct get_locks_fail f = {};
unsigned old_locks_want = path->locks_want;

new_locks_want = min(new_locks_want, BTREE_MAX_DEPTH);

if (path->locks_want < new_locks_want
? __bch2_btree_path_upgrade(trans, path, new_locks_want, &f)
: path->uptodate == BTREE_ITER_UPTODATE)
: path->nodes_locked)
return 0;

trace_and_count(trans->c, trans_restart_upgrade, trans, _THIS_IP_, path,

@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "btree_gc.h"
#include "btree_io.h"
#include "btree_iter.h"
@ -19,6 +20,26 @@

#include <linux/prefetch.h>

static const char * const trans_commit_flags_strs[] = {
#define x(n, ...) #n,
BCH_TRANS_COMMIT_FLAGS()
#undef x
NULL
};

void bch2_trans_commit_flags_to_text(struct printbuf *out, enum bch_trans_commit_flags flags)
{
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;

prt_printf(out, "watermark=%s", bch2_watermarks[watermark]);

flags >>= BCH_WATERMARK_BITS;
if (flags) {
prt_char(out, ' ');
bch2_prt_bitflags(out, trans_commit_flags_strs, flags);
}
}
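
trans_commit_flags_strs[] is generated from the same BCH_TRANS_COMMIT_FLAGS() x-macro that defines the enum, so the strings can never drift out of sync with the flag bits. Approximately, the preprocessor expands it to (only flag names visible elsewhere in this commit are listed; the full set and order follow the x-macro):

static const char * const trans_commit_flags_strs[] = {
	"no_enospc",
	"no_check_rw",
	"no_journal_res",
	"journal_reclaim",
	/* ...one string per x() entry, in declaration order... */
	NULL
};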

static void verify_update_old_key(struct btree_trans *trans, struct btree_insert_entry *i)
{
#ifdef CONFIG_BCACHEFS_DEBUG
@ -315,7 +336,7 @@ static inline void btree_insert_entry_checks(struct btree_trans *trans,
BUG_ON(i->btree_id != path->btree_id);
EBUG_ON(!i->level &&
btree_type_has_snapshots(i->btree_id) &&
!(i->flags & BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) &&
!(i->flags & BTREE_UPDATE_internal_snapshot_node) &&
test_bit(JOURNAL_REPLAY_DONE, &trans->c->journal.flags) &&
i->k->k.p.snapshot &&
bch2_snapshot_is_internal_node(trans->c, i->k->k.p.snapshot) > 0);
@ -443,13 +464,13 @@ static int run_one_mem_trigger(struct btree_trans *trans,

verify_update_old_key(trans, i);

if (unlikely(flags & BTREE_TRIGGER_NORUN))
if (unlikely(flags & BTREE_TRIGGER_norun))
return 0;

if (old_ops->trigger == new_ops->trigger) {
ret = bch2_key_trigger(trans, i->btree_id, i->level,
old, bkey_i_to_s(new),
BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
BTREE_TRIGGER_insert|BTREE_TRIGGER_overwrite|flags);
} else {
ret = bch2_key_trigger_new(trans, i->btree_id, i->level,
bkey_i_to_s(new), flags) ?:
@ -472,11 +493,11 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
struct bkey_s_c old = { &old_k, i->old_v };
const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
unsigned flags = i->flags|BTREE_TRIGGER_TRANSACTIONAL;
unsigned flags = i->flags|BTREE_TRIGGER_transactional;

verify_update_old_key(trans, i);

if ((i->flags & BTREE_TRIGGER_NORUN) ||
if ((i->flags & BTREE_TRIGGER_norun) ||
!(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)))
return 0;

@ -486,8 +507,8 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
i->overwrite_trigger_run = true;
i->insert_trigger_run = true;
return bch2_key_trigger(trans, i->btree_id, i->level, old, bkey_i_to_s(i->k),
BTREE_TRIGGER_INSERT|
BTREE_TRIGGER_OVERWRITE|flags) ?: 1;
BTREE_TRIGGER_insert|
BTREE_TRIGGER_overwrite|flags) ?: 1;
} else if (overwrite && !i->overwrite_trigger_run) {
i->overwrite_trigger_run = true;
return bch2_key_trigger_old(trans, i->btree_id, i->level, old, flags) ?: 1;
@ -572,7 +593,7 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)

#ifdef CONFIG_BCACHEFS_DEBUG
trans_for_each_update(trans, i)
BUG_ON(!(i->flags & BTREE_TRIGGER_NORUN) &&
BUG_ON(!(i->flags & BTREE_TRIGGER_norun) &&
(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
(!i->insert_trigger_run || !i->overwrite_trigger_run));
#endif
@ -590,7 +611,7 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)

if (btree_node_type_needs_gc(__btree_node_type(i->level, i->btree_id)) &&
gc_visited(trans->c, gc_pos_btree_node(insert_l(trans, i)->b))) {
int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_GC);
int ret = run_one_mem_trigger(trans, i, i->flags|BTREE_TRIGGER_gc);
if (ret)
return ret;
}
@ -609,6 +630,9 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
unsigned u64s = 0;
int ret;

bch2_trans_verify_not_unlocked(trans);
bch2_trans_verify_not_in_restart(trans);

if (race_fault()) {
trace_and_count(c, trans_restart_fault_inject, trans, trace_ip);
return btree_trans_restart_nounlock(trans, BCH_ERR_transaction_restart_fault_inject);
@ -686,7 +710,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,

trans_for_each_update(trans, i)
if (BTREE_NODE_TYPE_HAS_ATOMIC_TRIGGERS & (1U << i->bkey_type)) {
ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_ATOMIC|i->flags);
ret = run_one_mem_trigger(trans, i, BTREE_TRIGGER_atomic|i->flags);
if (ret)
goto fatal_err;
}
@ -705,7 +729,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
if (i->key_cache_already_flushed)
continue;

if (i->flags & BTREE_UPDATE_NOJOURNAL)
if (i->flags & BTREE_UPDATE_nojournal)
continue;

verify_update_old_key(trans, i);
@ -773,9 +797,8 @@ static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
struct bch_fs *c = trans->c;

printbuf_reset(err);
prt_printf(err, "invalid bkey on insert from %s -> %ps",
prt_printf(err, "invalid bkey on insert from %s -> %ps\n",
trans->fn, (void *) i->ip_allocated);
prt_newline(err);
printbuf_indent_add(err, 2);

bch2_bkey_val_to_text(err, c, bkey_i_to_s_c(i->k));
@ -796,8 +819,7 @@ static noinline int bch2_trans_commit_journal_entry_invalid(struct btree_trans *
struct bch_fs *c = trans->c;
struct printbuf buf = PRINTBUF;

prt_printf(&buf, "invalid bkey on insert from %s", trans->fn);
prt_newline(&buf);
prt_printf(&buf, "invalid bkey on insert from %s\n", trans->fn);
printbuf_indent_add(&buf, 2);

bch2_journal_entry_to_text(&buf, c, i);
@ -988,6 +1010,9 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
struct bch_fs *c = trans->c;
int ret = 0;

bch2_trans_verify_not_unlocked(trans);
bch2_trans_verify_not_in_restart(trans);

if (!trans->nr_updates &&
!trans->journal_entries_u64s)
goto out_reset;
@ -1065,7 +1090,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (i->key_cache_already_flushed)
continue;

if (i->flags & BTREE_UPDATE_NOJOURNAL)
if (i->flags & BTREE_UPDATE_nojournal)
continue;

/* we're going to journal the key being updated: */
@ -1086,6 +1111,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
}
retry:
errored_at = NULL;
bch2_trans_verify_not_unlocked(trans);
bch2_trans_verify_not_in_restart(trans);
if (likely(!(flags & BCH_TRANS_COMMIT_no_journal_res)))
memset(&trans->journal_res, 0, sizeof(trans->journal_res));

@ -187,36 +187,87 @@ struct btree_node_iter {
} data[MAX_BSETS];
};

#define BTREE_ITER_FLAGS() \
x(slots) \
x(intent) \
x(prefetch) \
x(is_extents) \
x(not_extents) \
x(cached) \
x(with_key_cache) \
x(with_updates) \
x(with_journal) \
x(snapshot_field) \
x(all_snapshots) \
x(filter_snapshots) \
x(nopreserve) \
x(cached_nofill) \
x(key_cache_fill) \

#define STR_HASH_FLAGS() \
x(must_create) \
x(must_replace)

#define BTREE_UPDATE_FLAGS() \
x(internal_snapshot_node) \
x(nojournal) \
x(key_cache_reclaim)


/*
* Iterate over all possible positions, synthesizing deleted keys for holes:
* BTREE_TRIGGER_norun - don't run triggers at all
*
* BTREE_TRIGGER_transactional - we're running transactional triggers as part of
* a transaction commit: triggers may generate new updates
*
* BTREE_TRIGGER_atomic - we're running atomic triggers during a transaction
* commit: we have our journal reservation, we're holding btree node write
* locks, and we know the transaction is going to commit (returning an error
* here is a fatal error, causing us to go emergency read-only)
*
* BTREE_TRIGGER_gc - we're in gc/fsck: running triggers to recalculate e.g. disk usage
*
* BTREE_TRIGGER_insert - @new is entering the btree
* BTREE_TRIGGER_overwrite - @old is leaving the btree
*
* BTREE_TRIGGER_bucket_invalidate - signal from bucket invalidate path to alloc
* trigger
*/
static const __maybe_unused u16 BTREE_ITER_SLOTS = 1 << 0;
/*
* Indicates that intent locks should be taken on leaf nodes, because we expect
* to be doing updates:
*/
static const __maybe_unused u16 BTREE_ITER_INTENT = 1 << 1;
/*
* Causes the btree iterator code to prefetch additional btree nodes from disk:
*/
static const __maybe_unused u16 BTREE_ITER_PREFETCH = 1 << 2;
/*
* Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
* @pos or the first key strictly greater than @pos
*/
static const __maybe_unused u16 BTREE_ITER_IS_EXTENTS = 1 << 3;
static const __maybe_unused u16 BTREE_ITER_NOT_EXTENTS = 1 << 4;
static const __maybe_unused u16 BTREE_ITER_CACHED = 1 << 5;
static const __maybe_unused u16 BTREE_ITER_WITH_KEY_CACHE = 1 << 6;
static const __maybe_unused u16 BTREE_ITER_WITH_UPDATES = 1 << 7;
static const __maybe_unused u16 BTREE_ITER_WITH_JOURNAL = 1 << 8;
static const __maybe_unused u16 __BTREE_ITER_ALL_SNAPSHOTS = 1 << 9;
static const __maybe_unused u16 BTREE_ITER_ALL_SNAPSHOTS = 1 << 10;
static const __maybe_unused u16 BTREE_ITER_FILTER_SNAPSHOTS = 1 << 11;
static const __maybe_unused u16 BTREE_ITER_NOPRESERVE = 1 << 12;
static const __maybe_unused u16 BTREE_ITER_CACHED_NOFILL = 1 << 13;
static const __maybe_unused u16 BTREE_ITER_KEY_CACHE_FILL = 1 << 14;
#define __BTREE_ITER_FLAGS_END 15
#define BTREE_TRIGGER_FLAGS() \
x(norun) \
x(transactional) \
x(atomic) \
x(gc) \
x(insert) \
x(overwrite) \
x(bucket_invalidate)

enum {
#define x(n) BTREE_ITER_FLAG_BIT_##n,
BTREE_ITER_FLAGS()
STR_HASH_FLAGS()
BTREE_UPDATE_FLAGS()
BTREE_TRIGGER_FLAGS()
#undef x
};

/* iter flags must fit in a u16: */
//BUILD_BUG_ON(BTREE_ITER_FLAG_BIT_key_cache_fill > 15);

enum btree_iter_update_trigger_flags {
#define x(n) BTREE_ITER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
BTREE_ITER_FLAGS()
#undef x
#define x(n) STR_HASH_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
STR_HASH_FLAGS()
#undef x
#define x(n) BTREE_UPDATE_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
BTREE_UPDATE_FLAGS()
#undef x
#define x(n) BTREE_TRIGGER_##n = 1U << BTREE_ITER_FLAG_BIT_##n,
BTREE_TRIGGER_FLAGS()
#undef x
};
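
All four flag families now share a single bit-numbering enum, which is what lets iterator, str-hash, update, and trigger flags be OR'd together into one enum btree_iter_update_trigger_flags value without colliding. The x-macros expand to roughly:

/* approximate expansion of the x-macros above */
enum {
	BTREE_ITER_FLAG_BIT_slots,	/* 0 */
	BTREE_ITER_FLAG_BIT_intent,	/* 1 */
	/* ...continues through BTREE_ITER_FLAGS(), STR_HASH_FLAGS(),
	 * BTREE_UPDATE_FLAGS() and BTREE_TRIGGER_FLAGS()... */
	BTREE_ITER_FLAG_BIT_bucket_invalidate,
};

enum btree_iter_update_trigger_flags {
	BTREE_ITER_slots	= 1U << BTREE_ITER_FLAG_BIT_slots,
	BTREE_ITER_intent	= 1U << BTREE_ITER_FLAG_BIT_intent,
	/* ... */
	BTREE_TRIGGER_bucket_invalidate = 1U << BTREE_ITER_FLAG_BIT_bucket_invalidate,
};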

enum btree_path_uptodate {
BTREE_ITER_UPTODATE = 0,
@ -307,7 +358,7 @@ struct btree_iter {
*/
struct bkey k;

/* BTREE_ITER_WITH_JOURNAL: */
/* BTREE_ITER_with_journal: */
size_t journal_idx;
#ifdef TRACK_PATH_ALLOCATED
unsigned long ip_allocated;
@ -418,6 +469,8 @@ struct btree_trans {
u8 lock_must_abort;
bool lock_may_not_fail:1;
bool srcu_held:1;
bool locked:1;
bool write_locked:1;
bool used_mempool:1;
bool in_traverse_all:1;
bool paths_sorted:1;
@ -425,13 +478,13 @@ struct btree_trans {
bool journal_transaction_names:1;
bool journal_replay_not_finished:1;
bool notrace_relock_fail:1;
bool write_locked:1;
enum bch_errcode restarted:16;
u32 restart_count;

u64 last_begin_time;
unsigned long last_begin_ip;
unsigned long last_restarted_ip;
unsigned long last_unlock_ip;
unsigned long srcu_lock_time;

const char *fn;

@ -25,14 +25,14 @@ static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,

static int __must_check
bch2_trans_update_by_path(struct btree_trans *, btree_path_idx_t,
struct bkey_i *, enum btree_update_flags,
struct bkey_i *, enum btree_iter_update_trigger_flags,
unsigned long ip);

static noinline int extent_front_merge(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k,
struct bkey_i **insert,
enum btree_update_flags flags)
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct bkey_i *update;
@ -104,8 +104,8 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
pos.snapshot++;

for_each_btree_key_norestart(trans, iter, btree_id, pos,
BTREE_ITER_ALL_SNAPSHOTS|
BTREE_ITER_NOPRESERVE, k, ret) {
BTREE_ITER_all_snapshots|
BTREE_ITER_nopreserve, k, ret) {
if (!bkey_eq(k.k->p, pos))
break;

@ -138,8 +138,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
darray_init(&s);

bch2_trans_iter_init(trans, &old_iter, id, old_pos,
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_not_extents|
BTREE_ITER_all_snapshots);
while ((old_k = bch2_btree_iter_prev(&old_iter)).k &&
!(ret = bkey_err(old_k)) &&
bkey_eq(old_pos, old_k.k->p)) {
@ -151,8 +151,8 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
continue;

new_k = bch2_bkey_get_iter(trans, &new_iter, id, whiteout_pos,
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_INTENT);
BTREE_ITER_not_extents|
BTREE_ITER_intent);
ret = bkey_err(new_k);
if (ret)
break;
@ -168,7 +168,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
update->k.type = KEY_TYPE_whiteout;

ret = bch2_trans_update(trans, &new_iter, update,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
}
bch2_trans_iter_exit(trans, &new_iter);

@ -185,7 +185,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,

int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_update_flags flags,
enum btree_iter_update_trigger_flags flags,
struct bkey_s_c old,
struct bkey_s_c new)
{
@ -218,7 +218,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
old.k->p, update->k.p) ?:
bch2_btree_insert_nonextent(trans, btree_id, update,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
BTREE_UPDATE_internal_snapshot_node|flags);
if (ret)
return ret;
}
@ -235,7 +235,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
old.k->p, update->k.p) ?:
bch2_btree_insert_nonextent(trans, btree_id, update,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
BTREE_UPDATE_internal_snapshot_node|flags);
if (ret)
return ret;
}
@ -260,7 +260,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
}

ret = bch2_btree_insert_nonextent(trans, btree_id, update,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
BTREE_UPDATE_internal_snapshot_node|flags);
if (ret)
return ret;
}
@ -273,7 +273,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
bch2_cut_front(new.k->p, update);

ret = bch2_trans_update_by_path(trans, iter->path, update,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
BTREE_UPDATE_internal_snapshot_node|
flags, _RET_IP_);
if (ret)
return ret;
@ -285,7 +285,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
static int bch2_trans_update_extent(struct btree_trans *trans,
struct btree_iter *orig_iter,
struct bkey_i *insert,
enum btree_update_flags flags)
enum btree_iter_update_trigger_flags flags)
{
struct btree_iter iter;
struct bkey_s_c k;
@ -293,9 +293,9 @@ static int bch2_trans_update_extent(struct btree_trans *trans,
int ret = 0;

bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
BTREE_ITER_INTENT|
BTREE_ITER_WITH_UPDATES|
BTREE_ITER_NOT_EXTENTS);
BTREE_ITER_intent|
BTREE_ITER_with_updates|
BTREE_ITER_not_extents);
k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
if ((ret = bkey_err(k)))
goto err;
@ -346,7 +346,7 @@ err:

static noinline int flush_new_cached_update(struct btree_trans *trans,
struct btree_insert_entry *i,
enum btree_update_flags flags,
enum btree_iter_update_trigger_flags flags,
unsigned long ip)
{
struct bkey k;
@ -354,7 +354,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,

btree_path_idx_t path_idx =
bch2_path_get(trans, i->btree_id, i->old_k.p, 1, 0,
BTREE_ITER_INTENT, _THIS_IP_);
BTREE_ITER_intent, _THIS_IP_);
ret = bch2_btree_path_traverse(trans, path_idx, 0);
if (ret)
goto out;
@ -372,7 +372,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
goto out;

i->key_cache_already_flushed = true;
i->flags |= BTREE_TRIGGER_NORUN;
i->flags |= BTREE_TRIGGER_norun;

btree_path_set_should_be_locked(btree_path);
ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
@ -383,7 +383,7 @@ out:

static int __must_check
bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
struct bkey_i *k, enum btree_update_flags flags,
struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
unsigned long ip)
{
struct bch_fs *c = trans->c;
@ -479,15 +479,15 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
if (!iter->key_cache_path)
iter->key_cache_path =
bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
BTREE_ITER_INTENT|
BTREE_ITER_CACHED, _THIS_IP_);
BTREE_ITER_intent|
BTREE_ITER_cached, _THIS_IP_);

iter->key_cache_path =
bch2_btree_path_set_pos(trans, iter->key_cache_path, path->pos,
iter->flags & BTREE_ITER_INTENT,
iter->flags & BTREE_ITER_intent,
_THIS_IP_);

ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_CACHED);
ret = bch2_btree_path_traverse(trans, iter->key_cache_path, BTREE_ITER_cached);
if (unlikely(ret))
return ret;

@ -505,17 +505,17 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
}

int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_i *k, enum btree_update_flags flags)
struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
btree_path_idx_t path_idx = iter->update_path ?: iter->path;
int ret;

if (iter->flags & BTREE_ITER_IS_EXTENTS)
if (iter->flags & BTREE_ITER_is_extents)
return bch2_trans_update_extent(trans, iter, k, flags);

if (bkey_deleted(&k->k) &&
!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
(iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
!(flags & BTREE_UPDATE_key_cache_reclaim) &&
(iter->flags & BTREE_ITER_filter_snapshots)) {
ret = need_whiteout_for_snapshot(trans, iter->btree_id, k->k.p);
if (unlikely(ret < 0))
return ret;
@ -528,7 +528,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
* Ensure that updates to cached btrees go to the key cache:
*/
struct btree_path *path = trans->paths + path_idx;
if (!(flags & BTREE_UPDATE_KEY_CACHE_RECLAIM) &&
if (!(flags & BTREE_UPDATE_key_cache_reclaim) &&
!path->cached &&
!path->level &&
btree_id_cached(trans->c, path->btree_id)) {
@ -587,7 +587,7 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k;
int ret = 0;

bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_INTENT);
bch2_trans_iter_init(trans, iter, btree, POS_MAX, BTREE_ITER_intent);
k = bch2_btree_iter_prev(iter);
ret = bkey_err(k);
if (ret)
@ -621,15 +621,15 @@ void bch2_trans_commit_hook(struct btree_trans *trans,

int bch2_btree_insert_nonextent(struct btree_trans *trans,
enum btree_id btree, struct bkey_i *k,
enum btree_update_flags flags)
enum btree_iter_update_trigger_flags flags)
{
struct btree_iter iter;
int ret;

bch2_trans_iter_init(trans, &iter, btree, k->k.p,
BTREE_ITER_CACHED|
BTREE_ITER_NOT_EXTENTS|
BTREE_ITER_INTENT);
BTREE_ITER_cached|
BTREE_ITER_not_extents|
BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
@ -637,16 +637,13 @@ int bch2_btree_insert_nonextent(struct btree_trans *trans,
}

int bch2_btree_insert_trans(struct btree_trans *trans, enum btree_id id,
struct bkey_i *k, enum btree_update_flags flags)
struct bkey_i *k, enum btree_iter_update_trigger_flags flags)
{
struct btree_iter iter;
int ret;

bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, k, flags);
BTREE_ITER_intent|flags);
int ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, k, flags);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
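
bch2_btree_insert_trans() now ORs the caller's flags into the iterator flags as well (BTREE_ITER_intent|flags), which is only sound now that iter, update, and trigger flags share one bit space. A hedged usage sketch (the btree ID is illustrative, not from this commit):

/* insert one key inside a restart loop; helper names as used above */
static int insert_one(struct btree_trans *trans, struct bkey_i *k)
{
	return lockrestart_do(trans,
		bch2_btree_insert_trans(trans, BTREE_ID_alloc, k, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_TRANS_COMMIT_no_enospc));
}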
@ -698,8 +695,8 @@ int bch2_btree_delete(struct btree_trans *trans,
int ret;

bch2_trans_iter_init(trans, &iter, btree, pos,
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
BTREE_ITER_cached|
BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&iter) ?:
bch2_btree_delete_at(trans, &iter, update_flags);
bch2_trans_iter_exit(trans, &iter);
@ -717,7 +714,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
struct bkey_s_c k;
int ret = 0;

bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_intent);
while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
struct disk_reservation disk_res =
bch2_disk_reservation_init(trans->c, 0);
@ -745,7 +742,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
*/
delete.k.p = iter.pos;

if (iter.flags & BTREE_ITER_IS_EXTENTS)
if (iter.flags & BTREE_ITER_is_extents)
bch2_key_resize(&delete.k,
bpos_min(end, k.k->p).offset -
iter.pos.offset);
@ -804,7 +801,7 @@ int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
k->k.p = pos;

struct btree_iter iter;
bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);

ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, k, 0);
@ -852,7 +849,7 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
if (ret)
goto err;

if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
if (!test_bit(JOURNAL_RUNNING, &c->journal.flags)) {
ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
if (ret)
goto err;

@ -44,16 +44,18 @@ enum bch_trans_commit_flags {
#undef x
};

void bch2_trans_commit_flags_to_text(struct printbuf *, enum bch_trans_commit_flags);

int bch2_btree_delete_extent_at(struct btree_trans *, struct btree_iter *,
unsigned, unsigned);
int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
int bch2_btree_delete(struct btree_trans *, enum btree_id, struct bpos, unsigned);

int bch2_btree_insert_nonextent(struct btree_trans *, enum btree_id,
struct bkey_i *, enum btree_update_flags);
struct bkey_i *, enum btree_iter_update_trigger_flags);

int bch2_btree_insert_trans(struct btree_trans *, enum btree_id, struct bkey_i *,
enum btree_update_flags);
enum btree_iter_update_trigger_flags);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
struct disk_reservation *, int flags);

@ -94,14 +96,14 @@ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
}

int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
enum btree_update_flags,
enum btree_iter_update_trigger_flags,
struct bkey_s_c, struct bkey_s_c);

int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos);

int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
struct bkey_i *, enum btree_update_flags);
struct bkey_i *, enum btree_iter_update_trigger_flags);

struct jset_entry *__bch2_trans_jset_entry_alloc(struct btree_trans *, unsigned);

@ -276,7 +278,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr
unsigned flags, unsigned type, unsigned min_bytes)
{
struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
btree_id, pos, flags|BTREE_ITER_INTENT, type);
btree_id, pos, flags|BTREE_ITER_intent, type);
struct bkey_i *ret = IS_ERR(k.k)
? ERR_CAST(k.k)
: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
@ -299,7 +301,7 @@ static inline struct bkey_i *__bch2_bkey_get_mut(struct btree_trans *trans,
unsigned flags, unsigned type, unsigned min_bytes)
{
struct bkey_i *mut = __bch2_bkey_get_mut_noupdate(trans, iter,
btree_id, pos, flags|BTREE_ITER_INTENT, type, min_bytes);
btree_id, pos, flags|BTREE_ITER_intent, type, min_bytes);
int ret;

if (IS_ERR(mut))

@ -38,22 +38,6 @@ static int bch2_btree_insert_node(struct btree_update *, struct btree_trans *,
btree_path_idx_t, struct btree *, struct keylist *);
static void bch2_btree_update_add_new_node(struct btree_update *, struct btree *);

static btree_path_idx_t get_unlocked_mut_path(struct btree_trans *trans,
enum btree_id btree_id,
unsigned level,
struct bpos pos)
{
btree_path_idx_t path_idx = bch2_path_get(trans, btree_id, pos, level + 1, level,
BTREE_ITER_NOPRESERVE|
BTREE_ITER_INTENT, _RET_IP_);
path_idx = bch2_btree_path_make_mut(trans, path_idx, true, _RET_IP_);

struct btree_path *path = trans->paths + path_idx;
bch2_btree_path_downgrade(trans, path);
__bch2_btree_path_unlock(trans, path);
return path_idx;
}

/*
* Verify that child nodes correctly span parent node's range:
*/
@ -73,6 +57,24 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
!bpos_eq(bkey_i_to_btree_ptr_v2(&b->key)->v.min_key,
b->data->min_key));

if (b == btree_node_root(c, b)) {
if (!bpos_eq(b->data->min_key, POS_MIN)) {
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->min_key);
need_fsck_err(c, btree_root_bad_min_key,
"btree root with incorrect min_key: %s", buf.buf);
goto topology_repair;
}

if (!bpos_eq(b->data->max_key, SPOS_MAX)) {
printbuf_reset(&buf);
bch2_bpos_to_text(&buf, b->data->max_key);
need_fsck_err(c, btree_root_bad_max_key,
"btree root with incorrect max_key: %s", buf.buf);
goto topology_repair;
}
}

if (!b->c.level)
return 0;

@ -646,7 +648,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans,
unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;

ret = bch2_key_trigger_old(trans, as->btree_id, level, bkey_i_to_s_c(k),
BTREE_TRIGGER_TRANSACTIONAL);
BTREE_TRIGGER_transactional);
if (ret)
return ret;
}
@ -655,7 +657,7 @@ static int btree_update_nodes_written_trans(struct btree_trans *trans,
unsigned level = bkey_i_to_btree_ptr_v2(k)->v.mem_ptr;

ret = bch2_key_trigger_new(trans, as->btree_id, level, bkey_i_to_s(k),
BTREE_TRIGGER_TRANSACTIONAL);
BTREE_TRIGGER_transactional);
if (ret)
return ret;
}
@ -735,9 +737,6 @@ err:
*/
b = READ_ONCE(as->b);
if (b) {
btree_path_idx_t path_idx = get_unlocked_mut_path(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
/*
* @b is the node we did the final insert into:
*
@ -755,12 +754,16 @@ err:
* btree_node_lock_nopath() (the use of which is always suspect,
* we need to work on removing this in the future)
*
* It should be, but get_unlocked_mut_path() -> bch2_path_get()
* It should be, but bch2_path_get_unlocked_mut() -> bch2_path_get()
* calls bch2_path_upgrade(), before we call path_make_mut(), so
* we may rarely end up with a locked path besides the one we
* have here:
*/
bch2_trans_unlock(trans);
bch2_trans_begin(trans);
btree_path_idx_t path_idx = bch2_path_get_unlocked_mut(trans,
as->btree_id, b->c.level, b->key.k.p);
struct btree_path *path = trans->paths + path_idx;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_intent);
mark_btree_node_locked(trans, path, b->c.level, BTREE_NODE_INTENT_LOCKED);
path->l[b->c.level].lock_seq = six_lock_seq(&b->c.lock);
@ -1158,9 +1161,8 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
if (flags & BCH_TRANS_COMMIT_journal_reclaim)
return ERR_PTR(-BCH_ERR_journal_reclaim_would_deadlock);

bch2_trans_unlock(trans);
wait_event(c->journal.wait, !test_bit(JOURNAL_SPACE_LOW, &c->journal.flags));
ret = bch2_trans_relock(trans);
ret = drop_locks_do(trans,
({ wait_event(c->journal.wait, !test_bit(JOURNAL_SPACE_LOW, &c->journal.flags)); 0; }));
if (ret)
return ERR_PTR(ret);
}
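
The open-coded unlock/wait/relock sequence becomes a single drop_locks_do(), which performs the same three steps; the trailing `0;' is the expression's value, and a failed relock surfaces as a transaction-restart error. It expands to approximately (a sketch, not the verbatim definition):

#define drop_locks_do(_trans, _do)			\
({							\
	bch2_trans_unlock(_trans);			\
	(_do) ?: bch2_trans_relock(_trans);		\
})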
@ -1206,7 +1208,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
as->start_time = start_time;
as->ip_started = _RET_IP_;
as->mode = BTREE_UPDATE_none;
as->watermark = watermark;
as->flags = flags;
as->took_gc_lock = true;
as->btree_id = path->btree_id;
as->update_level_start = level_start;
@ -1619,12 +1621,12 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
six_unlock_write(&n2->c.lock);
six_unlock_write(&n1->c.lock);

path1 = get_unlocked_mut_path(trans, as->btree_id, n1->c.level, n1->key.k.p);
path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + path1, n1);

path2 = get_unlocked_mut_path(trans, as->btree_id, n2->c.level, n2->key.k.p);
path2 = bch2_path_get_unlocked_mut(trans, as->btree_id, n2->c.level, n2->key.k.p);
six_lock_increment(&n2->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + path2, n2->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + path2, n2);
@ -1669,7 +1671,7 @@ static int btree_split(struct btree_update *as, struct btree_trans *trans,
bch2_btree_update_add_new_node(as, n1);
six_unlock_write(&n1->c.lock);

path1 = get_unlocked_mut_path(trans, as->btree_id, n1->c.level, n1->key.k.p);
path1 = bch2_path_get_unlocked_mut(trans, as->btree_id, n1->c.level, n1->key.k.p);
six_lock_increment(&n1->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + path1, n1->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + path1, n1);
@ -1947,6 +1949,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
u64 start_time = local_clock();
int ret = 0;

bch2_trans_verify_not_in_restart(trans);
bch2_trans_verify_not_unlocked(trans);
BUG_ON(!trans->paths[path].should_be_locked);
BUG_ON(!btree_node_locked(&trans->paths[path], level));

@ -1975,7 +1979,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
: bpos_successor(b->data->max_key);

sib_path = bch2_path_get(trans, btree, sib_pos,
U8_MAX, level, BTREE_ITER_INTENT, _THIS_IP_);
U8_MAX, level, BTREE_ITER_intent, _THIS_IP_);
ret = bch2_btree_path_traverse(trans, sib_path, false);
if (ret)
goto err;
@ -2068,7 +2072,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);

new_path = get_unlocked_mut_path(trans, btree, n->c.level, n->key.k.p);
new_path = bch2_path_get_unlocked_mut(trans, btree, n->c.level, n->key.k.p);
six_lock_increment(&n->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + new_path, n);
@ -2146,7 +2150,7 @@ int bch2_btree_node_rewrite(struct btree_trans *trans,
bch2_btree_update_add_new_node(as, n);
six_unlock_write(&n->c.lock);

new_path = get_unlocked_mut_path(trans, iter->btree_id, n->c.level, n->key.k.p);
new_path = bch2_path_get_unlocked_mut(trans, iter->btree_id, n->c.level, n->key.k.p);
six_lock_increment(&n->c.lock, SIX_LOCK_intent);
mark_btree_node_locked(trans, trans->paths + new_path, n->c.level, BTREE_NODE_INTENT_LOCKED);
bch2_btree_path_level_init(trans, trans->paths + new_path, n);
@ -2329,10 +2333,10 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
if (!skip_triggers) {
ret = bch2_key_trigger_old(trans, b->c.btree_id, b->c.level + 1,
bkey_i_to_s_c(&b->key),
BTREE_TRIGGER_TRANSACTIONAL) ?:
BTREE_TRIGGER_transactional) ?:
bch2_key_trigger_new(trans, b->c.btree_id, b->c.level + 1,
bkey_i_to_s(new_key),
BTREE_TRIGGER_TRANSACTIONAL);
BTREE_TRIGGER_transactional);
if (ret)
return ret;
}
@ -2349,7 +2353,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
bch2_trans_copy_iter(&iter2, iter);

iter2.path = bch2_btree_path_make_mut(trans, iter2.path,
iter2.flags & BTREE_ITER_INTENT,
iter2.flags & BTREE_ITER_intent,
_THIS_IP_);

struct btree_path *path2 = btree_iter_path(trans, &iter2);
@ -2361,7 +2365,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
trans->paths_sorted = false;

ret = bch2_btree_iter_traverse(&iter2) ?:
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_NORUN);
bch2_trans_update(trans, &iter2, new_key, BTREE_TRIGGER_norun);
if (ret)
goto err;
} else {
@ -2469,7 +2473,7 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,

bch2_trans_node_iter_init(trans, &iter, b->c.btree_id, b->key.k.p,
BTREE_MAX_DEPTH, b->c.level,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
ret = bch2_btree_iter_traverse(&iter);
if (ret)
goto out;
@ -2507,7 +2511,7 @@ void bch2_btree_set_root_for_read(struct bch_fs *c, struct btree *b)
bch2_btree_set_root_inmem(c, b);
}

static int __bch2_btree_root_alloc_fake(struct btree_trans *trans, enum btree_id id, unsigned level)
int bch2_btree_root_alloc_fake_trans(struct btree_trans *trans, enum btree_id id, unsigned level)
{
struct bch_fs *c = trans->c;
struct closure cl;
@ -2555,17 +2559,18 @@ static int __bch2_btree_root_alloc_fake(struct btree_trans *trans, enum btree_id

void bch2_btree_root_alloc_fake(struct bch_fs *c, enum btree_id id, unsigned level)
{
bch2_trans_run(c, __bch2_btree_root_alloc_fake(trans, id, level));
bch2_trans_run(c, bch2_btree_root_alloc_fake_trans(trans, id, level));
}

static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update *as)
{
prt_printf(out, "%ps: btree=%s l=%u-%u watermark=%s mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
(void *) as->ip_started,
prt_printf(out, "%ps: ", (void *) as->ip_started);
bch2_trans_commit_flags_to_text(out, as->flags);

prt_printf(out, " btree=%s l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
bch2_btree_id_str(as->btree_id),
as->update_level_start,
as->update_level_end,
bch2_watermarks[as->watermark],
bch2_btree_update_modes[as->mode],
as->nodes_written,
closure_nr_remaining(&as->cl),

@ -52,7 +52,7 @@ struct btree_update {
|
||||
struct list_head unwritten_list;
|
||||
|
||||
enum btree_update_mode mode;
|
||||
enum bch_watermark watermark;
|
||||
enum bch_trans_commit_flags flags;
|
||||
unsigned nodes_written:1;
|
||||
unsigned took_gc_lock:1;
|
||||
|
||||
@ -144,6 +144,9 @@ static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
|
||||
|
||||
EBUG_ON(!btree_node_locked(path, level));
|
||||
|
||||
if (bch2_btree_node_merging_disabled)
|
||||
return 0;
|
||||
|
||||
b = path->l[level].b;
|
||||
if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
|
||||
return 0;
|
||||
@ -172,6 +175,8 @@ int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
|
||||
struct bkey_i *, unsigned, bool);
|
||||
|
||||
void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
|
||||
|
||||
int bch2_btree_root_alloc_fake_trans(struct btree_trans *, enum btree_id, unsigned);
|
||||
void bch2_btree_root_alloc_fake(struct bch_fs *, enum btree_id, unsigned);
|
||||
|
||||
static inline unsigned btree_update_reserve_required(struct bch_fs *c,
|
||||
|
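
Running through every file in this commit: the BTREE_ITER_*, BTREE_TRIGGER_* and BTREE_UPDATE_* flag constants drop their all-caps spelling, and the bare `unsigned flags` parameters that carried them become a typed `enum btree_iter_update_trigger_flags`. A minimal sketch of the idea; the bit values below are illustrative assumptions, not the definitions in btree_types.h:

/* Sketch only: a named enum type lets the compiler flag callers that mix
 * up iterator flags and trigger flags; actual bit assignments may differ. */
enum btree_iter_update_trigger_flags {
	BTREE_TRIGGER_norun		= 1 << 0,
	BTREE_TRIGGER_transactional	= 1 << 1,
	BTREE_TRIGGER_atomic		= 1 << 2,
	BTREE_TRIGGER_gc		= 1 << 3,
	BTREE_TRIGGER_insert		= 1 << 4,
	BTREE_TRIGGER_overwrite		= 1 << 5,
};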
@ -122,7 +122,7 @@ static noinline int wb_flush_one_slowpath(struct btree_trans *trans,
trans->journal_res.seq = wb->journal_seq;

return bch2_trans_update(trans, iter, &wb->k,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc|
BCH_TRANS_COMMIT_no_check_rw|
@ -191,13 +191,13 @@ btree_write_buffered_insert(struct btree_trans *trans,
int ret;

bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
BTREE_ITER_CACHED|BTREE_ITER_INTENT);
BTREE_ITER_cached|BTREE_ITER_intent);

trans->journal_res.seq = wb->journal_seq;

ret = bch2_btree_iter_traverse(&iter) ?:
bch2_trans_update(trans, &iter, &wb->k,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter);
return ret;
}
@ -332,7 +332,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
if (!iter.path || iter.btree_id != k->btree) {
bch2_trans_iter_exit(trans, &iter);
bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
BTREE_ITER_INTENT|BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
}

bch2_btree_iter_set_pos(&iter, k->k.k.p);

@ -274,25 +274,14 @@ void bch2_dev_usage_init(struct bch_dev *ca)

void bch2_dev_usage_to_text(struct printbuf *out, struct bch_dev_usage *usage)
{
prt_tab(out);
prt_str(out, "buckets");
prt_tab_rjust(out);
prt_str(out, "sectors");
prt_tab_rjust(out);
prt_str(out, "fragmented");
prt_tab_rjust(out);
prt_newline(out);
prt_printf(out, "\tbuckets\rsectors\rfragmented\r\n");

for (unsigned i = 0; i < BCH_DATA_NR; i++) {
bch2_prt_data_type(out, i);
prt_tab(out);
prt_u64(out, usage->d[i].buckets);
prt_tab_rjust(out);
prt_u64(out, usage->d[i].sectors);
prt_tab_rjust(out);
prt_u64(out, usage->d[i].fragmented);
prt_tab_rjust(out);
prt_newline(out);
prt_printf(out, "\t%llu\r%llu\r%llu\r\n",
usage->d[i].buckets,
usage->d[i].sectors,
usage->d[i].fragmented);
}
}
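
The bch2_dev_usage_to_text() hunk above relies on printbuf's control-character conventions: inside prt_printf(), '\t' advances to the next tabstop and '\r' right-justifies the preceding text against it, so a whole table row collapses into one format string. A small usage sketch; the tabstop widths here are made up:

struct printbuf out = PRINTBUF;

printbuf_tabstop_push(&out, 16);	/* column widths: illustrative */
printbuf_tabstop_push(&out, 16);
printbuf_tabstop_push(&out, 16);

prt_printf(&out, "\tbuckets\rsectors\rfragmented\r\n");
prt_printf(&out, "user\t%llu\r%llu\r%llu\r\n", 123ULL, 456ULL, 789ULL);

/* out.buf now holds two aligned rows */
printbuf_exit(&out);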
@ -496,60 +485,6 @@ int bch2_update_cached_sectors_list(struct btree_trans *trans, unsigned dev, s64
return bch2_update_replicas_list(trans, &r.e, sectors);
}

int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, enum bch_data_type data_type,
unsigned sectors, struct gc_pos pos,
unsigned flags)
{
struct bucket old, new, *g;
int ret = 0;

BUG_ON(!(flags & BTREE_TRIGGER_GC));
BUG_ON(data_type != BCH_DATA_sb &&
data_type != BCH_DATA_journal);

/*
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
return 0;

percpu_down_read(&c->mark_lock);
g = gc_bucket(ca, b);

bucket_lock(g);
old = *g;

if (bch2_fs_inconsistent_on(g->data_type &&
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
BUG();
ret = -EIO;
goto err;
}

if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
bch2_data_type_str(g->data_type ?: data_type),
g->dirty_sectors, sectors)) {
ret = -EIO;
goto err;
}

g->data_type = data_type;
g->dirty_sectors += sectors;
new = *g;
err:
bucket_unlock(g);
if (!ret)
bch2_dev_usage_update_m(c, ca, &old, &new);
percpu_up_read(&c->mark_lock);
return ret;
}

int bch2_check_bucket_ref(struct btree_trans *trans,
struct bkey_s_c k,
const struct bch_extent_ptr *ptr,
@ -558,7 +493,7 @@ int bch2_check_bucket_ref(struct btree_trans *trans,
u32 bucket_sectors)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
struct printbuf buf = PRINTBUF;
int ret = 0;
@ -818,16 +753,17 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
const union bch_extent_entry *entry,
s64 *sectors, unsigned flags)
s64 *sectors,
enum btree_iter_update_trigger_flags flags)
{
bool insert = !(flags & BTREE_TRIGGER_OVERWRITE);
bool insert = !(flags & BTREE_TRIGGER_overwrite);
struct bpos bucket;
struct bch_backpointer bp;

bch2_extent_ptr_to_bp(trans->c, btree_id, level, k, p, entry, &bucket, &bp);
*sectors = insert ? bp.bucket_len : -((s64) bp.bucket_len);

if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (flags & BTREE_TRIGGER_transactional) {
struct btree_iter iter;
struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update(trans, &iter, bucket);
int ret = PTR_ERR_OR_ZERO(a);
@ -850,9 +786,9 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
}
}

if (flags & BTREE_TRIGGER_GC) {
if (flags & BTREE_TRIGGER_gc) {
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, p.ptr.dev);
enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

percpu_down_read(&c->mark_lock);
@ -886,13 +822,14 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
struct bkey_s_c k,
struct extent_ptr_decoded p,
enum bch_data_type data_type,
s64 sectors, unsigned flags)
s64 sectors,
enum btree_iter_update_trigger_flags flags)
{
if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (flags & BTREE_TRIGGER_transactional) {
struct btree_iter iter;
struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_stripes, POS(0, p.ec.idx),
BTREE_ITER_WITH_UPDATES, stripe);
BTREE_ITER_with_updates, stripe);
int ret = PTR_ERR_OR_ZERO(s);
if (unlikely(ret)) {
bch2_trans_inconsistent_on(bch2_err_matches(ret, ENOENT), trans,
@ -922,10 +859,10 @@ err:
return ret;
}

if (flags & BTREE_TRIGGER_GC) {
if (flags & BTREE_TRIGGER_gc) {
struct bch_fs *c = trans->c;

BUG_ON(!(flags & BTREE_TRIGGER_GC));
BUG_ON(!(flags & BTREE_TRIGGER_gc));

struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
if (!m) {
@ -961,9 +898,10 @@ err:

static int __trigger_extent(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, unsigned flags)
struct bkey_s_c k,
enum btree_iter_update_trigger_flags flags)
{
bool gc = flags & BTREE_TRIGGER_GC;
bool gc = flags & BTREE_TRIGGER_gc;
struct bch_fs *c = trans->c;
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
const union bch_extent_entry *entry;
@ -1035,7 +973,7 @@ static int __trigger_extent(struct btree_trans *trans,
int bch2_trigger_extent(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
unsigned flags)
enum btree_iter_update_trigger_flags flags)
{
struct bkey_ptrs_c new_ptrs = bch2_bkey_ptrs_c(new.s_c);
struct bkey_ptrs_c old_ptrs = bch2_bkey_ptrs_c(old);
@ -1049,7 +987,7 @@ int bch2_trigger_extent(struct btree_trans *trans,
new_ptrs_bytes))
return 0;

if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (flags & BTREE_TRIGGER_transactional) {
struct bch_fs *c = trans->c;
int mod = (int) bch2_bkey_needs_rebalance(c, new.s_c) -
(int) bch2_bkey_needs_rebalance(c, old);
@ -1062,7 +1000,7 @@ int bch2_trigger_extent(struct btree_trans *trans,
}
}

if (flags & (BTREE_TRIGGER_TRANSACTIONAL|BTREE_TRIGGER_GC))
if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc))
return trigger_run_overwrite_then_insert(__trigger_extent, trans, btree_id, level, old, new, flags);

return 0;
@ -1071,17 +1009,17 @@ int bch2_trigger_extent(struct btree_trans *trans,
/* KEY_TYPE_reservation */

static int __trigger_reservation(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, unsigned flags)
enum btree_id btree_id, unsigned level, struct bkey_s_c k,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
s64 sectors = (s64) k.k->size * replicas;

if (flags & BTREE_TRIGGER_OVERWRITE)
if (flags & BTREE_TRIGGER_overwrite)
sectors = -sectors;

if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (flags & BTREE_TRIGGER_transactional) {
int ret = bch2_replicas_deltas_realloc(trans, 0);
if (ret)
return ret;
@ -1092,7 +1030,7 @@ static int __trigger_reservation(struct btree_trans *trans,
d->persistent_reserved[replicas - 1] += sectors;
}

if (flags & BTREE_TRIGGER_GC) {
if (flags & BTREE_TRIGGER_gc) {
percpu_down_read(&c->mark_lock);
preempt_disable();

@ -1112,7 +1050,7 @@ static int __trigger_reservation(struct btree_trans *trans,
int bch2_trigger_reservation(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
unsigned flags)
enum btree_iter_update_trigger_flags flags)
{
return trigger_run_overwrite_then_insert(__trigger_reservation, trans, btree_id, level, old, new, flags);
}
@ -1120,22 +1058,16 @@ int bch2_trigger_reservation(struct btree_trans *trans,
/* Mark superblocks: */

static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
struct bch_dev *ca, u64 b,
enum bch_data_type type,
unsigned sectors)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_i_alloc_v4 *a;
int ret = 0;

/*
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
return 0;

a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
struct bkey_i_alloc_v4 *a =
bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
if (IS_ERR(a))
return PTR_ERR(a);

@ -1163,20 +1095,77 @@ err:
return ret;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, size_t b,
enum bch_data_type type,
unsigned sectors)
static int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 b, enum bch_data_type data_type, unsigned sectors,
enum btree_iter_update_trigger_flags flags)
{
return commit_do(trans, NULL, NULL, 0,
__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
struct bucket old, new, *g;
int ret = 0;

percpu_down_read(&c->mark_lock);
g = gc_bucket(ca, b);

bucket_lock(g);
old = *g;

if (bch2_fs_inconsistent_on(g->data_type &&
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_type_str(g->data_type),
bch2_data_type_str(data_type))) {
BUG();
ret = -EIO;
goto err;
}

if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%llu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
bch2_data_type_str(g->data_type ?: data_type),
g->dirty_sectors, sectors)) {
ret = -EIO;
goto err;
}

g->data_type = data_type;
g->dirty_sectors += sectors;
new = *g;
err:
bucket_unlock(g);
if (!ret)
bch2_dev_usage_update_m(c, ca, &old, &new);
percpu_up_read(&c->mark_lock);
return ret;
}

int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
struct bch_dev *ca, u64 b,
enum bch_data_type type, unsigned sectors,
enum btree_iter_update_trigger_flags flags)
{
BUG_ON(type != BCH_DATA_free &&
type != BCH_DATA_sb &&
type != BCH_DATA_journal);

/*
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
return 0;

if (flags & BTREE_TRIGGER_gc)
return bch2_mark_metadata_bucket(trans->c, ca, b, type, sectors, flags);
else if (flags & BTREE_TRIGGER_transactional)
return commit_do(trans, NULL, NULL, 0,
__bch2_trans_mark_metadata_bucket(trans, ca, b, type, sectors));
else
BUG();
}
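
With this refactor, bch2_trans_mark_metadata_bucket() becomes the single entry point for marking superblock and journal buckets: it validates the data type, then dispatches on the trigger flags, taking the in-memory GC path for BTREE_TRIGGER_gc and a committed btree update for BTREE_TRIGGER_transactional. A caller-side sketch; the wrapper itself is hypothetical:

/* Sketch: mark one metadata bucket from either context. 'trans' and 'ca'
 * are assumed to be a live transaction and member device. */
static int mark_sb_bucket(struct btree_trans *trans, struct bch_dev *ca,
			  u64 b, bool gc)
{
	return bch2_trans_mark_metadata_bucket(trans, ca, b, BCH_DATA_sb,
			ca->mi.bucket_size,
			gc ? BTREE_TRIGGER_gc : BTREE_TRIGGER_transactional);
}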
static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
struct bch_dev *ca,
u64 start, u64 end,
enum bch_data_type type,
u64 *bucket, unsigned *bucket_sectors)
struct bch_dev *ca, u64 start, u64 end,
enum bch_data_type type, u64 *bucket, unsigned *bucket_sectors,
enum btree_iter_update_trigger_flags flags)
{
do {
u64 b = sector_to_bucket(ca, start);
@ -1185,7 +1174,7 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,

if (b != *bucket && *bucket_sectors) {
int ret = bch2_trans_mark_metadata_bucket(trans, ca, *bucket,
type, *bucket_sectors);
type, *bucket_sectors, flags);
if (ret)
return ret;

@ -1200,8 +1189,8 @@ static int bch2_trans_mark_metadata_sectors(struct btree_trans *trans,
return 0;
}

static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
struct bch_dev *ca)
static int __bch2_trans_mark_dev_sb(struct btree_trans *trans, struct bch_dev *ca,
enum btree_iter_update_trigger_flags flags)
{
struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
u64 bucket = 0;
@ -1214,21 +1203,21 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
if (offset == BCH_SB_SECTOR) {
ret = bch2_trans_mark_metadata_sectors(trans, ca,
0, BCH_SB_SECTOR,
BCH_DATA_sb, &bucket, &bucket_sectors);
BCH_DATA_sb, &bucket, &bucket_sectors, flags);
if (ret)
return ret;
}

ret = bch2_trans_mark_metadata_sectors(trans, ca, offset,
offset + (1 << layout->sb_max_size_bits),
BCH_DATA_sb, &bucket, &bucket_sectors);
BCH_DATA_sb, &bucket, &bucket_sectors, flags);
if (ret)
return ret;
}

if (bucket_sectors) {
ret = bch2_trans_mark_metadata_bucket(trans, ca,
bucket, BCH_DATA_sb, bucket_sectors);
bucket, BCH_DATA_sb, bucket_sectors, flags);
if (ret)
return ret;
}
@ -1236,7 +1225,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
for (i = 0; i < ca->journal.nr; i++) {
ret = bch2_trans_mark_metadata_bucket(trans, ca,
ca->journal.buckets[i],
BCH_DATA_journal, ca->mi.bucket_size);
BCH_DATA_journal, ca->mi.bucket_size, flags);
if (ret)
return ret;
}
@ -1244,18 +1233,20 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
return 0;
}

int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca,
enum btree_iter_update_trigger_flags flags)
{
int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(trans, ca));

int ret = bch2_trans_run(c,
__bch2_trans_mark_dev_sb(trans, ca, flags));
bch_err_fn(c, ret);
return ret;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *c,
enum btree_iter_update_trigger_flags flags)
{
for_each_online_member(c, ca) {
int ret = bch2_trans_mark_dev_sb(c, ca);
int ret = bch2_trans_mark_dev_sb(c, ca, flags);
if (ret) {
percpu_ref_put(&ca->ref);
return ret;
@ -1265,6 +1256,11 @@ int bch2_trans_mark_dev_sbs(struct bch_fs *c)
return 0;
}

int bch2_trans_mark_dev_sbs(struct bch_fs *c)
{
return bch2_trans_mark_dev_sbs_flags(c, BTREE_TRIGGER_transactional);
}

/* Disk reservations: */

#define SECTORS_CACHE 1024

@ -12,7 +12,7 @@
#include "extents.h"
#include "sb-members.h"

static inline size_t sector_to_bucket(const struct bch_dev *ca, sector_t s)
static inline u64 sector_to_bucket(const struct bch_dev *ca, sector_t s)
{
return div_u64(s, ca->mi.bucket_size);
}
@ -30,12 +30,16 @@ static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
return remainder;
}

static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
u32 *offset)
static inline u64 sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s, u32 *offset)
{
return div_u64_rem(s, ca->mi.bucket_size, offset);
}

static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
{
return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}
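
The new bucket_valid() helper checks first_bucket <= b < nbuckets with a single unsigned comparison, using a precomputed nbuckets_minus_first: if b is below first_bucket, the subtraction wraps to a huge value and the less-than test fails. A standalone illustration of the same trick, with made-up values:

#include <stdbool.h>
#include <stdint.h>

/* One unsigned compare covers both bounds; underflow wraps past the limit. */
static bool in_range(uint64_t b, uint64_t first, uint64_t nr_minus_first)
{
	return b - first < nr_minus_first;
}

/* With first = 16 and nr_minus_first = 100 (valid buckets 16..115):
 *   in_range(10, 16, 100)  == false  (10 - 16 wraps to ~UINT64_MAX)
 *   in_range(20, 16, 100)  == true
 *   in_range(116, 16, 100) == false  (100 is not < 100)
 */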
#define for_each_bucket(_b, _buckets) \
for (_b = (_buckets)->b + (_buckets)->first_bucket; \
_b < (_buckets)->b + (_buckets)->nbuckets; _b++)
@ -94,7 +98,7 @@ static inline struct bucket *gc_bucket(struct bch_dev *ca, size_t b)
{
struct bucket_array *buckets = gc_bucket_array(ca);

BUG_ON(b < buckets->first_bucket || b >= buckets->nbuckets);
BUG_ON(!bucket_valid(ca, b));
return buckets->b + b;
}

@ -111,7 +115,7 @@ static inline u8 *bucket_gen(struct bch_dev *ca, size_t b)
{
struct bucket_gens *gens = bucket_gens(ca);

BUG_ON(b < gens->first_bucket || b >= gens->nbuckets);
BUG_ON(!bucket_valid(ca, b));
return gens->b + b;
}

@ -124,7 +128,7 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
const struct bch_extent_ptr *ptr)
{
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);

return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}
@ -133,7 +137,7 @@ static inline struct bpos PTR_BUCKET_POS_OFFSET(const struct bch_fs *c,
const struct bch_extent_ptr *ptr,
u32 *bucket_offset)
{
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);

return POS(ptr->dev, sector_to_bucket_and_offset(ca, ptr->offset, bucket_offset));
}
@ -337,23 +341,21 @@ int bch2_check_bucket_ref(struct btree_trans *, struct bkey_s_c,
const struct bch_extent_ptr *,
s64, enum bch_data_type, u8, u8, u32);

int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);

int bch2_trigger_extent(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s, unsigned);
struct bkey_s_c, struct bkey_s,
enum btree_iter_update_trigger_flags);
int bch2_trigger_reservation(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s, unsigned);
struct bkey_s_c, struct bkey_s,
enum btree_iter_update_trigger_flags);

#define trigger_run_overwrite_then_insert(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({ \
int ret = 0; \
\
if (_old.k->type) \
ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT); \
ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_insert); \
if (!ret && _new.k->type) \
ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_OVERWRITE);\
ret = _fn(_trans, _btree_id, _level, _new.s_c, _flags & ~BTREE_TRIGGER_overwrite);\
ret; \
})

@ -362,9 +364,13 @@ void bch2_trans_account_disk_usage_change(struct btree_trans *);
void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);

int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *,
size_t, enum bch_data_type, unsigned);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *);
int bch2_trans_mark_metadata_bucket(struct btree_trans *, struct bch_dev *, u64,
enum bch_data_type, unsigned,
enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sb(struct bch_fs *, struct bch_dev *,
enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs_flags(struct bch_fs *,
enum btree_iter_update_trigger_flags);
int bch2_trans_mark_dev_sbs(struct bch_fs *);

static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)

@ -961,7 +961,9 @@ static const struct file_operations bch_chardev_fops = {
};

static int bch_chardev_major;
static struct class *bch_chardev_class;
static const struct class bch_chardev_class = {
.name = "bcachefs",
};
static struct device *bch_chardev;

void bch2_fs_chardev_exit(struct bch_fs *c)
@ -978,7 +980,7 @@ int bch2_fs_chardev_init(struct bch_fs *c)
if (c->minor < 0)
return c->minor;

c->chardev = device_create(bch_chardev_class, NULL,
c->chardev = device_create(&bch_chardev_class, NULL,
MKDEV(bch_chardev_major, c->minor), c,
"bcachefs%u-ctl", c->minor);
if (IS_ERR(c->chardev))
@ -989,32 +991,39 @@ int bch2_fs_chardev_init(struct bch_fs *c)

void bch2_chardev_exit(void)
{
if (!IS_ERR_OR_NULL(bch_chardev_class))
device_destroy(bch_chardev_class,
MKDEV(bch_chardev_major, U8_MAX));
if (!IS_ERR_OR_NULL(bch_chardev_class))
class_destroy(bch_chardev_class);
device_destroy(&bch_chardev_class, MKDEV(bch_chardev_major, U8_MAX));
class_unregister(&bch_chardev_class);
if (bch_chardev_major > 0)
unregister_chrdev(bch_chardev_major, "bcachefs");
}

int __init bch2_chardev_init(void)
{
int ret;

bch_chardev_major = register_chrdev(0, "bcachefs-ctl", &bch_chardev_fops);
if (bch_chardev_major < 0)
return bch_chardev_major;

bch_chardev_class = class_create("bcachefs");
if (IS_ERR(bch_chardev_class))
return PTR_ERR(bch_chardev_class);
ret = class_register(&bch_chardev_class);
if (ret)
goto major_out;

bch_chardev = device_create(bch_chardev_class, NULL,
bch_chardev = device_create(&bch_chardev_class, NULL,
MKDEV(bch_chardev_major, U8_MAX),
NULL, "bcachefs-ctl");
if (IS_ERR(bch_chardev))
return PTR_ERR(bch_chardev);
if (IS_ERR(bch_chardev)) {
ret = PTR_ERR(bch_chardev);
goto class_out;
}

return 0;

class_out:
class_unregister(&bch_chardev_class);
major_out:
unregister_chrdev(bch_chardev_major, "bcachefs-ctl");
return ret;
}

#endif /* NO_BCACHEFS_CHARDEV */
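
The chardev changes switch from the dynamically allocated class_create() API to a statically defined struct class registered with class_register(), and restructure bch2_chardev_init() so every setup step has a matching unwind label that is taken in reverse order on failure. A condensed sketch of that shape; the names are shortened stand-ins:

static const struct class example_class = {
	.name = "example",
};

static int example_major;

static int __init example_init(void)
{
	int ret;

	/* example_fops is assumed to exist elsewhere */
	example_major = register_chrdev(0, "example-ctl", &example_fops);
	if (example_major < 0)
		return example_major;

	ret = class_register(&example_class);
	if (ret)
		goto major_out;

	return 0;

major_out:
	unregister_chrdev(example_major, "example-ctl");
	return ret;
}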
@ -493,14 +493,10 @@ static void bch2_sb_crypt_to_text(struct printbuf *out, struct bch_sb *sb,
{
struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);

prt_printf(out, "KFD: %llu", BCH_CRYPT_KDF_TYPE(crypt));
prt_newline(out);
prt_printf(out, "scrypt n: %llu", BCH_KDF_SCRYPT_N(crypt));
prt_newline(out);
prt_printf(out, "scrypt r: %llu", BCH_KDF_SCRYPT_R(crypt));
prt_newline(out);
prt_printf(out, "scrypt p: %llu", BCH_KDF_SCRYPT_P(crypt));
prt_newline(out);
prt_printf(out, "KFD: %llu\n", BCH_CRYPT_KDF_TYPE(crypt));
prt_printf(out, "scrypt n: %llu\n", BCH_KDF_SCRYPT_N(crypt));
prt_printf(out, "scrypt r: %llu\n", BCH_KDF_SCRYPT_R(crypt));
prt_printf(out, "scrypt p: %llu\n", BCH_KDF_SCRYPT_P(crypt));
}

const struct bch_sb_field_ops bch_sb_field_ops_crypt = {

@ -106,7 +106,7 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,

bch2_trans_iter_init(trans, &iter, m->btree_id,
bkey_start_pos(&bch2_keylist_front(keys)->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
BTREE_ITER_slots|BTREE_ITER_intent);

while (1) {
struct bkey_s_c k;
@ -288,7 +288,7 @@ restart_drop_extra_replicas:
k.k->p, insert->k.p) ?:
bch2_bkey_set_needs_rebalance(c, insert, &op->opts) ?:
bch2_trans_update(trans, &iter, insert,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, &op->res,
NULL,
BCH_TRANS_COMMIT_no_check_rw|
@ -360,7 +360,7 @@ void bch2_data_update_exit(struct data_update *update)
if (c->opts.nocow_enabled)
bch2_bucket_nocow_unlock(&c->nocow_locks,
PTR_BUCKET_POS(c, ptr), 0);
percpu_ref_put(&bch_dev_bkey_exists(c, ptr->dev)->ref);
percpu_ref_put(&bch2_dev_bkey_exists(c, ptr->dev)->ref);
}

bch2_bkey_buf_exit(&update->k, c);
@ -386,8 +386,10 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
while (bio_sectors(bio)) {
unsigned sectors = bio_sectors(bio);

bch2_trans_begin(trans);

bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_SLOTS);
BTREE_ITER_slots);
ret = lockrestart_do(trans, ({
k = bch2_btree_iter_peek_slot(&iter);
bkey_err(k);
@ -480,15 +482,15 @@ int bch2_extent_drop_ptrs(struct btree_trans *trans,

/*
* Since we're not inserting through an extent iterator
* (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
* (BTREE_ITER_all_snapshots iterators aren't extent iterators),
* we aren't using the extent overwrite path to delete, we're
* just using the normal key deletion path:
*/
if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_IS_EXTENTS))
if (bkey_deleted(&n->k) && !(iter->flags & BTREE_ITER_is_extents))
n->k.size = 0;

return bch2_trans_relock(trans) ?:
bch2_trans_update(trans, iter, n, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
bch2_trans_update(trans, iter, n, BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

@ -540,7 +542,7 @@ int bch2_data_update_init(struct btree_trans *trans,
m->op.watermark = m->data_opts.btree_insert_flags & BCH_WATERMARK_MASK;

bkey_for_each_ptr(ptrs, ptr)
percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);
percpu_ref_get(&bch2_dev_bkey_exists(c, ptr->dev)->ref);

unsigned durability_have = 0, durability_removing = 0;

@ -652,7 +654,7 @@ err:
if ((1U << i) & ptrs_locked)
bch2_bucket_nocow_unlock(&c->nocow_locks,
PTR_BUCKET_POS(c, &p.ptr), 0);
percpu_ref_put(&bch_dev_bkey_exists(c, p.ptr.dev)->ref);
percpu_ref_put(&bch2_dev_bkey_exists(c, p.ptr.dev)->ref);
i++;
}
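
Much of the churn across data_update.c, debug.c, ec.c, and extents.c is a plain rename, bch_dev_bkey_exists() to bch2_dev_bkey_exists(), bringing the helper under the bch2_ prefix the rest of the namespace uses; call sites are otherwise unchanged. Its rough shape, as assumed here rather than quoted from bcachefs.h:

/* Assumed sketch: fetch the bch_dev for a device index that a validated
 * bkey already guarantees to exist. Not the verbatim definition. */
static inline struct bch_dev *bch2_dev_bkey_exists(const struct bch_fs *c,
						   unsigned dev)
{
	EBUG_ON(dev >= c->sb.nr_devices || !c->devs[dev]);
	return rcu_dereference_check(c->devs[dev], 1);
}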
@ -37,7 +37,7 @@ static bool bch2_btree_verify_replica(struct bch_fs *c, struct btree *b,
struct btree_node *n_ondisk = c->verify_ondisk;
struct btree_node *n_sorted = c->verify_data->data;
struct bset *sorted, *inmemory = &b->data->keys;
struct bch_dev *ca = bch_dev_bkey_exists(c, pick.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
struct bio *bio;
bool failed = false, saw_error = false;

@ -194,7 +194,7 @@ void bch2_btree_node_ondisk_to_text(struct printbuf *out, struct bch_fs *c,
return;
}

ca = bch_dev_bkey_exists(c, pick.ptr.dev);
ca = bch2_dev_bkey_exists(c, pick.ptr.dev);
if (!bch2_dev_get_ioref(ca, READ)) {
prt_printf(out, "error getting device to read from: not online\n");
return;
@ -375,8 +375,8 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
return flush_buf(i) ?:
bch2_trans_run(i->c,
for_each_btree_key(trans, iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots, k, ({
bch2_bkey_val_to_text(&i->buf, i->c, k);
prt_newline(&i->buf);
bch2_trans_unlock(trans);
@ -459,8 +459,8 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
return flush_buf(i) ?:
bch2_trans_run(i->c,
for_each_btree_key(trans, iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots, k, ({
struct btree_path_level *l =
&btree_iter_path(trans, &iter)->l[0];
struct bkey_packed *_k =
@ -492,51 +492,26 @@ static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 32);

prt_printf(out, "%px btree=%s l=%u ",
b,
bch2_btree_id_str(b->c.btree_id),
b->c.level);
prt_newline(out);
prt_printf(out, "%px btree=%s l=%u\n", b, bch2_btree_id_str(b->c.btree_id), b->c.level);

printbuf_indent_add(out, 2);

bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
prt_newline(out);

prt_printf(out, "flags: ");
prt_tab(out);
prt_printf(out, "flags:\t");
prt_bitflags(out, bch2_btree_node_flags, b->flags);
prt_newline(out);

prt_printf(out, "pcpu read locks: ");
prt_tab(out);
prt_printf(out, "%u", b->c.lock.readers != NULL);
prt_newline(out);
prt_printf(out, "pcpu read locks:\t%u\n", b->c.lock.readers != NULL);
prt_printf(out, "written:\t%u\n", b->written);
prt_printf(out, "writes blocked:\t%u\n", !list_empty_careful(&b->write_blocked));
prt_printf(out, "will make reachable:\t%lx\n", b->will_make_reachable);

prt_printf(out, "written:");
prt_tab(out);
prt_printf(out, "%u", b->written);
prt_newline(out);

prt_printf(out, "writes blocked:");
prt_tab(out);
prt_printf(out, "%u", !list_empty_careful(&b->write_blocked));
prt_newline(out);

prt_printf(out, "will make reachable:");
prt_tab(out);
prt_printf(out, "%lx", b->will_make_reachable);
prt_newline(out);

prt_printf(out, "journal pin %px:", &b->writes[0].journal);
prt_tab(out);
prt_printf(out, "%llu", b->writes[0].journal.seq);
prt_newline(out);

prt_printf(out, "journal pin %px:", &b->writes[1].journal);
prt_tab(out);
prt_printf(out, "%llu", b->writes[1].journal.seq);
prt_newline(out);
prt_printf(out, "journal pin %px:\t%llu\n",
&b->writes[0].journal, b->writes[0].journal.seq);
prt_printf(out, "journal pin %px:\t%llu\n",
&b->writes[1].journal, b->writes[1].journal.seq);

printbuf_indent_sub(out, 2);
}
@ -625,8 +600,7 @@ restart:

bch2_btree_trans_to_text(&i->buf, trans);

prt_printf(&i->buf, "backtrace:");
prt_newline(&i->buf);
prt_printf(&i->buf, "backtrace:\n");
printbuf_indent_add(&i->buf, 2);
bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
printbuf_indent_sub(&i->buf, 2);
@ -782,25 +756,20 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
!bch2_btree_transaction_fns[i->iter])
break;

prt_printf(&i->buf, "%s: ", bch2_btree_transaction_fns[i->iter]);
prt_newline(&i->buf);
prt_printf(&i->buf, "%s:\n", bch2_btree_transaction_fns[i->iter]);
printbuf_indent_add(&i->buf, 2);

mutex_lock(&s->lock);

prt_printf(&i->buf, "Max mem used: %u", s->max_mem);
prt_newline(&i->buf);

prt_printf(&i->buf, "Transaction duration:");
prt_newline(&i->buf);
prt_printf(&i->buf, "Max mem used: %u\n", s->max_mem);
prt_printf(&i->buf, "Transaction duration:\n");

printbuf_indent_add(&i->buf, 2);
bch2_time_stats_to_text(&i->buf, &s->duration);
printbuf_indent_sub(&i->buf, 2);

if (IS_ENABLED(CONFIG_BCACHEFS_LOCK_TIME_STATS)) {
prt_printf(&i->buf, "Lock hold times:");
prt_newline(&i->buf);
prt_printf(&i->buf, "Lock hold times:\n");

printbuf_indent_add(&i->buf, 2);
bch2_time_stats_to_text(&i->buf, &s->lock_hold_times);
@ -808,8 +777,7 @@ static ssize_t btree_transaction_stats_read(struct file *file, char __user *buf,
}

if (s->max_paths_text) {
prt_printf(&i->buf, "Maximum allocated btree paths (%u):", s->nr_max_paths);
prt_newline(&i->buf);
prt_printf(&i->buf, "Maximum allocated btree paths (%u):\n", s->nr_max_paths);

printbuf_indent_add(&i->buf, 2);
prt_str_indented(&i->buf, s->max_paths_text);

@ -205,7 +205,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
const struct bch_hash_info *hash_info,
u8 type, const struct qstr *name, u64 dst_inum,
u64 *dir_offset,
bch_str_hash_flags_t str_hash_flags)
enum btree_iter_update_trigger_flags flags)
{
subvol_inum dir_inum = { .subvol = dir_subvol, .inum = dir };
struct bkey_i_dirent *dirent;
@ -220,9 +220,8 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
dirent->k.p.snapshot = snapshot;

ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
dir_inum, snapshot,
&dirent->k_i, str_hash_flags,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
dir_inum, snapshot, &dirent->k_i,
flags|BTREE_UPDATE_internal_snapshot_node);
*dir_offset = dirent->k.p.offset;

return ret;
@ -232,7 +231,7 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
const struct bch_hash_info *hash_info,
u8 type, const struct qstr *name, u64 dst_inum,
u64 *dir_offset,
bch_str_hash_flags_t str_hash_flags)
enum btree_iter_update_trigger_flags flags)
{
struct bkey_i_dirent *dirent;
int ret;
@ -243,7 +242,7 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
return ret;

ret = bch2_hash_set(trans, bch2_dirent_hash_desc, hash_info,
dir, &dirent->k_i, str_hash_flags);
dir, &dirent->k_i, flags);
*dir_offset = dirent->k.p.offset;

return ret;
@ -272,7 +271,7 @@ int bch2_dirent_read_target(struct btree_trans *trans, subvol_inum dir,
} else {
target->subvol = le32_to_cpu(d.v->d_child_subvol);

ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_CACHED, &s);
ret = bch2_subvolume_get(trans, target->subvol, true, BTREE_ITER_cached, &s);

target->inum = le64_to_cpu(s.inode);
}
@ -301,13 +300,9 @@ int bch2_dirent_rename(struct btree_trans *trans,
memset(dst_inum, 0, sizeof(*dst_inum));

/* Lookup src: */
ret = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
src_hash, src_dir, src_name,
BTREE_ITER_INTENT);
if (ret)
goto out;

old_src = bch2_btree_iter_peek_slot(&src_iter);
old_src = bch2_hash_lookup(trans, &src_iter, bch2_dirent_hash_desc,
src_hash, src_dir, src_name,
BTREE_ITER_intent);
ret = bkey_err(old_src);
if (ret)
goto out;
@ -329,13 +324,9 @@ int bch2_dirent_rename(struct btree_trans *trans,
if (ret)
goto out;
} else {
ret = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
dst_hash, dst_dir, dst_name,
BTREE_ITER_INTENT);
if (ret)
goto out;

old_dst = bch2_btree_iter_peek_slot(&dst_iter);
old_dst = bch2_hash_lookup(trans, &dst_iter, bch2_dirent_hash_desc,
dst_hash, dst_dir, dst_name,
BTREE_ITER_intent);
ret = bkey_err(old_dst);
if (ret)
goto out;
@ -450,7 +441,7 @@ out_set_src:
if (delete_src) {
bch2_btree_iter_set_snapshot(&src_iter, old_src.k->p.snapshot);
ret = bch2_btree_iter_traverse(&src_iter) ?:
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
bch2_btree_delete_at(trans, &src_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
@ -458,7 +449,7 @@ out_set_src:
if (delete_dst) {
bch2_btree_iter_set_snapshot(&dst_iter, old_dst.k->p.snapshot);
ret = bch2_btree_iter_traverse(&dst_iter) ?:
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
bch2_btree_delete_at(trans, &dst_iter, BTREE_UPDATE_internal_snapshot_node);
if (ret)
goto out;
}
@ -479,13 +470,9 @@ int bch2_dirent_lookup_trans(struct btree_trans *trans,
const struct qstr *name, subvol_inum *inum,
unsigned flags)
{
int ret = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
hash_info, dir, name, flags);
if (ret)
return ret;

struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
hash_info, dir, name, flags);
int ret = bkey_err(k);
if (ret)
goto err;
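
As the lookup hunks above show, bch2_hash_lookup() now hands back the matching bkey_s_c directly, with any error folded into the key, instead of returning an int and leaving the caller to call bch2_btree_iter_peek_slot() separately. A hedged sketch of the new calling convention:

/* Sketch: one call yields error-or-key; bkey_err() extracts the error. */
struct btree_iter iter;
struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
				     hash_info, dir, name, BTREE_ITER_intent);
int ret = bkey_err(k);
if (ret)
	return ret;
/* ... use k, then bch2_trans_iter_exit(trans, &iter) ... */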
@ -541,16 +528,26 @@ int bch2_empty_dir_trans(struct btree_trans *trans, subvol_inum dir)
bch2_empty_dir_snapshot(trans, dir.inum, dir.subvol, snapshot);
}

static int bch2_dir_emit(struct dir_context *ctx, struct bkey_s_c_dirent d, subvol_inum target)
{
struct qstr name = bch2_dirent_get_name(d);
bool ret = dir_emit(ctx, name.name,
name.len,
target.inum,
vfs_d_type(d.v->d_type));
if (ret)
ctx->pos = d.k->p.offset + 1;
return ret;
}

int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_s_c_dirent dirent;
subvol_inum target;
u32 snapshot;
struct bkey_buf sk;
struct qstr name;
int ret;

bch2_bkey_buf_init(&sk);
@ -567,7 +564,9 @@ retry:
if (k.k->type != KEY_TYPE_dirent)
continue;

dirent = bkey_s_c_to_dirent(k);
/* dir_emit() can fault and block: */
bch2_bkey_buf_reassemble(&sk, c, k);
struct bkey_s_c_dirent dirent = bkey_i_to_s_c_dirent(sk.k);

ret = bch2_dirent_read_target(trans, inum, dirent, &target);
if (ret < 0)
@ -575,28 +574,22 @@ retry:
if (ret)
continue;

/* dir_emit() can fault and block: */
bch2_bkey_buf_reassemble(&sk, c, k);
dirent = bkey_i_to_s_c_dirent(sk.k);
bch2_trans_unlock(trans);

name = bch2_dirent_get_name(dirent);

ctx->pos = dirent.k->p.offset;
if (!dir_emit(ctx, name.name,
name.len,
target.inum,
vfs_d_type(dirent.v->d_type)))
break;
ctx->pos = dirent.k->p.offset + 1;

/*
* read_target looks up subvolumes, we can overflow paths if the
* directory has many subvolumes in it
*
* XXX: btree_trans_too_many_iters() is something we'd like to
* get rid of, and there's no good reason to be using it here
* except that we don't yet have a for_each_btree_key() helper
* that does subvolume_get_snapshot().
*/
ret = btree_trans_too_many_iters(trans);
if (ret)
ret = drop_locks_do(trans,
bch2_dir_emit(ctx, dirent, target)) ?:
btree_trans_too_many_iters(trans);
if (ret) {
ret = ret < 0 ? ret : 0;
break;
}
}
bch2_trans_iter_exit(trans, &iter);
err:
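
The readdir rework replaces the manual unlock-before-dir_emit() dance with drop_locks_do(), which evaluates an expression with the transaction unlocked and retakes the locks afterwards; the new bch2_dir_emit() wrapper only advances ctx->pos once the VFS has accepted the entry. Roughly what drop_locks_do() expands to, as a sketch rather than quoted source:

/* Sketch: run a fault-prone expression without btree locks held;
 * a relock failure is folded into the returned value. */
#define drop_locks_do(_trans, _do)					\
({									\
	bch2_trans_unlock(_trans);					\
	(_do) ?: bch2_trans_relock(_trans);				\
})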
@ -38,11 +38,11 @@ int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
int bch2_dirent_create_snapshot(struct btree_trans *, u32, u64, u32,
const struct bch_hash_info *, u8,
const struct qstr *, u64, u64 *,
bch_str_hash_flags_t);
enum btree_iter_update_trigger_flags);
int bch2_dirent_create(struct btree_trans *, subvol_inum,
const struct bch_hash_info *, u8,
const struct qstr *, u64, u64 *,
bch_str_hash_flags_t);
enum btree_iter_update_trigger_flags);

static inline unsigned vfs_d_type(unsigned type)
{

@ -177,7 +177,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, i);
struct bch_disk_group_cpu *dst;

if (!bch2_member_exists(&m))
if (!bch2_member_alive(&m))
continue;

g = BCH_MEMBER_GROUP(&m);
@ -588,7 +588,7 @@ static void bch2_target_to_text_sb(struct printbuf *out, struct bch_sb *sb, unsi
case TARGET_DEV: {
struct bch_member m = bch2_sb_member_get(sb, t.dev);

if (bch2_dev_exists(sb, t.dev)) {
if (bch2_member_exists(sb, t.dev)) {
prt_printf(out, "Device ");
pr_uuid(out, m.uuid.b);
prt_printf(out, " (%u)", t.dev);

@ -244,7 +244,7 @@ err:
static int mark_stripe_bucket(struct btree_trans *trans,
struct bkey_s_c k,
unsigned ptr_idx,
unsigned flags)
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
@ -253,12 +253,12 @@ static int mark_stripe_bucket(struct btree_trans *trans,
enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
s64 sectors = parity ? le16_to_cpu(s->sectors) : 0;
const struct bch_extent_ptr *ptr = s->ptrs + ptr_idx;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
struct bucket old, new, *g;
struct printbuf buf = PRINTBUF;
int ret = 0;

BUG_ON(!(flags & BTREE_TRIGGER_GC));
BUG_ON(!(flags & BTREE_TRIGGER_gc));

/* * XXX doesn't handle deletion */

@ -302,7 +302,7 @@ err:
int bch2_trigger_stripe(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s _new,
unsigned flags)
enum btree_iter_update_trigger_flags flags)
{
struct bkey_s_c new = _new.s_c;
struct bch_fs *c = trans->c;
@ -312,7 +312,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
? bkey_s_c_to_stripe(new).v : NULL;

if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (flags & BTREE_TRIGGER_transactional) {
/*
* If the pointers aren't changing, we don't need to do anything:
*/
@ -371,7 +371,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
}
}

if (flags & BTREE_TRIGGER_ATOMIC) {
if (flags & BTREE_TRIGGER_atomic) {
struct stripe *m = genradix_ptr(&c->stripes, idx);

if (!m) {
@ -410,7 +410,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
}
}

if (flags & BTREE_TRIGGER_GC) {
if (flags & BTREE_TRIGGER_gc) {
struct gc_stripe *m =
genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);

@ -609,7 +609,7 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)

if (bch2_crc_cmp(want, got)) {
struct printbuf err = PRINTBUF;
struct bch_dev *ca = bch_dev_bkey_exists(c, v->ptrs[i].dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, v->ptrs[i].dev);

prt_str(&err, "stripe ");
bch2_csum_err_msg(&err, v->csum_type, want, got);
@ -705,7 +705,7 @@ static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
unsigned offset = 0, bytes = buf->size << 9;
struct bch_extent_ptr *ptr = &v->ptrs[idx];
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
? BCH_DATA_user
: BCH_DATA_parity;
@ -769,7 +769,7 @@ static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
int ret;

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
POS(0, idx), BTREE_ITER_SLOTS);
POS(0, idx), BTREE_ITER_slots);
ret = bkey_err(k);
if (ret)
goto err;
@ -1060,7 +1060,7 @@ static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
int ret;

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes, POS(0, idx),
BTREE_ITER_INTENT);
BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
goto err;
@ -1131,7 +1131,7 @@ static int ec_stripe_key_update(struct btree_trans *trans,
int ret;

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
new->k.p, BTREE_ITER_INTENT);
new->k.p, BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
goto err;
@ -1189,7 +1189,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
int ret, dev, block;

ret = bch2_get_next_backpointer(trans, bucket, gen,
bp_pos, &bp, BTREE_ITER_CACHED);
bp_pos, &bp, BTREE_ITER_cached);
if (ret)
return ret;
if (bpos_eq(*bp_pos, SPOS_MAX))
@ -1214,7 +1214,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
return -EIO;
}

k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_INTENT);
k = bch2_backpointer_get_key(trans, &iter, *bp_pos, bp, BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
return ret;
@ -1321,7 +1321,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
unsigned block,
struct open_bucket *ob)
{
struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ob->dev);
unsigned offset = ca->mi.bucket_size - ob->sectors_free;
int ret;

@ -1527,7 +1527,7 @@ void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)

BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);

ca = bch_dev_bkey_exists(c, ob->dev);
ca = bch2_dev_bkey_exists(c, ob->dev);
offset = ca->mi.bucket_size - ob->sectors_free;

return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
@ -1937,7 +1937,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
}

for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
if (start_pos.offset) {
start_pos = min_pos;
@ -2127,7 +2127,7 @@ int bch2_stripes_read(struct bch_fs *c)
{
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_PREFETCH, k, ({
BTREE_ITER_prefetch, k, ({
if (k.k->type != KEY_TYPE_stripe)
continue;

@ -13,7 +13,8 @@ int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s, unsigned);
struct bkey_s_c, struct bkey_s,
enum btree_iter_update_trigger_flags);

#define bch2_bkey_ops_stripe ((struct bkey_ops) { \
.key_invalid = bch2_stripe_invalid, \

@ -176,6 +176,21 @@ static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
return s;
}

/* s/fix?/fixing/ s/recreate?/recreating/ */
static void prt_actioning(struct printbuf *out, const char *action)
{
unsigned len = strlen(action);

BUG_ON(action[len - 1] != '?');
--len;

if (action[len - 1] == 'e')
--len;

prt_bytes(out, action, len);
prt_str(out, "ing");
}

int bch2_fsck_err(struct bch_fs *c,
enum bch_fsck_flags flags,
enum bch_sb_error_id err,
@ -186,6 +201,7 @@ int bch2_fsck_err(struct bch_fs *c,
bool print = true, suppressing = false, inconsistent = false;
struct printbuf buf = PRINTBUF, *out = &buf;
int ret = -BCH_ERR_fsck_ignore;
const char *action_orig = "fix?", *action = action_orig;

if ((flags & FSCK_CAN_FIX) &&
test_bit(err, c->sb.errors_silent))
@ -197,6 +213,19 @@ int bch2_fsck_err(struct bch_fs *c,
prt_vprintf(out, fmt, args);
va_end(args);

/* Custom fix/continue/recreate/etc.? */
if (out->buf[out->pos - 1] == '?') {
const char *p = strrchr(out->buf, ',');
if (p) {
out->pos = p - out->buf;
action = kstrdup(p + 2, GFP_KERNEL);
if (!action) {
ret = -ENOMEM;
goto err;
}
}
}

mutex_lock(&c->fsck_error_msgs_lock);
s = fsck_err_get(c, fmt);
if (s) {
@ -208,12 +237,16 @@ int bch2_fsck_err(struct bch_fs *c,
if (s->last_msg && !strcmp(buf.buf, s->last_msg)) {
ret = s->ret;
mutex_unlock(&c->fsck_error_msgs_lock);
printbuf_exit(&buf);
return ret;
goto err;
}

kfree(s->last_msg);
s->last_msg = kstrdup(buf.buf, GFP_KERNEL);
if (!s->last_msg) {
mutex_unlock(&c->fsck_error_msgs_lock);
ret = -ENOMEM;
goto err;
}

if (c->opts.ratelimit_errors &&
!(flags & FSCK_NO_RATELIMIT) &&
@ -239,7 +272,8 @@ int bch2_fsck_err(struct bch_fs *c,
inconsistent = true;
ret = -BCH_ERR_fsck_errors_not_fixed;
} else if (flags & FSCK_CAN_FIX) {
prt_str(out, ", fixing");
prt_str(out, ", ");
prt_actioning(out, action);
ret = -BCH_ERR_fsck_fix;
} else {
prt_str(out, ", continuing");
@ -254,16 +288,16 @@ int bch2_fsck_err(struct bch_fs *c,
: c->opts.fix_errors;

if (fix == FSCK_FIX_ask) {
int ask;
prt_str(out, ", ");
prt_str(out, action);

prt_str(out, ": fix?");
if (bch2_fs_stdio_redirect(c))
bch2_print(c, "%s", out->buf);
else
bch2_print_string_as_lines(KERN_ERR, out->buf);
print = false;

ask = bch2_fsck_ask_yn(c);
int ask = bch2_fsck_ask_yn(c);

if (ask >= YN_ALLNO && s)
s->fix = ask == YN_ALLNO
@ -276,10 +310,12 @@ int bch2_fsck_err(struct bch_fs *c,
} else if (fix == FSCK_FIX_yes ||
(c->opts.nochanges &&
!(flags & FSCK_CAN_IGNORE))) {
prt_str(out, ", fixing");
prt_str(out, ", ");
prt_actioning(out, action);
ret = -BCH_ERR_fsck_fix;
} else {
prt_str(out, ", not fixing");
prt_str(out, ", not ");
prt_actioning(out, action);
}
} else if (flags & FSCK_NEED_FSCK) {
prt_str(out, " (run fsck to correct)");
@ -311,8 +347,6 @@ int bch2_fsck_err(struct bch_fs *c,

mutex_unlock(&c->fsck_error_msgs_lock);

printbuf_exit(&buf);

if (inconsistent)
bch2_inconsistent_error(c);

@ -322,7 +356,10 @@ int bch2_fsck_err(struct bch_fs *c,
set_bit(BCH_FS_errors_not_fixed, &c->flags);
set_bit(BCH_FS_error, &c->flags);
}

err:
if (action != action_orig)
kfree(action);
printbuf_exit(&buf);
return ret;
}
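
prt_actioning() exists so a caller can pick the action verb in the error message itself: a format string ending in ", <verb>?" is split off at the last comma by bch2_fsck_err(), and on repair the verb is echoed back as "<verb>ing" ("fix?" becomes "fixing", "recreate?" becomes "recreating"). A hedged usage sketch; the error id and helper below are invented for illustration:

/* Sketch: the trailing ", recreate?" is consumed by bch2_fsck_err() and
 * replayed as "recreating" when the repair is applied. Names hypothetical. */
if (fsck_err_on(d.v->d_type != expected_type, c, dirent_d_type_wrong,
		"dirent has wrong d_type, recreate?"))
	ret = recreate_dirent(trans, d);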
@ -72,7 +72,7 @@ static int count_iters_for_insert(struct btree_trans *trans,

for_each_btree_key_norestart(trans, iter,
BTREE_ID_reflink, POS(0, idx + offset),
BTREE_ITER_SLOTS, r_k, ret2) {
BTREE_ITER_slots, r_k, ret2) {
if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
break;

@ -79,8 +79,8 @@ static inline bool ptr_better(struct bch_fs *c,
const struct extent_ptr_decoded p2)
{
if (likely(!p1.idx && !p2.idx)) {
struct bch_dev *dev1 = bch_dev_bkey_exists(c, p1.ptr.dev);
struct bch_dev *dev2 = bch_dev_bkey_exists(c, p2.ptr.dev);
struct bch_dev *dev1 = bch2_dev_bkey_exists(c, p1.ptr.dev);
struct bch_dev *dev2 = bch2_dev_bkey_exists(c, p2.ptr.dev);

u64 l1 = atomic64_read(&dev1->cur_latency[READ]);
u64 l2 = atomic64_read(&dev2->cur_latency[READ]);
@ -123,7 +123,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
if (p.ptr.unwritten)
return 0;

ca = bch_dev_bkey_exists(c, p.ptr.dev);
ca = bch2_dev_bkey_exists(c, p.ptr.dev);

/*
* If there are any dirty pointers it's an error if we can't
@ -201,6 +201,11 @@ int bch2_btree_ptr_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
c, err, btree_ptr_v2_min_key_bad,
"min_key > key");

if (flags & BKEY_INVALID_WRITE)
bkey_fsck_err_on(!bp.v->sectors_written,
c, err, btree_ptr_v2_written_0,
"sectors_written == 0");

ret = bch2_bkey_ptrs_invalid(c, k, flags, err);
fsck_err:
return ret;
@ -278,7 +283,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
|
||||
return false;
|
||||
|
||||
/* Extents may not straddle buckets: */
|
||||
ca = bch_dev_bkey_exists(c, lp.ptr.dev);
|
||||
ca = bch2_dev_bkey_exists(c, lp.ptr.dev);
|
||||
if (PTR_BUCKET_NR(ca, &lp.ptr) != PTR_BUCKET_NR(ca, &rp.ptr))
|
||||
return false;
|
||||
|
||||
@ -667,14 +672,14 @@ static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent
|
||||
|
||||
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
|
||||
{
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
|
||||
struct bch_dev *ca = bch2_dev_bkey_exists(c, p->ptr.dev);
|
||||
|
||||
return __extent_ptr_durability(ca, p);
|
||||
}
|
||||
|
||||
unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
|
||||
{
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, p->ptr.dev);
|
||||
struct bch_dev *ca = bch2_dev_bkey_exists(c, p->ptr.dev);
|
||||
|
||||
if (ca->mi.state == BCH_MEMBER_STATE_failed)
|
||||
return 0;
|
||||
@ -864,7 +869,7 @@ bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
|
||||
bkey_for_each_ptr(ptrs, ptr)
|
||||
if (bch2_dev_in_target(c, ptr->dev, target) &&
|
||||
(!ptr->cached ||
|
||||
!ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
|
||||
!ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr)))
|
||||
return true;
|
||||
|
||||
return false;
|
||||
@ -973,17 +978,16 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
|
||||
|
||||
bch2_bkey_drop_ptrs(k, ptr,
|
||||
ptr->cached &&
|
||||
ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr));
|
||||
ptr_stale(bch2_dev_bkey_exists(c, ptr->dev), ptr));
|
||||
|
||||
return bkey_deleted(k.k);
|
||||
}
|
||||
|
||||
void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struct bch_extent_ptr *ptr)
|
||||
{
|
||||
struct bch_dev *ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
|
||||
? bch_dev_bkey_exists(c, ptr->dev)
|
||||
: NULL;
|
||||
|
||||
out->atomic++;
|
||||
rcu_read_lock();
|
||||
struct bch_dev *ca = bch2_dev_safe(c, ptr->dev);
|
||||
if (!ca) {
|
||||
prt_printf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
|
||||
(u64) ptr->offset, ptr->gen,
|
||||
@ -998,11 +1002,11 @@ void bch2_extent_ptr_to_text(struct printbuf *out, struct bch_fs *c, const struc
|
||||
prt_str(out, " cached");
|
||||
if (ptr->unwritten)
|
||||
prt_str(out, " unwritten");
|
||||
if (b >= ca->mi.first_bucket &&
|
||||
b < ca->mi.nbuckets &&
|
||||
ptr_stale(ca, ptr))
|
||||
if (bucket_valid(ca, b) && ptr_stale(ca, ptr))
|
||||
prt_printf(out, " stale");
|
||||
}
|
||||
rcu_read_unlock();
|
||||
--out->atomic;
|
||||
}
|
||||
|
||||
void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
|
||||
@ -1081,7 +1085,7 @@ static int extent_ptr_invalid(struct bch_fs *c,
|
||||
struct bch_dev *ca;
|
||||
int ret = 0;
|
||||
|
||||
if (!bch2_dev_exists2(c, ptr->dev)) {
|
||||
if (!bch2_dev_exists(c, ptr->dev)) {
|
||||
/*
|
||||
* If we're in the write path this key might have already been
|
||||
* overwritten, and we could be seeing a device that doesn't
|
||||
@ -1094,7 +1098,7 @@ static int extent_ptr_invalid(struct bch_fs *c,
|
||||
"pointer to invalid device (%u)", ptr->dev);
|
||||
}
|
||||
|
||||
ca = bch_dev_bkey_exists(c, ptr->dev);
|
||||
ca = bch2_dev_bkey_exists(c, ptr->dev);
|
||||
bkey_for_each_ptr(ptrs, ptr2)
|
||||
bkey_fsck_err_on(ptr != ptr2 && ptr->dev == ptr2->dev, c, err,
|
||||
ptr_to_duplicate_device,
|
||||
|
@ -171,7 +171,7 @@ void eytzinger0_sort_r(void *base, size_t n, size_t size,
swap_r_func_t swap_func,
const void *priv)
{
int i, c, r;
int i, j, k;

/* called from 'sort' without swap function, let's pick the default */
if (swap_func == SWAP_WRAPPER && !((struct wrapper *)priv)->swap_func)
@ -188,17 +188,22 @@ void eytzinger0_sort_r(void *base, size_t n, size_t size,

/* heapify */
for (i = n / 2 - 1; i >= 0; --i) {
for (r = i; r * 2 + 1 < n; r = c) {
c = r * 2 + 1;
/* Find the sift-down path all the way to the leaves. */
for (j = i; k = j * 2 + 1, k + 1 < n;)
j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

if (c + 1 < n &&
eytzinger0_do_cmp(base, n, size, cmp_func, priv, c, c + 1) < 0)
c++;
/* Special case for the last leaf with no sibling. */
if (j * 2 + 2 == n)
j = j * 2 + 1;

if (eytzinger0_do_cmp(base, n, size, cmp_func, priv, r, c) >= 0)
break;
/* Backtrack to the correct location. */
while (j != i && eytzinger0_do_cmp(base, n, size, cmp_func, priv, i, j) >= 0)
j = (j - 1) / 2;

eytzinger0_do_swap(base, n, size, swap_func, priv, r, c);
/* Shift the element into its correct place. */
for (k = j; j != i;) {
j = (j - 1) / 2;
eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
}
}

@ -206,17 +211,22 @@ void eytzinger0_sort_r(void *base, size_t n, size_t size,
for (i = n - 1; i > 0; --i) {
eytzinger0_do_swap(base, n, size, swap_func, priv, 0, i);

for (r = 0; r * 2 + 1 < i; r = c) {
c = r * 2 + 1;
/* Find the sift-down path all the way to the leaves. */
for (j = 0; k = j * 2 + 1, k + 1 < i;)
j = eytzinger0_do_cmp(base, n, size, cmp_func, priv, k, k + 1) > 0 ? k : k + 1;

if (c + 1 < i &&
eytzinger0_do_cmp(base, n, size, cmp_func, priv, c, c + 1) < 0)
c++;
/* Special case for the last leaf with no sibling. */
if (j * 2 + 2 == i)
j = j * 2 + 1;

if (eytzinger0_do_cmp(base, n, size, cmp_func, priv, r, c) >= 0)
break;
/* Backtrack to the correct location. */
while (j && eytzinger0_do_cmp(base, n, size, cmp_func, priv, 0, j) >= 0)
j = (j - 1) / 2;

eytzinger0_do_swap(base, n, size, swap_func, priv, r, c);
/* Shift the element into its correct place. */
for (k = j; j;) {
j = (j - 1) / 2;
eytzinger0_do_swap(base, n, size, swap_func, priv, j, k);
}
}
}
@ -232,3 +242,64 @@ void eytzinger0_sort(void *base, size_t n, size_t size,

return eytzinger0_sort_r(base, n, size, _CMP_WRAPPER, SWAP_WRAPPER, &w);
}

#if 0
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/ktime.h>

static u64 cmp_count;

static int mycmp(const void *a, const void *b)
{
u32 _a = *(u32 *)a;
u32 _b = *(u32 *)b;

cmp_count++;
if (_a < _b)
return -1;
else if (_a > _b)
return 1;
else
return 0;
}

static int test(void)
{
size_t N, i;
ktime_t start, end;
s64 delta;
u32 *arr;

for (N = 10000; N <= 100000; N += 10000) {
arr = kmalloc_array(N, sizeof(u32), GFP_KERNEL);
cmp_count = 0;

for (i = 0; i < N; i++)
arr[i] = get_random_u32();

start = ktime_get();
eytzinger0_sort(arr, N, sizeof(u32), mycmp, NULL);
end = ktime_get();

delta = ktime_us_delta(end, start);
printk(KERN_INFO "time: %lld\n", delta);
printk(KERN_INFO "comparisons: %lld\n", cmp_count);

u32 prev = 0;

eytzinger0_for_each(i, N) {
if (prev > arr[i])
goto err;
prev = arr[i];
}

kfree(arr);
}
return 0;

err:
kfree(arr);
return -1;
}
#endif

@ -42,7 +42,7 @@ int bch2_create_trans(struct btree_trans *trans,
if (ret)
goto err;

ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
if (ret)
goto err;

@ -70,7 +70,7 @@ int bch2_create_trans(struct btree_trans *trans,
struct bch_subvolume s;

ret = bch2_subvolume_get(trans, snapshot_src.subvol, true,
BTREE_ITER_CACHED, &s);
BTREE_ITER_cached, &s);
if (ret)
goto err;

@ -78,7 +78,7 @@ int bch2_create_trans(struct btree_trans *trans,
}

ret = bch2_inode_peek(trans, &inode_iter, new_inode, snapshot_src,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
if (ret)
goto err;

@ -163,7 +163,7 @@ int bch2_create_trans(struct btree_trans *trans,
name,
dir_target,
&dir_offset,
BCH_HASH_SET_MUST_CREATE);
STR_HASH_must_create);
if (ret)
goto err;

@ -171,7 +171,7 @@ int bch2_create_trans(struct btree_trans *trans,
new_inode->bi_dir_offset = dir_offset;
}

inode_iter.flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
inode_iter.flags &= ~BTREE_ITER_all_snapshots;
bch2_btree_iter_set_snapshot(&inode_iter, snapshot);

ret = bch2_btree_iter_traverse(&inode_iter) ?:
@ -198,7 +198,7 @@ int bch2_link_trans(struct btree_trans *trans,
if (dir.subvol != inum.subvol)
return -EXDEV;

ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
if (ret)
goto err;

@ -207,7 +207,7 @@ int bch2_link_trans(struct btree_trans *trans,
if (ret)
return ret;

ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
if (ret)
goto err;

@ -223,7 +223,7 @@ int bch2_link_trans(struct btree_trans *trans,
ret = bch2_dirent_create(trans, dir, &dir_hash,
mode_to_type(inode_u->bi_mode),
name, inum.inum, &dir_offset,
BCH_HASH_SET_MUST_CREATE);
STR_HASH_must_create);
if (ret)
goto err;

@ -255,19 +255,19 @@ int bch2_unlink_trans(struct btree_trans *trans,
struct bkey_s_c k;
int ret;

ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_INTENT);
ret = bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent);
if (ret)
goto err;

dir_hash = bch2_hash_info_init(c, dir_u);

ret = bch2_dirent_lookup_trans(trans, &dirent_iter, dir, &dir_hash,
name, &inum, BTREE_ITER_INTENT);
name, &inum, BTREE_ITER_intent);
if (ret)
goto err;

ret = bch2_inode_peek(trans, &inode_iter, inode_u, inum,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
if (ret)
goto err;

@ -322,7 +322,7 @@ int bch2_unlink_trans(struct btree_trans *trans,

ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash, &dirent_iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
BTREE_UPDATE_internal_snapshot_node) ?:
bch2_inode_write(trans, &dir_iter, dir_u) ?:
bch2_inode_write(trans, &inode_iter, inode_u);
err:
@ -363,7 +363,7 @@ static int subvol_update_parent(struct btree_trans *trans, u32 subvol, u32 new_p
struct bkey_i_subvolume *s =
bch2_bkey_get_mut_typed(trans, &iter,
BTREE_ID_subvolumes, POS(0, subvol),
BTREE_ITER_CACHED, subvolume);
BTREE_ITER_cached, subvolume);
int ret = PTR_ERR_OR_ZERO(s);
if (ret)
return ret;
@ -394,7 +394,7 @@ int bch2_rename_trans(struct btree_trans *trans,
int ret;

ret = bch2_inode_peek(trans, &src_dir_iter, src_dir_u, src_dir,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
if (ret)
goto err;

@ -403,7 +403,7 @@ int bch2_rename_trans(struct btree_trans *trans,
if (dst_dir.inum != src_dir.inum ||
dst_dir.subvol != src_dir.subvol) {
ret = bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
if (ret)
goto err;

@ -423,13 +423,13 @@ int bch2_rename_trans(struct btree_trans *trans,
goto err;

ret = bch2_inode_peek(trans, &src_inode_iter, src_inode_u, src_inum,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
if (ret)
goto err;

if (dst_inum.inum) {
ret = bch2_inode_peek(trans, &dst_inode_iter, dst_inode_u, dst_inum,
BTREE_ITER_INTENT);
BTREE_ITER_intent);
if (ret)
goto err;
}

@ -176,7 +176,7 @@ retry:

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
BTREE_ITER_SLOTS);
BTREE_ITER_slots);
while (1) {
struct bkey_s_c k;
unsigned bytes, sectors, offset_into_extent;

@ -254,7 +254,7 @@ retry:

for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, err) {
BTREE_ITER_slots, k, err) {
if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
break;

@ -214,7 +214,7 @@ retry:

for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
SPOS(inum.inum, offset, snapshot),
BTREE_ITER_SLOTS, k, ret) {
BTREE_ITER_slots, k, ret) {
unsigned nr_ptrs = bch2_bkey_nr_ptrs_fully_allocated(k);
unsigned state = bkey_to_sector_state(k);

@ -594,7 +594,7 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inode->v.i_ino, start_sector),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
BTREE_ITER_slots|BTREE_ITER_intent);

while (!ret && bkey_lt(iter.pos, end_pos)) {
s64 i_sectors_delta = 0;
@ -1009,7 +1009,7 @@ retry:

for_each_btree_key_norestart(trans, iter, BTREE_ID_extents,
SPOS(inode->v.i_ino, offset >> 9, snapshot),
BTREE_ITER_SLOTS, k, ret) {
BTREE_ITER_slots, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
next_hole = bch2_seek_pagecache_hole(&inode->v,
offset, MAX_LFS_FILESIZE, 0, false);

@ -90,7 +90,7 @@ retry:
bch2_trans_begin(trans);

ret = bch2_inode_peek(trans, &iter, &inode_u, inode_inum(inode),
BTREE_ITER_INTENT) ?:
BTREE_ITER_intent) ?:
(set ? set(trans, inode, &inode_u, p) : 0) ?:
bch2_inode_write(trans, &iter, &inode_u) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
@ -320,7 +320,7 @@ retry:
inum.inum = inode_u.bi_inum;

ret = bch2_subvolume_get(trans, inum.subvol, true,
BTREE_ITER_WITH_UPDATES, &subvol) ?:
BTREE_ITER_with_updates, &subvol) ?:
bch2_trans_commit(trans, NULL, &journal_seq, 0);
if (unlikely(ret)) {
bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1,
@ -374,16 +374,12 @@ static struct bch_inode_info *bch2_lookup_trans(struct btree_trans *trans,
struct btree_iter dirent_iter = {};
subvol_inum inum = {};

int ret = bch2_hash_lookup(trans, &dirent_iter, bch2_dirent_hash_desc,
dir_hash_info, dir, name, 0);
struct bkey_s_c k = bch2_hash_lookup(trans, &dirent_iter, bch2_dirent_hash_desc,
dir_hash_info, dir, name, 0);
int ret = bkey_err(k);
if (ret)
return ERR_PTR(ret);

struct bkey_s_c k = bch2_btree_iter_peek_slot(&dirent_iter);
ret = bkey_err(k);
if (ret)
goto err;

ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), &inum);
if (ret > 0)
ret = -ENOENT;
@ -784,7 +780,7 @@ retry:
acl = NULL;

ret = bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
BTREE_ITER_INTENT);
BTREE_ITER_intent);
if (ret)
goto btree_err;

@ -1037,6 +1033,10 @@ retry:

bch2_btree_iter_set_pos(&iter,
POS(iter.pos.inode, iter.pos.offset + sectors));

ret = bch2_trans_relock(trans);
if (ret)
break;
}
start = iter.pos.offset;
bch2_trans_iter_exit(trans, &iter);

@ -79,7 +79,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,

bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
POS(0, inode_nr),
BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_all_snapshots);
k = bch2_btree_iter_peek(&iter);
ret = bkey_err(k);
if (ret)
@ -127,13 +127,13 @@ static int lookup_dirent_in_snapshot(struct btree_trans *trans,
u64 *target, unsigned *type, u32 snapshot)
{
struct btree_iter iter;
struct bkey_s_c_dirent d;
int ret = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
&hash_info, dir, name, 0, snapshot);
struct bkey_s_c k = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
&hash_info, dir, name, 0, snapshot);
int ret = bkey_err(k);
if (ret)
return ret;

d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(bch2_btree_iter_peek_slot(&iter));
*target = le64_to_cpu(d.v->d_inum);
*type = d.v->d_type;
bch2_trans_iter_exit(trans, &iter);
@ -154,12 +154,12 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)

dir_hash_info = bch2_hash_info_init(c, &dir_inode);

bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_INTENT);
bch2_trans_iter_init(trans, &iter, BTREE_ID_dirents, pos, BTREE_ITER_intent);

ret = bch2_btree_iter_traverse(&iter) ?:
bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
&dir_hash_info, &iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter);
err:
bch_err_fn(c, ret);
@ -274,9 +274,9 @@ create_lostfound:
&lostfound_str,
lostfound->bi_inum,
&lostfound->bi_dir_offset,
BCH_HASH_SET_MUST_CREATE) ?:
STR_HASH_must_create) ?:
bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
err:
bch_err_msg(c, ret, "creating lost+found");
bch2_trans_iter_exit(trans, &lostfound_iter);
@ -333,7 +333,7 @@ static int reattach_inode(struct btree_trans *trans,
&name,
inode->bi_subvol ?: inode->bi_inum,
&dir_offset,
BCH_HASH_SET_MUST_CREATE);
STR_HASH_must_create);
if (ret)
return ret;

@ -708,7 +708,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
w->inodes.nr = 0;

for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
BTREE_ITER_all_snapshots, k, ret) {
if (k.k->p.offset != inum)
break;

@ -799,7 +799,7 @@ static int __get_visible_inodes(struct btree_trans *trans,
w->inodes.nr = 0;

for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inum),
BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
BTREE_ITER_all_snapshots, k, ret) {
u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);

if (k.k->p.offset != inum)
@ -832,7 +832,7 @@ static int check_key_has_snapshot(struct btree_trans *trans,
"key in missing snapshot: %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?: 1;
BTREE_UPDATE_internal_snapshot_node) ?: 1;
fsck_err:
printbuf_exit(&buf);
return ret;
@ -861,8 +861,8 @@ static int hash_redo_key(struct btree_trans *trans,
bch2_hash_set_in_snapshot(trans, desc, hash_info,
(subvol_inum) { 0, k.k->p.inode },
k.k->p.snapshot, tmp,
BCH_HASH_SET_MUST_CREATE,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
STR_HASH_must_create|
BTREE_UPDATE_internal_snapshot_node) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
}

@ -891,7 +891,7 @@ static int hash_check_key(struct btree_trans *trans,

for_each_btree_key_norestart(trans, iter, desc.btree_id,
SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
BTREE_ITER_SLOTS, k, ret) {
BTREE_ITER_slots, k, ret) {
if (bkey_eq(k.k->p, hash_k.k->p))
break;

@ -1233,7 +1233,7 @@ int bch2_check_inodes(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_inode(trans, &iter, k, &prev, &s, full)));

@ -1362,8 +1362,8 @@ static int overlapping_extents_found(struct btree_trans *trans,
BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));

bch2_trans_iter_init(trans, &iter1, btree, pos1,
BTREE_ITER_ALL_SNAPSHOTS|
BTREE_ITER_NOT_EXTENTS);
BTREE_ITER_all_snapshots|
BTREE_ITER_not_extents);
k1 = bch2_btree_iter_peek_upto(&iter1, POS(pos1.inode, U64_MAX));
ret = bkey_err(k1);
if (ret)
@ -1425,7 +1425,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
trans->extra_disk_res += bch2_bkey_sectors_compressed(k2);

ret = bch2_trans_update_extent_overwrite(trans, old_iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
BTREE_UPDATE_internal_snapshot_node,
k1, k2) ?:
bch2_trans_commit(trans, &res, NULL, BCH_TRANS_COMMIT_no_enospc);
bch2_disk_reservation_put(c, &res);
@ -1625,7 +1625,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
ret = bch2_btree_iter_traverse(&iter2) ?:
bch2_btree_delete_at(trans, &iter2,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
bch2_trans_iter_exit(trans, &iter2);
if (ret)
goto err;
@ -1652,7 +1652,7 @@ fsck_err:
bch_err_fn(c, ret);
return ret;
delete:
ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_internal_snapshot_node);
goto out;
}

@ -1673,7 +1673,7 @@ int bch2_check_extents(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
&res, NULL,
BCH_TRANS_COMMIT_no_enospc, ({
bch2_disk_reservation_put(c, &res);
@ -1698,7 +1698,7 @@ int bch2_check_indirect_extents(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_reflink,
POS_MIN,
BTREE_ITER_PREFETCH, k,
BTREE_ITER_prefetch, k,
&res, NULL,
BCH_TRANS_COMMIT_no_enospc, ({
bch2_disk_reservation_put(c, &res);
@ -2104,7 +2104,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
goto out;
}

@ -2191,7 +2191,7 @@ int bch2_check_dirents(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@ -2255,7 +2255,7 @@ int bch2_check_xattrs(struct bch_fs *c)
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots,
k,
NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
@ -2422,7 +2422,7 @@ int bch2_check_subvolume_structure(struct bch_fs *c)
{
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_subvol_path(trans, &iter, k)));
bch_err_fn(c, ret);
@ -2559,9 +2559,9 @@ int bch2_check_directory_structure(struct bch_fs *c)

ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_intent|
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
if (!bkey_is_inode(k.k))
continue;
@ -2661,9 +2661,9 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_inodes,
POS(0, start),
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
BTREE_ITER_intent|
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots, k, ({
if (!bkey_is_inode(k.k))
continue;

@ -2704,9 +2704,9 @@ static int check_nlinks_walk_dirents(struct bch_fs *c, struct nlink_table *links

int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_dirents, POS_MIN,
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ({
BTREE_ITER_intent|
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots, k, ({
ret = snapshots_seen_update(c, &s, iter.btree_id, k.k->p);
if (ret)
break;
@ -2781,7 +2781,7 @@ static int check_nlinks_update_hardlinks(struct bch_fs *c,
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes,
POS(0, range_start),
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_intent|BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_nlinks_update_inode(trans, &iter, k, links, &idx, range_end)));
if (ret < 0) {
@ -2849,7 +2849,7 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
u->v.front_pad = 0;
u->v.back_pad = 0;

return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_NORUN);
return bch2_trans_update(trans, iter, &u->k_i, BTREE_TRIGGER_norun);
}

int bch2_fix_reflink_p(struct bch_fs *c)
@ -2860,8 +2860,8 @@ int bch2_fix_reflink_p(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_intent|BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
fix_reflink_p_key(trans, &iter, k)));
bch_err_fn(c, ret);

@ -339,7 +339,7 @@ int bch2_inode_peek_nowarn(struct btree_trans *trans,

k = bch2_bkey_get_iter(trans, iter, BTREE_ID_inodes,
SPOS(0, inum.inum, snapshot),
flags|BTREE_ITER_CACHED);
flags|BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
return ret;
@ -371,7 +371,7 @@ int bch2_inode_peek(struct btree_trans *trans,
int bch2_inode_write_flags(struct btree_trans *trans,
struct btree_iter *iter,
struct bch_inode_unpacked *inode,
enum btree_update_flags flags)
enum btree_iter_update_trigger_flags flags)
{
struct bkey_inode_buf *inode_p;

@ -399,7 +399,7 @@ int __bch2_fsck_write_inode(struct btree_trans *trans,

return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
&inode_p->inode.k_i,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
}

int bch2_fsck_write_inode(struct btree_trans *trans,
@ -535,29 +535,19 @@ static void __bch2_inode_unpacked_to_text(struct printbuf *out,
struct bch_inode_unpacked *inode)
{
printbuf_indent_add(out, 2);
prt_printf(out, "mode=%o", inode->bi_mode);
prt_newline(out);
prt_printf(out, "mode=%o\n", inode->bi_mode);

prt_str(out, "flags=");
prt_bitflags(out, bch2_inode_flag_strs, inode->bi_flags & ((1U << 20) - 1));
prt_printf(out, " (%x)", inode->bi_flags);
prt_newline(out);
prt_printf(out, " (%x)\n", inode->bi_flags);

prt_printf(out, "journal_seq=%llu", inode->bi_journal_seq);
prt_newline(out);

prt_printf(out, "bi_size=%llu", inode->bi_size);
prt_newline(out);

prt_printf(out, "bi_sectors=%llu", inode->bi_sectors);
prt_newline(out);

prt_printf(out, "bi_version=%llu", inode->bi_version);
prt_newline(out);
prt_printf(out, "journal_seq=%llu\n", inode->bi_journal_seq);
prt_printf(out, "bi_size=%llu\n", inode->bi_size);
prt_printf(out, "bi_sectors=%llu\n", inode->bi_sectors);
prt_printf(out, "bi_version=%llu\n", inode->bi_version);

#define x(_name, _bits) \
prt_printf(out, #_name "=%llu", (u64) inode->_name); \
prt_newline(out);
prt_printf(out, #_name "=%llu\n", (u64) inode->_name);
BCH_INODE_FIELDS_v3()
#undef x
printbuf_indent_sub(out, 2);
@ -608,7 +598,7 @@ int bch2_trigger_inode(struct btree_trans *trans,
{
s64 nr = bkey_is_inode(new.k) - bkey_is_inode(old.k);

if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (flags & BTREE_TRIGGER_transactional) {
if (nr) {
int ret = bch2_replicas_deltas_realloc(trans, 0);
if (ret)
@ -627,13 +617,13 @@ int bch2_trigger_inode(struct btree_trans *trans,
}
}

if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
BUG_ON(!trans->journal_res.seq);

bkey_s_to_inode_v3(new).v->bi_journal_seq = cpu_to_le64(trans->journal_res.seq);
}

if (flags & BTREE_TRIGGER_GC) {
if (flags & BTREE_TRIGGER_gc) {
struct bch_fs *c = trans->c;

percpu_down_read(&c->mark_lock);
@ -762,8 +752,8 @@ int bch2_inode_create(struct btree_trans *trans,

pos = start;
bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
BTREE_ITER_ALL_SNAPSHOTS|
BTREE_ITER_INTENT);
BTREE_ITER_all_snapshots|
BTREE_ITER_intent);
again:
while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
@ -824,7 +814,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
* extent iterator:
*/
bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
BTREE_ITER_INTENT);
BTREE_ITER_intent);

while (1) {
bch2_trans_begin(trans);
@ -846,7 +836,7 @@ static int bch2_inode_delete_keys(struct btree_trans *trans,
bkey_init(&delete.k);
delete.k.p = iter.pos;

if (iter.flags & BTREE_ITER_IS_EXTENTS)
if (iter.flags & BTREE_ITER_is_extents)
bch2_key_resize(&delete.k,
bpos_min(end, k.k->p).offset -
iter.pos.offset);
@ -895,7 +885,7 @@ retry:

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
SPOS(0, inum.inum, snapshot),
BTREE_ITER_INTENT|BTREE_ITER_CACHED);
BTREE_ITER_intent|BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
goto err;
@ -1055,7 +1045,7 @@ retry:
bch2_trans_begin(trans);

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
SPOS(0, inum, snapshot), BTREE_ITER_INTENT);
SPOS(0, inum, snapshot), BTREE_ITER_intent);
ret = bkey_err(k);
if (ret)
goto err;
@ -1100,7 +1090,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
struct bch_inode_unpacked inode;
int ret;

k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_CACHED);
k = bch2_bkey_get_iter(trans, &inode_iter, BTREE_ID_inodes, pos, BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
return ret;
@ -1152,7 +1142,7 @@ static int may_delete_deleted_inode(struct btree_trans *trans,
inode.bi_flags &= ~BCH_INODE_unlinked;

ret = bch2_inode_write_flags(trans, &inode_iter, &inode,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
bch_err_msg(c, ret, "clearing inode unlinked flag");
if (ret)
goto out;
@ -1199,7 +1189,7 @@ again:
* flushed and we'd spin:
*/
ret = for_each_btree_key_commit(trans, iter, BTREE_ID_deleted_inodes, POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
ret = may_delete_deleted_inode(trans, &iter, k.k->p, &need_another_pass);
if (ret > 0) {

@ -101,7 +101,7 @@ int bch2_inode_peek(struct btree_trans *, struct btree_iter *,
struct bch_inode_unpacked *, subvol_inum, unsigned);

int bch2_inode_write_flags(struct btree_trans *, struct btree_iter *,
struct bch_inode_unpacked *, enum btree_update_flags);
struct bch_inode_unpacked *, enum btree_iter_update_trigger_flags);

static inline int bch2_inode_write(struct btree_trans *trans,
struct btree_iter *iter,

@ -198,7 +198,7 @@ int bch2_fpunch(struct bch_fs *c, subvol_inum inum, u64 start, u64 end,

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inum.inum, start),
BTREE_ITER_INTENT);
BTREE_ITER_intent);

ret = bch2_fpunch_at(trans, &iter, inum, end, i_sectors_delta);

@ -230,7 +230,7 @@ static int truncate_set_isize(struct btree_trans *trans,
struct bch_inode_unpacked inode_u;
int ret;

ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT) ?:
ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent) ?:
(inode_u.bi_size = new_i_size, 0) ?:
bch2_inode_write(trans, &iter, &inode_u);

@ -256,7 +256,7 @@ static int __bch2_resume_logged_op_truncate(struct btree_trans *trans,

bch2_trans_iter_init(trans, &fpunch_iter, BTREE_ID_extents,
POS(inum.inum, round_up(new_i_size, block_bytes(c)) >> 9),
BTREE_ITER_INTENT);
BTREE_ITER_intent);
ret = bch2_fpunch_at(trans, &fpunch_iter, inum, U64_MAX, i_sectors_delta);
bch2_trans_iter_exit(trans, &fpunch_iter);

@ -317,7 +317,7 @@ static int adjust_i_size(struct btree_trans *trans, subvol_inum inum, u64 offset
offset <<= 9;
len <<= 9;

ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_INTENT);
ret = bch2_inode_peek(trans, &iter, &inode_u, inum, BTREE_ITER_intent);
if (ret)
return ret;

@ -365,7 +365,7 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inum.inum, 0),
BTREE_ITER_INTENT);
BTREE_ITER_intent);

switch (op->v.state) {
case LOGGED_OP_FINSERT_start:

@ -378,7 +378,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
bch2_bkey_buf_init(&sk);

bch2_trans_iter_init(trans, &iter, rbio->data_btree,
rbio->read_pos, BTREE_ITER_SLOTS);
rbio->read_pos, BTREE_ITER_slots);
retry:
rbio->bio.bi_status = 0;

@ -487,7 +487,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
return 0;

k = bch2_bkey_get_iter(trans, &iter, rbio->data_btree, rbio->data_pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
BTREE_ITER_slots|BTREE_ITER_intent);
if ((ret = bkey_err(k)))
goto out;

@ -523,7 +523,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
goto out;

ret = bch2_trans_update(trans, &iter, new,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
out:
bch2_trans_iter_exit(trans, &iter);
return ret;
@ -541,7 +541,7 @@ static void __bch2_read_endio(struct work_struct *work)
struct bch_read_bio *rbio =
container_of(work, struct bch_read_bio, work);
struct bch_fs *c = rbio->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, rbio->pick.ptr.dev);
struct bio *src = &rbio->bio;
struct bio *dst = &bch2_rbio_parent(rbio)->bio;
struct bvec_iter dst_iter = rbio->bvec_iter;
@ -675,7 +675,7 @@ static void bch2_read_endio(struct bio *bio)
struct bch_read_bio *rbio =
container_of(bio, struct bch_read_bio, bio);
struct bch_fs *c = rbio->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, rbio->pick.ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, rbio->pick.ptr.dev);
struct workqueue_struct *wq = NULL;
enum rbio_context context = RBIO_CONTEXT_NULL;

@ -762,18 +762,17 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
struct bch_extent_ptr ptr)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr.dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr.dev);
struct btree_iter iter;
struct printbuf buf = PRINTBUF;
int ret;

bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
PTR_BUCKET_POS(c, &ptr),
BTREE_ITER_CACHED);
BTREE_ITER_cached);

prt_printf(&buf, "Attempting to read from stale dirty pointer:");
prt_printf(&buf, "Attempting to read from stale dirty pointer:\n");
printbuf_indent_add(&buf, 2);
prt_newline(&buf);

bch2_bkey_val_to_text(&buf, c, k);
prt_newline(&buf);
@ -832,7 +831,7 @@ retry_pick:
goto err;
}

ca = bch_dev_bkey_exists(c, pick.ptr.dev);
ca = bch2_dev_bkey_exists(c, pick.ptr.dev);

/*
* Stale dirty pointers are treated as IO errors, but @failed isn't
@ -1113,7 +1112,7 @@ retry:

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
BTREE_ITER_SLOTS);
BTREE_ITER_slots);
while (1) {
unsigned bytes, sectors, offset_into_extent;
enum btree_id data_btree = BTREE_ID_extents;

@ -166,7 +166,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
bch2_trans_copy_iter(&iter, extent_iter);

for_each_btree_key_upto_continue_norestart(iter,
new->k.p, BTREE_ITER_SLOTS, old, ret) {
new->k.p, BTREE_ITER_slots, old, ret) {
s64 sectors = min(new->k.p.offset, old.k->p.offset) -
max(bkey_start_offset(&new->k),
bkey_start_offset(old.k));
@ -213,14 +213,14 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
* to be journalled - if we crash, the bi_journal_seq update will be
* lost, but that's fine.
*/
unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL;
unsigned inode_update_flags = BTREE_UPDATE_nojournal;
int ret;

k = bch2_bkey_get_mut_noupdate(trans, &iter, BTREE_ID_inodes,
SPOS(0,
extent_iter->pos.inode,
extent_iter->snapshot),
BTREE_ITER_CACHED);
BTREE_ITER_cached);
ret = PTR_ERR_OR_ZERO(k);
if (unlikely(ret))
return ret;
@ -251,7 +251,7 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
}

ret = bch2_trans_update(trans, &iter, &inode->k_i,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
BTREE_UPDATE_internal_snapshot_node|
inode_update_flags);
err:
bch2_trans_iter_exit(trans, &iter);
@ -360,7 +360,7 @@ static int bch2_write_index_default(struct bch_write_op *op)

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
bkey_start_pos(&sk.k->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
BTREE_ITER_slots|BTREE_ITER_intent);

ret = bch2_bkey_set_needs_rebalance(c, sk.k, &op->opts) ?:
bch2_extent_update(trans, inum, &iter, sk.k,
@ -399,9 +399,9 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
BUG_ON(c->opts.nochanges);

bkey_for_each_ptr(ptrs, ptr) {
BUG_ON(!bch2_dev_exists2(c, ptr->dev));
BUG_ON(!bch2_dev_exists(c, ptr->dev));

struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);

if (to_entry(ptr + 1) < ptrs.end) {
n = to_wbio(bio_alloc_clone(NULL, &wbio->bio,
@ -642,7 +642,7 @@ static void bch2_write_endio(struct bio *bio)
struct bch_write_bio *wbio = to_wbio(bio);
struct bch_write_bio *parent = wbio->split ? wbio->parent : NULL;
struct bch_fs *c = wbio->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, wbio->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, wbio->dev);

if (bch2_dev_inum_io_err_on(bio->bi_status, ca, BCH_MEMBER_ERROR_write,
op->pos.inode,
@ -1150,7 +1150,7 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
return bch2_extent_update_i_size_sectors(trans, iter,
min(new->k.p.offset << 9, new_i_size), 0) ?:
bch2_trans_update(trans, iter, new,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
BTREE_UPDATE_internal_snapshot_node);
}

static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
@ -1161,7 +1161,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
for_each_keylist_key(&op->insert_keys, orig) {
int ret = for_each_btree_key_upto_commit(trans, iter, BTREE_ID_extents,
bkey_start_pos(&orig->k), orig->k.p,
BTREE_ITER_INTENT, k,
BTREE_ITER_intent, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
bch2_nocow_write_convert_one_unwritten(trans, &iter, orig, k, op->new_i_size);
}));
@ -1234,12 +1234,16 @@ retry:

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(op->pos.inode, op->pos.offset, snapshot),
BTREE_ITER_SLOTS);
BTREE_ITER_slots);
while (1) {
struct bio *bio = &op->wbio.bio;

buckets.nr = 0;

ret = bch2_trans_relock(trans);
if (ret)
break;

k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
@ -1264,7 +1268,7 @@ retry:
bucket_nocow_lock(&c->nocow_locks, bucket_to_u64(b));
prefetch(l);

if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
if (unlikely(!bch2_dev_get_ioref(bch2_dev_bkey_exists(c, ptr->dev), WRITE)))
goto err_get_ioref;

/* XXX allocating memory with btree locks held - rare */
@ -1285,7 +1289,7 @@ retry:
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);

darray_for_each(buckets, i) {
struct bch_dev *ca = bch_dev_bkey_exists(c, i->b.inode);
struct bch_dev *ca = bch2_dev_bkey_exists(c, i->b.inode);

__bch2_bucket_nocow_lock(&c->nocow_locks, i->l,
bucket_to_u64(i->b),
@ -1362,7 +1366,7 @@ err:
return;
err_get_ioref:
darray_for_each(buckets, i)
percpu_ref_put(&bch_dev_bkey_exists(c, i->b.inode)->io_ref);
percpu_ref_put(&bch2_dev_bkey_exists(c, i->b.inode)->io_ref);

/* Fall back to COW path: */
goto out;
@ -1639,8 +1643,7 @@ void bch2_write_op_to_text(struct printbuf *out, struct bch_write_op *op)
prt_bitflags(out, bch2_write_flags, op->flags);
prt_newline(out);

prt_printf(out, "ref: %u", closure_nr_remaining(&op->cl));
prt_newline(out);
prt_printf(out, "ref: %u\n", closure_nr_remaining(&op->cl));

printbuf_indent_sub(out, 2);
}

@ -53,29 +53,19 @@ static void bch2_journal_buf_to_text(struct printbuf *out, struct journal *j, u6
unsigned i = seq & JOURNAL_BUF_MASK;
struct journal_buf *buf = j->buf + i;

prt_str(out, "seq:");
prt_tab(out);
prt_printf(out, "%llu", seq);
prt_newline(out);
prt_printf(out, "seq:\t%llu\n", seq);
printbuf_indent_add(out, 2);

prt_str(out, "refcount:");
prt_tab(out);
prt_printf(out, "%u", journal_state_count(s, i));
prt_newline(out);
prt_printf(out, "refcount:\t%u\n", journal_state_count(s, i));

prt_str(out, "size:");
prt_tab(out);
prt_printf(out, "size:\t");
prt_human_readable_u64(out, vstruct_bytes(buf->data));
prt_newline(out);

prt_str(out, "expires:");
prt_tab(out);
prt_printf(out, "%li jiffies", buf->expires - jiffies);
prt_newline(out);
prt_printf(out, "expires:\t");
prt_printf(out, "%li jiffies\n", buf->expires - jiffies);

prt_str(out, "flags:");
prt_tab(out);
prt_printf(out, "flags:\t");
if (buf->noflush)
prt_str(out, "noflush ");
if (buf->must_flush)
@ -948,7 +938,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
ret = bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
ob[nr_got]->bucket, BCH_DATA_journal,
ca->mi.bucket_size));
ca->mi.bucket_size, BTREE_TRIGGER_transactional));
if (ret) {
bch2_open_bucket_put(c, ob[nr_got]);
bch_err_msg(c, ret, "marking new journal buckets");
@ -1028,7 +1018,8 @@ err_unblock:
for (i = 0; i < nr_got; i++)
bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
bu[i], BCH_DATA_free, 0));
bu[i], BCH_DATA_free, 0,
BTREE_TRIGGER_transactional));
err_free:
if (!new_fs)
for (i = 0; i < nr_got; i++)
@ -1179,12 +1170,13 @@ void bch2_fs_journal_stop(struct journal *j)
bch2_journal_meta(j);

journal_quiesce(j);
cancel_delayed_work_sync(&j->write_work);

BUG_ON(!bch2_journal_error(j) &&
test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
j->last_empty_seq != journal_cur_seq(j));

cancel_delayed_work_sync(&j->write_work);
clear_bit(JOURNAL_RUNNING, &j->flags);
}

int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
@ -1258,7 +1250,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)

spin_lock(&j->lock);

set_bit(JOURNAL_STARTED, &j->flags);
set_bit(JOURNAL_RUNNING, &j->flags);
j->last_flush_write = jiffies;

j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
@ -1414,12 +1406,12 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
s = READ_ONCE(j->reservations);

prt_printf(out, "dirty journal entries:\t%llu/%llu\n", fifo_used(&j->pin), j->pin.size);
prt_printf(out, "seq:\t\t\t%llu\n", journal_cur_seq(j));
prt_printf(out, "seq_ondisk:\t\t%llu\n", j->seq_ondisk);
prt_printf(out, "last_seq:\t\t%llu\n", journal_last_seq(j));
prt_printf(out, "seq:\t%llu\n", journal_cur_seq(j));
prt_printf(out, "seq_ondisk:\t%llu\n", j->seq_ondisk);
prt_printf(out, "last_seq:\t%llu\n", journal_last_seq(j));
prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
prt_printf(out, "watermark:\t\t%s\n", bch2_watermarks[j->watermark]);
prt_printf(out, "watermark:\t%s\n", bch2_watermarks[j->watermark]);
prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);
prt_printf(out, "nr noflush writes:\t%llu\n", j->nr_noflush_writes);
@ -1428,48 +1420,48 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
prt_newline(out);
prt_printf(out, "nr direct reclaim:\t%llu\n", j->nr_direct_reclaim);
prt_printf(out, "nr background reclaim:\t%llu\n", j->nr_background_reclaim);
prt_printf(out, "reclaim kicked:\t\t%u\n", j->reclaim_kicked);
prt_printf(out, "reclaim kicked:\t%u\n", j->reclaim_kicked);
prt_printf(out, "reclaim runs in:\t%u ms\n", time_after(j->next_reclaim, now)
? jiffies_to_msecs(j->next_reclaim - jiffies) : 0);
prt_printf(out, "blocked:\t\t%u\n", j->blocked);
prt_printf(out, "blocked:\t%u\n", j->blocked);
prt_printf(out, "current entry sectors:\t%u\n", j->cur_entry_sectors);
prt_printf(out, "current entry error:\t%s\n", bch2_journal_errors[j->cur_entry_error]);
prt_printf(out, "current entry:\t\t");
prt_printf(out, "current entry:\t");

switch (s.cur_entry_offset) {
case JOURNAL_ENTRY_ERROR_VAL:
prt_printf(out, "error");
prt_printf(out, "error\n");
break;
case JOURNAL_ENTRY_CLOSED_VAL:
prt_printf(out, "closed");
prt_printf(out, "closed\n");
break;
default:
prt_printf(out, "%u/%u", s.cur_entry_offset, j->cur_entry_u64s);
prt_printf(out, "%u/%u\n", s.cur_entry_offset, j->cur_entry_u64s);
break;
}

prt_newline(out);
prt_printf(out, "unwritten entries:");
prt_newline(out);
prt_printf(out, "unwritten entries:\n");
bch2_journal_bufs_to_text(out, j);

prt_printf(out,
"replay done:\t\t%i\n",
"replay done:\t%i\n",
test_bit(JOURNAL_REPLAY_DONE, &j->flags));

prt_printf(out, "space:\n");
prt_printf(out, "\tdiscarded\t%u:%u\n",
printbuf_indent_add(out, 2);
prt_printf(out, "discarded\t%u:%u\n",
j->space[journal_space_discarded].next_entry,
j->space[journal_space_discarded].total);
prt_printf(out, "\tclean ondisk\t%u:%u\n",
prt_printf(out, "clean ondisk\t%u:%u\n",
j->space[journal_space_clean_ondisk].next_entry,
j->space[journal_space_clean_ondisk].total);
prt_printf(out, "\tclean\t\t%u:%u\n",
prt_printf(out, "clean\t%u:%u\n",
j->space[journal_space_clean].next_entry,
j->space[journal_space_clean].total);
prt_printf(out, "\ttotal\t\t%u:%u\n",
prt_printf(out, "total\t%u:%u\n",
j->space[journal_space_total].next_entry,
j->space[journal_space_total].total);
printbuf_indent_sub(out, 2);

for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal]) {
struct journal_device *ja = &ca->journal;
@ -1480,14 +1472,16 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
if (!ja->nr)
continue;

prt_printf(out, "dev %u:\n", ca->dev_idx);
prt_printf(out, "\tnr\t\t%u\n", ja->nr);
prt_printf(out, "\tbucket size\t%u\n", ca->mi.bucket_size);
prt_printf(out, "\tavailable\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
prt_printf(out, "\tdiscard_idx\t%u\n", ja->discard_idx);
prt_printf(out, "\tdirty_ondisk\t%u (seq %llu)\n", ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
prt_printf(out, "\tdirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
prt_printf(out, "\tcur_idx\t\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
prt_printf(out, "dev %u:\n", ca->dev_idx);
printbuf_indent_add(out, 2);
prt_printf(out, "nr\t%u\n", ja->nr);
prt_printf(out, "bucket size\t%u\n", ca->mi.bucket_size);
prt_printf(out, "available\t%u:%u\n", bch2_journal_dev_buckets_available(j, ja, journal_space_discarded), ja->sectors_free);
prt_printf(out, "discard_idx\t%u\n", ja->discard_idx);
prt_printf(out, "dirty_ondisk\t%u (seq %llu)\n",ja->dirty_idx_ondisk, ja->bucket_seq[ja->dirty_idx_ondisk]);
prt_printf(out, "dirty_idx\t%u (seq %llu)\n", ja->dirty_idx, ja->bucket_seq[ja->dirty_idx]);
prt_printf(out, "cur_idx\t%u (seq %llu)\n", ja->cur_idx, ja->bucket_seq[ja->cur_idx]);
printbuf_indent_sub(out, 2);
}

rcu_read_unlock();
@ -1519,25 +1513,18 @@ bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64

pin_list = journal_seq_pin(j, *seq);

prt_printf(out, "%llu: count %u", *seq, atomic_read(&pin_list->count));
prt_newline(out);
prt_printf(out, "%llu: count %u\n", *seq, atomic_read(&pin_list->count));
printbuf_indent_add(out, 2);

for (unsigned i = 0; i < ARRAY_SIZE(pin_list->list); i++)
list_for_each_entry(pin, &pin_list->list[i], list) {
prt_printf(out, "\t%px %ps", pin, pin->flush);
prt_newline(out);
}
list_for_each_entry(pin, &pin_list->list[i], list)
prt_printf(out, "\t%px %ps\n", pin, pin->flush);

if (!list_empty(&pin_list->flushed)) {
prt_printf(out, "flushed:");
prt_newline(out);
}
if (!list_empty(&pin_list->flushed))
prt_printf(out, "flushed:\n");

list_for_each_entry(pin, &pin_list->flushed, list) {
prt_printf(out, "\t%px %ps", pin, pin->flush);
prt_newline(out);
}
list_for_each_entry(pin, &pin_list->flushed, list)
prt_printf(out, "\t%px %ps\n", pin, pin->flush);

printbuf_indent_sub(out, 2);

@ -372,7 +372,7 @@ static inline int bch2_journal_res_get(struct journal *j, struct journal_res *re
int ret;

EBUG_ON(res->ref);
EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
EBUG_ON(!test_bit(JOURNAL_RUNNING, &j->flags));

res->u64s = u64s;

@ -418,7 +418,7 @@ struct bch_dev;

static inline void bch2_journal_set_replay_done(struct journal *j)
{
BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
BUG_ON(!test_bit(JOURNAL_RUNNING, &j->flags));
set_bit(JOURNAL_REPLAY_DONE, &j->flags);
}

@ -21,7 +21,7 @@ void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
struct journal_replay *j)
{
darray_for_each(j->ptrs, i) {
struct bch_dev *ca = bch_dev_bkey_exists(c, i->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, i->dev);
u64 offset;

div64_u64_rem(i->sector, ca->mi.bucket_size, &offset);
@ -677,7 +677,7 @@ static int journal_entry_dev_usage_validate(struct bch_fs *c,

dev = le32_to_cpu(u->dev);

if (journal_entry_err_on(!bch2_dev_exists2(c, dev),
if (journal_entry_err_on(!bch2_dev_exists(c, dev),
c, version, jset, entry,
journal_entry_dev_usage_bad_dev,
"bad dev")) {
@ -1366,7 +1366,7 @@ int bch2_journal_read(struct bch_fs *c,
fsck_err(c, journal_entries_missing,
"journal entries %llu-%llu missing! (replaying %llu-%llu)\n"
" prev at %s\n"
" next at %s",
" next at %s, continue?",
missing_start, missing_end,
*last_seq, *blacklist_seq - 1,
buf1.buf, buf2.buf);
@ -1390,7 +1390,7 @@ int bch2_journal_read(struct bch_fs *c,
continue;

darray_for_each(i->ptrs, ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);

if (!ptr->csum_good)
bch_err_dev_offset(ca, ptr->sector,
@ -1400,7 +1400,7 @@ int bch2_journal_read(struct bch_fs *c,
}

ret = jset_validate(c,
bch_dev_bkey_exists(c, i->ptrs.data[0].dev),
bch2_dev_bkey_exists(c, i->ptrs.data[0].dev),
&i->j,
i->ptrs.data[0].sector,
READ);
@ -1731,7 +1731,7 @@ static CLOSURE_CALLBACK(do_journal_write)
unsigned sectors = vstruct_sectors(w->data, c->block_bits);

extent_for_each_ptr(bkey_i_to_s_extent(&w->key), ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bch_dev *ca = bch2_dev_bkey_exists(c, ptr->dev);
struct journal_device *ja = &ca->journal;

if (!percpu_ref_tryget(&ca->io_ref)) {

@ -833,7 +833,7 @@ bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
|
||||
/* time_stats this */
|
||||
bool did_work = false;
|
||||
|
||||
if (!test_bit(JOURNAL_STARTED, &j->flags))
|
||||
if (!test_bit(JOURNAL_RUNNING, &j->flags))
|
||||
return false;
|
||||
|
||||
closure_wait_event(&j->async_wait,
|
||||
|
@ -233,7 +233,7 @@ void bch2_blacklist_entries_gc(struct work_struct *work)
|
||||
struct btree *b;
|
||||
|
||||
bch2_trans_node_iter_init(trans, &iter, i, POS_MIN,
|
||||
0, 0, BTREE_ITER_PREFETCH);
|
||||
0, 0, BTREE_ITER_prefetch);
|
||||
retry:
|
||||
bch2_trans_begin(trans);
|
||||
|
||||
|
@ -131,7 +131,7 @@ enum journal_space_from {
|
||||
|
||||
enum journal_flags {
|
||||
JOURNAL_REPLAY_DONE,
|
||||
JOURNAL_STARTED,
|
||||
JOURNAL_RUNNING,
|
||||
JOURNAL_MAY_SKIP_FLUSH,
|
||||
JOURNAL_NEED_FLUSH_WRITE,
|
||||
JOURNAL_SPACE_LOW,
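
JOURNAL_RUNNING is the new bit here; the journal.h and journal_reclaim.c hunks above move their assertions and early returns from JOURNAL_STARTED onto it, presumably so that shutdown can clear RUNNING while STARTED remains set. A hedged sketch of the resulting guard (hypothetical helper; the real call sites test the bit directly):

    static inline bool journal_accepting_writes(struct journal *j)
    {
        /* set at startup, cleared again once the journal is shut down */
        return test_bit(JOURNAL_RUNNING, &j->flags);
    }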

@ -56,7 +56,7 @@ int bch2_resume_logged_ops(struct bch_fs *c)
int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter,
BTREE_ID_logged_ops, POS_MIN,
BTREE_ITER_PREFETCH, k,
BTREE_ITER_prefetch, k,
resume_logged_op(trans, &iter, k)));
bch_err_fn(c, ret);
return ret;

@ -149,7 +149,7 @@ int bch2_check_lrus(struct bch_fs *c)
struct bpos last_flushed_pos = POS_MIN;
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter,
BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
BTREE_ID_lru, POS_MIN, BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc|BCH_TRANS_COMMIT_lazy_rw,
bch2_check_lru_key(trans, &iter, k, &last_flushed_pos)));
bch_err_fn(c, ret);

@ -49,7 +49,7 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
if (!bch2_bkey_has_device_c(k, dev_idx))
return 0;

n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_internal_snapshot_node);
ret = PTR_ERR_OR_ZERO(n);
if (ret)
return ret;

@ -67,7 +67,7 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,

/*
* Since we're not inserting through an extent iterator
* (BTREE_ITER_ALL_SNAPSHOTS iterators aren't extent iterators),
* (BTREE_ITER_all_snapshots iterators aren't extent iterators),
* we aren't using the extent overwrite path to delete, we're
* just using the normal key deletion path:
*/

@ -87,7 +87,7 @@ static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
continue;

ret = for_each_btree_key_commit(trans, iter, id, POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_dev_usrdata_drop_key(trans, &iter, k, dev_idx, flags));
if (ret)

@ -119,7 +119,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)

for (id = 0; id < BTREE_ID_NR; id++) {
bch2_trans_node_iter_init(trans, &iter, id, POS_MIN, 0, 0,
BTREE_ITER_PREFETCH);
BTREE_ITER_prefetch);
retry:
ret = 0;
while (bch2_trans_begin(trans),

@ -41,28 +41,23 @@ static void bch2_data_update_opts_to_text(struct printbuf *out, struct bch_fs *c
struct data_update_opts *data_opts)
{
printbuf_tabstop_push(out, 20);
prt_str(out, "rewrite ptrs:");
prt_tab(out);
prt_str(out, "rewrite ptrs:\t");
bch2_prt_u64_base2(out, data_opts->rewrite_ptrs);
prt_newline(out);

prt_str(out, "kill ptrs: ");
prt_tab(out);
prt_str(out, "kill ptrs:\t");
bch2_prt_u64_base2(out, data_opts->kill_ptrs);
prt_newline(out);

prt_str(out, "target: ");
prt_tab(out);
prt_str(out, "target:\t");
bch2_target_to_text(out, c, data_opts->target);
prt_newline(out);

prt_str(out, "compression: ");
prt_tab(out);
prt_str(out, "compression:\t");
bch2_compression_opt_to_text(out, background_compression(*io_opts));
prt_newline(out);

prt_str(out, "extra replicas: ");
prt_tab(out);
prt_str(out, "extra replicas:\t");
prt_u64(out, data_opts->extra_replicas);
}
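
The function above pushes a single tabstop 20 columns from the start of the line, so every '\t' in the strings that follow pads the output out to that column, keeping values aligned under one another. A sketch of the effect (illustrative values):

    printbuf_tabstop_push(out, 20);
    prt_printf(out, "rewrite ptrs:\t%u\n", 3);
    prt_printf(out, "kill ptrs:\t%u\n", 0);
    /*
     * rewrite ptrs:       3
     * kill ptrs:          0
     */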

@ -421,7 +416,7 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
io_opts->d.nr = 0;

ret = for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, extent_k.k->p.inode),
BTREE_ITER_ALL_SNAPSHOTS, k, ({
BTREE_ITER_all_snapshots, k, ({
if (k.k->p.offset != extent_k.k->p.inode)
break;

@ -467,7 +462,7 @@ int bch2_move_get_io_opts_one(struct btree_trans *trans,

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
SPOS(0, extent_k.k->p.inode, extent_k.k->p.snapshot),
BTREE_ITER_CACHED);
BTREE_ITER_cached);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
return ret;

@ -553,8 +548,8 @@ static int bch2_move_data_btree(struct moving_context *ctxt,
}

bch2_trans_iter_init(trans, &iter, btree_id, start,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots);

if (ctxt->rate)
bch2_ratelimit_reset(ctxt->rate);

@ -705,7 +700,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
bch2_trans_begin(trans);

bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
bucket, BTREE_ITER_CACHED);
bucket, BTREE_ITER_cached);
ret = lockrestart_do(trans,
bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
bch2_trans_iter_exit(trans, &iter);

@ -716,7 +711,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,

a = bch2_alloc_to_v4(k, &a_convert);
dirty_sectors = bch2_bucket_sectors_dirty(*a);
bucket_size = bch_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
bucket_size = bch2_dev_bkey_exists(c, bucket.inode)->mi.bucket_size;
fragmentation = a->fragmentation_lru;

ret = bch2_btree_write_buffer_tryflush(trans);

@ -732,7 +727,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,

ret = bch2_get_next_backpointer(trans, bucket, gen,
&bp_pos, &bp,
BTREE_ITER_CACHED);
BTREE_ITER_cached);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)

@ -868,7 +863,7 @@ static int bch2_move_btree(struct bch_fs *c,
continue;

bch2_trans_node_iter_init(trans, &iter, btree, POS_MIN, 0, 0,
BTREE_ITER_PREFETCH);
BTREE_ITER_prefetch);
retry:
ret = 0;
while (bch2_trans_begin(trans),

@ -1137,23 +1132,17 @@ void bch2_move_stats_to_text(struct printbuf *out, struct bch_move_stats *stats)
prt_newline(out);
printbuf_indent_add(out, 2);

prt_str(out, "keys moved: ");
prt_u64(out, atomic64_read(&stats->keys_moved));
prt_newline(out);

prt_str(out, "keys raced: ");
prt_u64(out, atomic64_read(&stats->keys_raced));
prt_newline(out);

prt_str(out, "bytes seen: ");
prt_printf(out, "keys moved: %llu\n", atomic64_read(&stats->keys_moved));
prt_printf(out, "keys raced: %llu\n", atomic64_read(&stats->keys_raced));
prt_printf(out, "bytes seen: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_seen) << 9);
prt_newline(out);

prt_str(out, "bytes moved: ");
prt_printf(out, "bytes moved: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_moved) << 9);
prt_newline(out);

prt_str(out, "bytes raced: ");
prt_printf(out, "bytes raced: ");
prt_human_readable_u64(out, atomic64_read(&stats->sectors_raced) << 9);
prt_newline(out);

@ -1167,19 +1156,17 @@ static void bch2_moving_ctxt_to_text(struct printbuf *out, struct bch_fs *c, str
bch2_move_stats_to_text(out, ctxt->stats);
printbuf_indent_add(out, 2);

prt_printf(out, "reads: ios %u/%u sectors %u/%u",
prt_printf(out, "reads: ios %u/%u sectors %u/%u\n",
atomic_read(&ctxt->read_ios),
c->opts.move_ios_in_flight,
atomic_read(&ctxt->read_sectors),
c->opts.move_bytes_in_flight >> 9);
prt_newline(out);

prt_printf(out, "writes: ios %u/%u sectors %u/%u",
prt_printf(out, "writes: ios %u/%u sectors %u/%u\n",
atomic_read(&ctxt->write_ios),
c->opts.move_ios_in_flight,
atomic_read(&ctxt->write_sectors),
c->opts.move_bytes_in_flight >> 9);
prt_newline(out);

printbuf_indent_add(out, 2);

@ -84,7 +84,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
return 0;

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
b->k.bucket, BTREE_ITER_CACHED);
b->k.bucket, BTREE_ITER_cached);
ret = bkey_err(k);
if (ret)
return ret;

@ -158,6 +158,8 @@ static int bch2_copygc_get_buckets(struct moving_context *ctxt,
if (bch2_fs_fatal_err_on(ret, c, "%s: from bch2_btree_write_buffer_tryflush()", bch2_err_str(ret)))
return ret;

bch2_trans_begin(trans);

ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),

@ -480,7 +480,7 @@ enum fsck_err_opts {
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
BCH2_NO_SB_OPT, true, \
NULL, "BTREE_ITER_PREFETCH causes btree nodes to be\n"\
NULL, "BTREE_ITER_prefetch causes btree nodes to be\n"\
" prefetched sequentially")

struct bch_opts {

@ -10,35 +10,50 @@

#include "printbuf.h"

static inline unsigned __printbuf_linelen(struct printbuf *buf, unsigned pos)
{
return pos - buf->last_newline;
}

static inline unsigned printbuf_linelen(struct printbuf *buf)
{
return buf->pos - buf->last_newline;
return __printbuf_linelen(buf, buf->pos);
}

/*
* Returns spaces from start of line, if set, or 0 if unset:
*/
static inline unsigned cur_tabstop(struct printbuf *buf)
{
return buf->cur_tabstop < buf->nr_tabstops
? buf->_tabstops[buf->cur_tabstop]
: 0;
}

int bch2_printbuf_make_room(struct printbuf *out, unsigned extra)
{
unsigned new_size;
char *buf;

if (!out->heap_allocated)
return 0;

/* Reserved space for terminating nul: */
extra += 1;

if (out->pos + extra < out->size)
if (out->pos + extra <= out->size)
return 0;

new_size = roundup_pow_of_two(out->size + extra);
if (!out->heap_allocated) {
out->overflow = true;
return 0;
}

unsigned new_size = roundup_pow_of_two(out->size + extra);

/*
* Note: output buffer must be freeable with kfree(), it's not required
* that the user use printbuf_exit().
*/
buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_NOWAIT);
char *buf = krealloc(out->buf, new_size, !out->atomic ? GFP_KERNEL : GFP_NOWAIT);

if (!buf) {
out->allocation_failure = true;
out->overflow = true;
return -ENOMEM;
}

@ -47,6 +62,92 @@ int bch2_printbuf_make_room(struct printbuf *out, unsigned extra)
return 0;
}

static void printbuf_advance_pos(struct printbuf *out, unsigned len)
{
out->pos += min(len, printbuf_remaining(out));
}

static void printbuf_insert_spaces(struct printbuf *out, unsigned pos, unsigned nr)
{
unsigned move = out->pos - pos;

bch2_printbuf_make_room(out, nr);

if (pos + nr < out->size)
memmove(out->buf + pos + nr,
out->buf + pos,
min(move, out->size - 1 - pos - nr));

if (pos < out->size)
memset(out->buf + pos, ' ', min(nr, out->size - pos));

printbuf_advance_pos(out, nr);
printbuf_nul_terminate_reserved(out);
}

static void __printbuf_do_indent(struct printbuf *out, unsigned pos)
{
while (true) {
int pad;
unsigned len = out->pos - pos;
char *p = out->buf + pos;
char *n = memscan(p, '\n', len);
if (cur_tabstop(out)) {
n = min(n, (char *) memscan(p, '\r', len));
n = min(n, (char *) memscan(p, '\t', len));
}

pos = n - out->buf;
if (pos == out->pos)
break;

switch (*n) {
case '\n':
pos++;
out->last_newline = pos;

printbuf_insert_spaces(out, pos, out->indent);

pos = min(pos + out->indent, out->pos);
out->last_field = pos;
out->cur_tabstop = 0;
break;
case '\r':
memmove(n, n + 1, out->pos - pos);
--out->pos;
pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos);
if (pad > 0) {
printbuf_insert_spaces(out, out->last_field, pad);
pos += pad;
}

out->last_field = pos;
out->cur_tabstop++;
break;
case '\t':
pad = (int) cur_tabstop(out) - (int) __printbuf_linelen(out, pos) - 1;
if (pad > 0) {
*n = ' ';
printbuf_insert_spaces(out, pos, pad - 1);
pos += pad;
} else {
memmove(n, n + 1, out->pos - pos);
--out->pos;
}

out->last_field = pos;
out->cur_tabstop++;
break;
}
}
}

static inline void printbuf_do_indent(struct printbuf *out, unsigned pos)
{
if (out->has_indent_or_tabstops && !out->suppress_indent_tabstop_handling)
__printbuf_do_indent(out, pos);
}
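
__printbuf_do_indent() gives three characters special meaning in buffered output: '\n' starts a new line and re-inserts the current indent, '\t' left-pads to the next tabstop, and '\r' right-justifies everything printed since the last field against the next tabstop (the padding is inserted at last_field, shifting the text right). A sketch of the intended semantics, assuming tabstops at columns 12 and 24:

    printbuf_tabstop_push(out, 12);
    printbuf_tabstop_push(out, 24);
    prt_printf(out, "buckets\t%u\r\n", 128);
    /*
     * "buckets" is padded out to column 12; "128" is then shifted
     * right so the line ends at column 24:
     * buckets              128
     */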

void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args)
{
int len;

@ -55,14 +156,14 @@ void bch2_prt_vprintf(struct printbuf *out, const char *fmt, va_list args)
va_list args2;

va_copy(args2, args);
len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args2);
len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args2);
va_end(args2);
} while (len + 1 >= printbuf_remaining(out) &&
!bch2_printbuf_make_room(out, len + 1));
} while (len > printbuf_remaining(out) &&
!bch2_printbuf_make_room(out, len));

len = min_t(size_t, len,
printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
out->pos += len;
unsigned indent_pos = out->pos;
printbuf_advance_pos(out, len);
printbuf_do_indent(out, indent_pos);
}

void bch2_prt_printf(struct printbuf *out, const char *fmt, ...)

@ -72,14 +173,14 @@ void bch2_prt_printf(struct printbuf *out, const char *fmt, ...)

do {
va_start(args, fmt);
len = vsnprintf(out->buf + out->pos, printbuf_remaining(out), fmt, args);
len = vsnprintf(out->buf + out->pos, printbuf_remaining_size(out), fmt, args);
va_end(args);
} while (len + 1 >= printbuf_remaining(out) &&
!bch2_printbuf_make_room(out, len + 1));
} while (len > printbuf_remaining(out) &&
!bch2_printbuf_make_room(out, len));

len = min_t(size_t, len,
printbuf_remaining(out) ? printbuf_remaining(out) - 1 : 0);
out->pos += len;
unsigned indent_pos = out->pos;
printbuf_advance_pos(out, len);
printbuf_do_indent(out, indent_pos);
}
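
Both printf paths above use the same sizing idiom: format into the space that's left, and only when vsnprintf() reports truncation, grow the buffer and format a second time. The same pattern in standalone, userspace C (a sketch against plain libc, not the bcachefs printbuf API; buf_printf and its parameters are hypothetical):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* append formatted text to a growable buffer; returns bytes appended or -1.
     * works with *buf == NULL, *size == 0 as the starting state on common libcs. */
    static int buf_printf(char **buf, size_t *pos, size_t *size, const char *fmt, ...)
    {
        va_list args;

        va_start(args, fmt);
        int len = vsnprintf(*buf + *pos, *size - *pos, fmt, args);
        va_end(args);
        if (len < 0)
            return -1;

        if ((size_t) len + 1 > *size - *pos) {  /* truncated: grow, format again */
            char *n = realloc(*buf, *pos + len + 1);
            if (!n)
                return -1;
            *buf = n;
            *size = *pos + len + 1;

            va_start(args, fmt);
            len = vsnprintf(*buf + *pos, *size - *pos, fmt, args);
            va_end(args);
        }

        *pos += len;
        return len;
    }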

/**
@ -194,33 +295,20 @@ void bch2_printbuf_indent_sub(struct printbuf *buf, unsigned spaces)

void bch2_prt_newline(struct printbuf *buf)
{
unsigned i;

bch2_printbuf_make_room(buf, 1 + buf->indent);

__prt_char(buf, '\n');
__prt_char_reserved(buf, '\n');

buf->last_newline = buf->pos;

for (i = 0; i < buf->indent; i++)
__prt_char(buf, ' ');
__prt_chars_reserved(buf, ' ', buf->indent);

printbuf_nul_terminate(buf);
printbuf_nul_terminate_reserved(buf);

buf->last_field = buf->pos;
buf->cur_tabstop = 0;
}

/*
* Returns spaces from start of line, if set, or 0 if unset:
*/
static inline unsigned cur_tabstop(struct printbuf *buf)
{
return buf->cur_tabstop < buf->nr_tabstops
? buf->_tabstops[buf->cur_tabstop]
: 0;
}

static void __prt_tab(struct printbuf *out)
{
int spaces = max_t(int, 0, cur_tabstop(out) - printbuf_linelen(out));

@ -247,24 +335,9 @@ void bch2_prt_tab(struct printbuf *out)

static void __prt_tab_rjust(struct printbuf *buf)
{
unsigned move = buf->pos - buf->last_field;
int pad = (int) cur_tabstop(buf) - (int) printbuf_linelen(buf);

if (pad > 0) {
bch2_printbuf_make_room(buf, pad);

if (buf->last_field + pad < buf->size)
memmove(buf->buf + buf->last_field + pad,
buf->buf + buf->last_field,
min(move, buf->size - 1 - buf->last_field - pad));

if (buf->last_field < buf->size)
memset(buf->buf + buf->last_field, ' ',
min((unsigned) pad, buf->size - buf->last_field));

buf->pos += pad;
printbuf_nul_terminate(buf);
}
if (pad > 0)
printbuf_insert_spaces(buf, buf->last_field, pad);

buf->last_field = buf->pos;
buf->cur_tabstop++;

@ -301,41 +374,9 @@ void bch2_prt_tab_rjust(struct printbuf *buf)
*/
void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned count)
{
const char *unprinted_start = str;
const char *end = str + count;

if (!out->has_indent_or_tabstops || out->suppress_indent_tabstop_handling) {
prt_bytes(out, str, count);
return;
}

while (str != end) {
switch (*str) {
case '\n':
prt_bytes(out, unprinted_start, str - unprinted_start);
unprinted_start = str + 1;
bch2_prt_newline(out);
break;
case '\t':
if (likely(cur_tabstop(out))) {
prt_bytes(out, unprinted_start, str - unprinted_start);
unprinted_start = str + 1;
__prt_tab(out);
}
break;
case '\r':
if (likely(cur_tabstop(out))) {
prt_bytes(out, unprinted_start, str - unprinted_start);
unprinted_start = str + 1;
__prt_tab_rjust(out);
}
break;
}

str++;
}

prt_bytes(out, unprinted_start, str - unprinted_start);
unsigned indent_pos = out->pos;
prt_bytes(out, str, count);
printbuf_do_indent(out, indent_pos);
}

/**
@ -348,9 +389,10 @@ void bch2_prt_bytes_indented(struct printbuf *out, const char *str, unsigned cou
void bch2_prt_human_readable_u64(struct printbuf *out, u64 v)
{
bch2_printbuf_make_room(out, 10);
out->pos += string_get_size(v, 1, !out->si_units,
out->buf + out->pos,
printbuf_remaining_size(out));
unsigned len = string_get_size(v, 1, !out->si_units,
out->buf + out->pos,
printbuf_remaining_size(out));
printbuf_advance_pos(out, len);
}

/**
@ -402,9 +444,7 @@ void bch2_prt_string_option(struct printbuf *out,
const char * const list[],
size_t selected)
{
size_t i;

for (i = 0; list[i]; i++)
for (size_t i = 0; list[i]; i++)
bch2_prt_printf(out, i == selected ? "[%s] " : "%s ", list[i]);
}

@ -86,6 +86,7 @@ struct printbuf {
u8 atomic;
bool allocation_failure:1;
bool heap_allocated:1;
bool overflow:1;
enum printbuf_si si_units:1;
bool human_readable_units:1;
bool has_indent_or_tabstops:1;
@ -142,7 +143,9 @@ void bch2_prt_bitflags_vector(struct printbuf *, const char * const[],
*/
static inline unsigned printbuf_remaining_size(struct printbuf *out)
{
return out->pos < out->size ? out->size - out->pos : 0;
if (WARN_ON(out->size && out->pos >= out->size))
out->pos = out->size - 1;
return out->size - out->pos;
}

/*
@ -151,7 +154,7 @@ static inline unsigned printbuf_remaining_size(struct printbuf *out)
*/
static inline unsigned printbuf_remaining(struct printbuf *out)
{
return out->pos < out->size ? out->size - out->pos - 1 : 0;
return out->size ? printbuf_remaining_size(out) - 1 : 0;
}
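
The two helpers above differ by exactly the byte reserved for the terminating nul: printbuf_remaining_size() counts all free space including that byte, printbuf_remaining() only the printable part. Worked through for a small buffer (sketch):

    /*
     * size = 16, pos = 5 (buffer holds "hello"):
     *   printbuf_remaining_size() = 16 - 5 = 11   counts the nul slot
     *   printbuf_remaining()      = 11 - 1 = 10   printable bytes
     * for an unallocated printbuf (size == 0) both return 0.
     */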

static inline unsigned printbuf_written(struct printbuf *out)
@ -159,30 +162,25 @@ static inline unsigned printbuf_written(struct printbuf *out)
return out->size ? min(out->pos, out->size - 1) : 0;
}

/*
* Returns true if output was truncated:
*/
static inline bool printbuf_overflowed(struct printbuf *out)
static inline void printbuf_nul_terminate_reserved(struct printbuf *out)
{
return out->pos >= out->size;
if (WARN_ON(out->size && out->pos >= out->size))
out->pos = out->size - 1;
if (out->size)
out->buf[out->pos] = 0;
}

static inline void printbuf_nul_terminate(struct printbuf *out)
{
bch2_printbuf_make_room(out, 1);

if (out->pos < out->size)
out->buf[out->pos] = 0;
else if (out->size)
out->buf[out->size - 1] = 0;
printbuf_nul_terminate_reserved(out);
}

/* Doesn't call bch2_printbuf_make_room(), doesn't nul terminate: */
static inline void __prt_char_reserved(struct printbuf *out, char c)
{
if (printbuf_remaining(out))
out->buf[out->pos] = c;
out->pos++;
out->buf[out->pos++] = c;
}

/* Doesn't nul terminate: */
@ -194,37 +192,34 @@ static inline void __prt_char(struct printbuf *out, char c)

static inline void prt_char(struct printbuf *out, char c)
{
__prt_char(out, c);
printbuf_nul_terminate(out);
bch2_printbuf_make_room(out, 2);
__prt_char_reserved(out, c);
printbuf_nul_terminate_reserved(out);
}

static inline void __prt_chars_reserved(struct printbuf *out, char c, unsigned n)
{
unsigned i, can_print = min(n, printbuf_remaining(out));
unsigned can_print = min(n, printbuf_remaining(out));

for (i = 0; i < can_print; i++)
for (unsigned i = 0; i < can_print; i++)
out->buf[out->pos++] = c;
out->pos += n - can_print;
}

static inline void prt_chars(struct printbuf *out, char c, unsigned n)
{
bch2_printbuf_make_room(out, n);
__prt_chars_reserved(out, c, n);
printbuf_nul_terminate(out);
printbuf_nul_terminate_reserved(out);
}

static inline void prt_bytes(struct printbuf *out, const void *b, unsigned n)
{
unsigned i, can_print;

bch2_printbuf_make_room(out, n);

can_print = min(n, printbuf_remaining(out));
unsigned can_print = min(n, printbuf_remaining(out));

for (i = 0; i < can_print; i++)
for (unsigned i = 0; i < can_print; i++)
out->buf[out->pos++] = ((char *) b)[i];
out->pos += n - can_print;

printbuf_nul_terminate(out);
}

@ -241,18 +236,18 @@ static inline void prt_str_indented(struct printbuf *out, const char *str)

static inline void prt_hex_byte(struct printbuf *out, u8 byte)
{
bch2_printbuf_make_room(out, 2);
bch2_printbuf_make_room(out, 3);
__prt_char_reserved(out, hex_asc_hi(byte));
__prt_char_reserved(out, hex_asc_lo(byte));
printbuf_nul_terminate(out);
printbuf_nul_terminate_reserved(out);
}

static inline void prt_hex_byte_upper(struct printbuf *out, u8 byte)
{
bch2_printbuf_make_room(out, 2);
bch2_printbuf_make_room(out, 3);
__prt_char_reserved(out, hex_asc_upper_hi(byte));
__prt_char_reserved(out, hex_asc_upper_lo(byte));
printbuf_nul_terminate(out);
printbuf_nul_terminate_reserved(out);
}
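
The make_room() argument changing from 2 to 3 matches the switch to the _reserved termination helper, which writes the nul without checking for space: two hex digits plus the terminator must all be reserved up front. Spelled out for prt_hex_byte() (sketch):

    /*
     * bch2_printbuf_make_room(out, 3);       reserve hi, lo and the nul
     * __prt_char_reserved(out, hi);          pos + 1
     * __prt_char_reserved(out, lo);          pos + 2
     * printbuf_nul_terminate_reserved(out);  buf[pos] = 0, still in bounds
     */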

/**

@ -97,45 +97,14 @@ static void qc_info_to_text(struct printbuf *out, struct qc_info *i)
printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 20);

prt_str(out, "i_fieldmask");
prt_tab(out);
prt_printf(out, "%x", i->i_fieldmask);
prt_newline(out);

prt_str(out, "i_flags");
prt_tab(out);
prt_printf(out, "%u", i->i_flags);
prt_newline(out);

prt_str(out, "i_spc_timelimit");
prt_tab(out);
prt_printf(out, "%u", i->i_spc_timelimit);
prt_newline(out);

prt_str(out, "i_ino_timelimit");
prt_tab(out);
prt_printf(out, "%u", i->i_ino_timelimit);
prt_newline(out);

prt_str(out, "i_rt_spc_timelimit");
prt_tab(out);
prt_printf(out, "%u", i->i_rt_spc_timelimit);
prt_newline(out);

prt_str(out, "i_spc_warnlimit");
prt_tab(out);
prt_printf(out, "%u", i->i_spc_warnlimit);
prt_newline(out);

prt_str(out, "i_ino_warnlimit");
prt_tab(out);
prt_printf(out, "%u", i->i_ino_warnlimit);
prt_newline(out);

prt_str(out, "i_rt_spc_warnlimit");
prt_tab(out);
prt_printf(out, "%u", i->i_rt_spc_warnlimit);
prt_newline(out);
prt_printf(out, "i_fieldmask\t%x\n", i->i_fieldmask);
prt_printf(out, "i_flags\t%u\n", i->i_flags);
prt_printf(out, "i_spc_timelimit\t%u\n", i->i_spc_timelimit);
prt_printf(out, "i_ino_timelimit\t%u\n", i->i_ino_timelimit);
prt_printf(out, "i_rt_spc_timelimit\t%u\n", i->i_rt_spc_timelimit);
prt_printf(out, "i_spc_warnlimit\t%u\n", i->i_spc_warnlimit);
prt_printf(out, "i_ino_warnlimit\t%u\n", i->i_ino_warnlimit);
prt_printf(out, "i_rt_spc_warnlimit\t%u\n", i->i_rt_spc_warnlimit);
}

static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)

@ -143,60 +112,17 @@ static void qc_dqblk_to_text(struct printbuf *out, struct qc_dqblk *q)
printbuf_tabstops_reset(out);
printbuf_tabstop_push(out, 20);

prt_str(out, "d_fieldmask");
prt_tab(out);
prt_printf(out, "%x", q->d_fieldmask);
prt_newline(out);

prt_str(out, "d_spc_hardlimit");
prt_tab(out);
prt_printf(out, "%llu", q->d_spc_hardlimit);
prt_newline(out);

prt_str(out, "d_spc_softlimit");
prt_tab(out);
prt_printf(out, "%llu", q->d_spc_softlimit);
prt_newline(out);

prt_str(out, "d_ino_hardlimit");
prt_tab(out);
prt_printf(out, "%llu", q->d_ino_hardlimit);
prt_newline(out);

prt_str(out, "d_ino_softlimit");
prt_tab(out);
prt_printf(out, "%llu", q->d_ino_softlimit);
prt_newline(out);

prt_str(out, "d_space");
prt_tab(out);
prt_printf(out, "%llu", q->d_space);
prt_newline(out);

prt_str(out, "d_ino_count");
prt_tab(out);
prt_printf(out, "%llu", q->d_ino_count);
prt_newline(out);

prt_str(out, "d_ino_timer");
prt_tab(out);
prt_printf(out, "%llu", q->d_ino_timer);
prt_newline(out);

prt_str(out, "d_spc_timer");
prt_tab(out);
prt_printf(out, "%llu", q->d_spc_timer);
prt_newline(out);

prt_str(out, "d_ino_warns");
prt_tab(out);
prt_printf(out, "%i", q->d_ino_warns);
prt_newline(out);

prt_str(out, "d_spc_warns");
prt_tab(out);
prt_printf(out, "%i", q->d_spc_warns);
prt_newline(out);
prt_printf(out, "d_fieldmask\t%x\n", q->d_fieldmask);
prt_printf(out, "d_spc_hardlimit\t%llu\n", q->d_spc_hardlimit);
prt_printf(out, "d_spc_softlimit\t%llu\n", q->d_spc_softlimit);
prt_printf(out, "d_ino_hardlimit\t%llu\n", q->d_ino_hardlimit);
prt_printf(out, "d_ino_softlimit\t%llu\n", q->d_ino_softlimit);
prt_printf(out, "d_space\t%llu\n", q->d_space);
prt_printf(out, "d_ino_count\t%llu\n", q->d_ino_count);
prt_printf(out, "d_ino_timer\t%llu\n", q->d_ino_timer);
prt_printf(out, "d_spc_timer\t%llu\n", q->d_spc_timer);
prt_printf(out, "d_ino_warns\t%i\n", q->d_ino_warns);
prt_printf(out, "d_spc_warns\t%i\n", q->d_spc_warns);
}

static inline unsigned __next_qtype(unsigned i, unsigned qtypes)

@ -612,10 +538,10 @@ int bch2_fs_quota_read(struct bch_fs *c)

int ret = bch2_trans_run(c,
for_each_btree_key(trans, iter, BTREE_ID_quotas, POS_MIN,
BTREE_ITER_PREFETCH, k,
BTREE_ITER_prefetch, k,
__bch2_quota_set(c, k, NULL)) ?:
for_each_btree_key(trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
bch2_fs_quota_read_inode(trans, &iter, k)));
bch_err_fn(c, ret);
return ret;

@ -902,7 +828,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
int ret;

k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_quotas, new_quota->k.p,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
BTREE_ITER_slots|BTREE_ITER_intent);
ret = bkey_err(k);
if (unlikely(ret))
return ret;

@ -42,7 +42,7 @@ static int __bch2_set_rebalance_needs_scan(struct btree_trans *trans, u64 inum)

bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_INTENT);
BTREE_ITER_intent);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)

@ -89,7 +89,7 @@ static int bch2_clear_rebalance_needs_scan(struct btree_trans *trans, u64 inum,

bch2_trans_iter_init(trans, &iter, BTREE_ID_rebalance_work,
SPOS(inum, REBALANCE_WORK_SCAN_OFFSET, U32_MAX),
BTREE_ITER_INTENT);
BTREE_ITER_intent);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)

@ -140,7 +140,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
bch2_trans_iter_init(trans, extent_iter,
work_pos.inode ? BTREE_ID_extents : BTREE_ID_reflink,
work_pos,
BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_all_snapshots);
k = bch2_btree_iter_peek_slot(extent_iter);
if (bkey_err(k))
return k;

@ -323,12 +323,14 @@ static int do_rebalance(struct moving_context *ctxt)
struct bkey_s_c k;
int ret = 0;

bch2_trans_begin(trans);

bch2_move_stats_init(&r->work_stats, "rebalance_work");
bch2_move_stats_init(&r->scan_stats, "rebalance_scan");

bch2_trans_iter_init(trans, &rebalance_work_iter,
BTREE_ID_rebalance_work, POS_MIN,
BTREE_ITER_ALL_SNAPSHOTS);
BTREE_ITER_all_snapshots);

while (!bch2_move_ratelimit(ctxt)) {
if (!r->enabled) {

@ -65,9 +65,20 @@ static void bch2_reconstruct_alloc(struct bch_fs *c)
__set_bit_le64(BCH_FSCK_ERR_ptr_to_missing_alloc_key, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_ptr_gen_newer_than_bucket_gen, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_stale_dirty_ptr, ext->errors_silent);

__set_bit_le64(BCH_FSCK_ERR_dev_usage_buckets_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_dev_usage_sectors_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_dev_usage_fragmented_wrong, ext->errors_silent);

__set_bit_le64(BCH_FSCK_ERR_fs_usage_btree_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_fs_usage_cached_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_fs_usage_persistent_reserved_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_fs_usage_replicas_wrong, ext->errors_silent);

__set_bit_le64(BCH_FSCK_ERR_alloc_key_data_type_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_gen_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_cached_sectors_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_alloc_key_stripe_redundancy_wrong, ext->errors_silent);
__set_bit_le64(BCH_FSCK_ERR_need_discard_key_wrong, ext->errors_silent);

@ -125,9 +136,9 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
{
struct btree_iter iter;
unsigned iter_flags =
BTREE_ITER_INTENT|
BTREE_ITER_NOT_EXTENTS;
unsigned update_flags = BTREE_TRIGGER_NORUN;
BTREE_ITER_intent|
BTREE_ITER_not_extents;
unsigned update_flags = BTREE_TRIGGER_norun;
int ret;

if (k->overwritten)

@ -136,17 +147,17 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
trans->journal_res.seq = k->journal_seq;

/*
* BTREE_UPDATE_KEY_CACHE_RECLAIM disables key cache lookup/update to
* BTREE_UPDATE_key_cache_reclaim disables key cache lookup/update to
* keep the key cache coherent with the underlying btree. Nothing
* besides the allocator is doing updates yet so we don't need key cache
* coherency for non-alloc btrees, and key cache fills for snapshots
* btrees use BTREE_ITER_FILTER_SNAPSHOTS, which isn't available until
* btrees use BTREE_ITER_filter_snapshots, which isn't available until
* the snapshots recovery pass runs.
*/
if (!k->level && k->btree_id == BTREE_ID_alloc)
iter_flags |= BTREE_ITER_CACHED;
iter_flags |= BTREE_ITER_cached;
else
update_flags |= BTREE_UPDATE_KEY_CACHE_RECLAIM;
update_flags |= BTREE_UPDATE_key_cache_reclaim;

bch2_trans_node_iter_init(trans, &iter, k->btree_id, k->k->k.p,
BTREE_MAX_DEPTH, k->level,

@ -191,7 +202,7 @@ int bch2_journal_replay(struct bch_fs *c)
struct journal *j = &c->journal;
u64 start_seq = c->journal_replay_seq_start;
u64 end_seq = c->journal_replay_seq_start;
struct btree_trans *trans = bch2_trans_get(c);
struct btree_trans *trans = NULL;
bool immediate_flush = false;
int ret = 0;

@ -205,6 +216,7 @@ int bch2_journal_replay(struct bch_fs *c)
BUG_ON(!atomic_read(&keys->ref));

move_gap(keys, keys->nr);
trans = bch2_trans_get(c);

/*
* First, attempt to replay keys in sorted order. This is more

@ -361,7 +373,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
case BCH_JSET_ENTRY_dev_usage: {
struct jset_entry_dev_usage *u =
container_of(entry, struct jset_entry_dev_usage, entry);
struct bch_dev *ca = bch_dev_bkey_exists(c, le32_to_cpu(u->dev));
struct bch_dev *ca = bch2_dev_bkey_exists(c, le32_to_cpu(u->dev));
unsigned i, nr_types = jset_entry_dev_usage_nr_types(u);

for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {

@ -660,7 +672,7 @@ int bch2_fs_recovery(struct bch_fs *c)
goto err;
}

if (!c->sb.clean || c->opts.fsck || c->opts.retain_recovery_info) {
if (!c->sb.clean || c->opts.retain_recovery_info) {
struct genradix_iter iter;
struct journal_replay **i;

@ -26,11 +26,6 @@ const char * const bch2_recovery_passes[] = {
NULL
};

static int bch2_check_allocations(struct bch_fs *c)
{
return bch2_gc(c, true, false);
}

static int bch2_set_may_go_rw(struct bch_fs *c)
{
struct journal_keys *keys = &c->journal_keys;

@ -74,20 +74,20 @@ bool bch2_reflink_p_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
}

static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 *idx, unsigned flags)
struct bkey_s_c_reflink_p p, u64 *idx,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_i *k;
__le64 *refcount;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
struct printbuf buf = PRINTBUF;
int ret;

k = bch2_bkey_get_mut_noupdate(trans, &iter,
BTREE_ID_reflink, POS(0, *idx),
BTREE_ITER_WITH_UPDATES);
BTREE_ITER_with_updates);
ret = PTR_ERR_OR_ZERO(k);
if (ret)
goto err;

@ -102,7 +102,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
goto err;
}

if (!*refcount && (flags & BTREE_TRIGGER_OVERWRITE)) {
if (!*refcount && (flags & BTREE_TRIGGER_overwrite)) {
bch2_bkey_val_to_text(&buf, c, p.s_c);
bch2_trans_inconsistent(trans,
"indirect extent refcount underflow at %llu while marking\n %s",

@ -111,7 +111,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
goto err;
}

if (flags & BTREE_TRIGGER_INSERT) {
if (flags & BTREE_TRIGGER_insert) {
struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
u64 pad;

@ -141,12 +141,13 @@ err:
}

static s64 gc_trigger_reflink_p_segment(struct btree_trans *trans,
struct bkey_s_c_reflink_p p,
u64 *idx, unsigned flags, size_t r_idx)
struct bkey_s_c_reflink_p p, u64 *idx,
enum btree_iter_update_trigger_flags flags,
size_t r_idx)
{
struct bch_fs *c = trans->c;
struct reflink_gc *r;
int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
int add = !(flags & BTREE_TRIGGER_overwrite) ? 1 : -1;
u64 start = le64_to_cpu(p.v->idx);
u64 end = le64_to_cpu(p.v->idx) + p.k->size;
u64 next_idx = end + le32_to_cpu(p.v->back_pad);

@ -189,7 +190,7 @@ not_found:
set_bkey_val_u64s(&update->k, 0);
}

ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_NORUN);
ret = bch2_btree_insert_trans(trans, BTREE_ID_extents, update, BTREE_TRIGGER_norun);
}

*idx = next_idx;

@ -200,8 +201,8 @@ fsck_err:
}

static int __trigger_reflink_p(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, unsigned flags)
enum btree_id btree_id, unsigned level, struct bkey_s_c k,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);

@ -210,12 +211,12 @@ static int __trigger_reflink_p(struct btree_trans *trans,
u64 idx = le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
u64 end = le64_to_cpu(p.v->idx) + p.k->size + le32_to_cpu(p.v->back_pad);

if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
if (flags & BTREE_TRIGGER_transactional) {
while (idx < end && !ret)
ret = trans_trigger_reflink_p_segment(trans, p, &idx, flags);
}

if (flags & BTREE_TRIGGER_GC) {
if (flags & BTREE_TRIGGER_gc) {
size_t l = 0, r = c->reflink_gc_nr;

while (l < r) {

@ -238,10 +239,10 @@ int bch2_trigger_reflink_p(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old,
struct bkey_s new,
unsigned flags)
enum btree_iter_update_trigger_flags flags)
{
if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
(flags & BTREE_TRIGGER_INSERT)) {
if ((flags & BTREE_TRIGGER_transactional) &&
(flags & BTREE_TRIGGER_insert)) {
struct bch_reflink_p *v = bkey_s_to_reflink_p(new).v;

v->front_pad = v->back_pad = 0;

@ -283,21 +284,21 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r

static inline void check_indirect_extent_deleting(struct bkey_s new, unsigned *flags)
{
if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
if ((*flags & BTREE_TRIGGER_insert) && !*bkey_refcount(new)) {
new.k->type = KEY_TYPE_deleted;
new.k->size = 0;
set_bkey_val_u64s(new.k, 0);
*flags &= ~BTREE_TRIGGER_INSERT;
*flags &= ~BTREE_TRIGGER_insert;
}
}

int bch2_trigger_reflink_v(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s new,
unsigned flags)
enum btree_iter_update_trigger_flags flags)
{
if ((flags & BTREE_TRIGGER_TRANSACTIONAL) &&
(flags & BTREE_TRIGGER_INSERT))
if ((flags & BTREE_TRIGGER_transactional) &&
(flags & BTREE_TRIGGER_insert))
check_indirect_extent_deleting(new, &flags);

return bch2_trigger_extent(trans, btree_id, level, old, new, flags);
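
With the typed flag enum, the reflink triggers derive the refcount delta for an indirect extent straight from the direction of the update: inserting a reflink pointer takes a reference, overwriting one drops it. That derivation, isolated (a sketch; the real functions also handle front_pad/back_pad and the underflow check shown above):

    static inline s64 reflink_refcount_delta(enum btree_iter_update_trigger_flags flags)
    {
        return (flags & BTREE_TRIGGER_overwrite) ? -1 : 1;
    }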
|
||||
@ -349,7 +350,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
|
||||
bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
|
||||
|
||||
bch2_trans_iter_init(trans, &reflink_iter, BTREE_ID_reflink, POS_MAX,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
k = bch2_btree_iter_peek_prev(&reflink_iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
@ -394,7 +395,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
|
||||
r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));
|
||||
|
||||
ret = bch2_trans_update(trans, extent_iter, &r_p->k_i,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
BTREE_UPDATE_internal_snapshot_node);
|
||||
err:
|
||||
bch2_trans_iter_exit(trans, &reflink_iter);
|
||||
|
||||
@ -455,9 +456,9 @@ s64 bch2_remap_range(struct bch_fs *c,
|
||||
goto err;
|
||||
|
||||
bch2_trans_iter_init(trans, &src_iter, BTREE_ID_extents, src_start,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
bch2_trans_iter_init(trans, &dst_iter, BTREE_ID_extents, dst_start,
|
||||
BTREE_ITER_INTENT);
|
||||
BTREE_ITER_intent);
|
||||
|
||||
while ((ret == 0 ||
|
||||
bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
|
||||
@ -567,7 +568,7 @@ s64 bch2_remap_range(struct bch_fs *c,
|
||||
bch2_trans_begin(trans);
|
||||
|
||||
ret2 = bch2_inode_peek(trans, &inode_iter, &inode_u,
|
||||
dst_inum, BTREE_ITER_INTENT);
|
||||
dst_inum, BTREE_ITER_intent);
|
||||
|
||||
if (!ret2 &&
|
||||
inode_u.bi_size < new_i_size) {
|
||||
|
@ -10,7 +10,8 @@ void bch2_reflink_p_to_text(struct printbuf *, struct bch_fs *,
|
||||
struct bkey_s_c);
|
||||
bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
|
||||
int bch2_trigger_reflink_p(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s, unsigned);
|
||||
struct bkey_s_c, struct bkey_s,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
|
||||
#define bch2_bkey_ops_reflink_p ((struct bkey_ops) { \
|
||||
.key_invalid = bch2_reflink_p_invalid, \
|
||||
@ -25,7 +26,8 @@ int bch2_reflink_v_invalid(struct bch_fs *, struct bkey_s_c,
|
||||
void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
|
||||
struct bkey_s_c);
|
||||
int bch2_trigger_reflink_v(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s, unsigned);
|
||||
struct bkey_s_c, struct bkey_s,
|
||||
enum btree_iter_update_trigger_flags);
|
||||
|
||||
#define bch2_bkey_ops_reflink_v ((struct bkey_ops) { \
|
||||
.key_invalid = bch2_reflink_v_invalid, \
|
||||
|
@ -84,7 +84,7 @@ int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
|
||||
}
|
||||
|
||||
for (unsigned i = 0; i < r->nr_devs; i++)
|
||||
if (!bch2_dev_exists(sb, r->devs[i])) {
|
||||
if (!bch2_member_exists(sb, r->devs[i])) {
|
||||
prt_printf(err, "invalid device %u in entry ", r->devs[i]);
|
||||
goto bad;
|
||||
}
|
||||
@ -200,7 +200,7 @@ cpu_replicas_add_entry(struct bch_fs *c,
|
||||
};
|
||||
|
||||
for (i = 0; i < new_entry->nr_devs; i++)
|
||||
BUG_ON(!bch2_dev_exists2(c, new_entry->devs[i]));
|
||||
BUG_ON(!bch2_dev_exists(c, new_entry->devs[i]));
|
||||
|
||||
BUG_ON(!new_entry->data_type);
|
||||
verify_replicas_entry(new_entry);
|
||||
@ -954,7 +954,7 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
|
||||
continue;
|
||||
|
||||
for (i = 0; i < e->nr_devs; i++) {
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, e->devs[i]);
|
||||
struct bch_dev *ca = bch2_dev_bkey_exists(c, e->devs[i]);
|
||||
|
||||
nr_online += test_bit(e->devs[i], devs.d);
|
||||
nr_failed += ca->mi.state == BCH_MEMBER_STATE_failed;
|
||||
|
@ -279,10 +279,8 @@ static void bch2_sb_clean_to_text(struct printbuf *out, struct bch_sb *sb,
|
||||
struct bch_sb_field_clean *clean = field_to_type(f, clean);
|
||||
struct jset_entry *entry;
|
||||
|
||||
prt_printf(out, "flags: %x", le32_to_cpu(clean->flags));
|
||||
prt_newline(out);
|
||||
prt_printf(out, "journal_seq: %llu", le64_to_cpu(clean->journal_seq));
|
||||
prt_newline(out);
|
||||
prt_printf(out, "flags: %x\n", le32_to_cpu(clean->flags));
|
||||
prt_printf(out, "journal_seq: %llu\n", le64_to_cpu(clean->journal_seq));
|
||||
|
||||
for (entry = clean->start;
|
||||
entry != vstruct_end(&clean->field);
|
||||
|
@ -31,19 +31,12 @@ static void bch2_sb_counters_to_text(struct printbuf *out, struct bch_sb *sb,
|
||||
struct bch_sb_field *f)
|
||||
{
|
||||
struct bch_sb_field_counters *ctrs = field_to_type(f, counters);
|
||||
unsigned int i;
|
||||
unsigned int nr = bch2_sb_counter_nr_entries(ctrs);
|
||||
|
||||
for (i = 0; i < nr; i++) {
|
||||
if (i < BCH_COUNTER_NR)
|
||||
prt_printf(out, "%s ", bch2_counter_names[i]);
|
||||
else
|
||||
prt_printf(out, "(unknown)");
|
||||
|
||||
prt_tab(out);
|
||||
prt_printf(out, "%llu", le64_to_cpu(ctrs->d[i]));
|
||||
prt_newline(out);
|
||||
}
|
||||
for (unsigned i = 0; i < nr; i++)
|
||||
prt_printf(out, "%s\t%llu\n",
|
||||
i < BCH_COUNTER_NR ? bch2_counter_names[i] : "(unknown)",
|
||||
le64_to_cpu(ctrs->d[i]));
|
||||
};
|
||||
|
||||
int bch2_sb_counters_to_cpu(struct bch_fs *c)
|
||||
|
@ -164,19 +164,16 @@ static void bch2_sb_downgrade_to_text(struct printbuf *out, struct bch_sb *sb,
|
||||
printbuf_tabstop_push(out, 16);
|
||||
|
||||
for_each_downgrade_entry(e, i) {
|
||||
prt_str(out, "version:");
|
||||
prt_tab(out);
|
||||
prt_str(out, "version:\t");
|
||||
bch2_version_to_text(out, le16_to_cpu(i->version));
|
||||
prt_newline(out);
|
||||
|
||||
prt_str(out, "recovery passes:");
|
||||
prt_tab(out);
|
||||
prt_str(out, "recovery passes:\t");
|
||||
prt_bitflags(out, bch2_recovery_passes,
|
||||
bch2_recovery_passes_from_stable(le64_to_cpu(i->recovery_passes[0])));
|
||||
prt_newline(out);
|
||||
|
||||
prt_str(out, "errors:");
|
||||
prt_tab(out);
|
||||
prt_str(out, "errors:\t");
|
||||
bool first = true;
|
||||
for (unsigned j = 0; j < le16_to_cpu(i->nr_errors); j++) {
|
||||
if (!first)
|
||||
|
@ -271,7 +271,8 @@
|
||||
x(btree_root_unreadable_and_scan_found_nothing, 263) \
|
||||
x(snapshot_node_missing, 264) \
|
||||
x(dup_backpointer_to_bad_csum_extent, 265) \
|
||||
x(btree_bitmap_not_marked, 266)
|
||||
x(btree_bitmap_not_marked, 266) \
|
||||
x(btree_ptr_v2_written_0, 267)
|
||||
|
||||
enum bch_sb_error_id {
|
||||
#define x(t, n) BCH_FSCK_ERR_##t = n,
|
||||
|
@ -164,18 +164,14 @@ static void member_to_text(struct printbuf *out,
|
||||
u64 bucket_size = le16_to_cpu(m.bucket_size);
|
||||
u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;
|
||||
|
||||
if (!bch2_member_exists(&m))
|
||||
if (!bch2_member_alive(&m))
|
||||
return;
|
||||
|
||||
prt_printf(out, "Device:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "%u", i);
|
||||
prt_newline(out);
|
||||
prt_printf(out, "Device:\t%u\n", i);
|
||||
|
||||
printbuf_indent_add(out, 2);
|
||||
|
||||
prt_printf(out, "Label:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "Label:\t");
|
||||
if (BCH_MEMBER_GROUP(&m)) {
|
||||
unsigned idx = BCH_MEMBER_GROUP(&m) - 1;
|
||||
|
||||
@ -189,96 +185,59 @@ static void member_to_text(struct printbuf *out,
|
||||
}
|
||||
prt_newline(out);
|
||||
|
||||
prt_printf(out, "UUID:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "UUID:\t");
|
||||
pr_uuid(out, m.uuid.b);
|
||||
prt_newline(out);
|
||||
|
||||
prt_printf(out, "Size:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "Size:\t");
|
||||
prt_units_u64(out, device_size << 9);
|
||||
prt_newline(out);
|
||||
|
||||
for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
|
||||
prt_printf(out, "%s errors:", bch2_member_error_strs[i]);
|
||||
prt_tab(out);
|
||||
prt_u64(out, le64_to_cpu(m.errors[i]));
|
||||
prt_newline(out);
|
||||
}
|
||||
for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
|
||||
prt_printf(out, "%s errors:\t%llu\n", bch2_member_error_strs[i], le64_to_cpu(m.errors[i]));
|
||||
|
||||
for (unsigned i = 0; i < BCH_IOPS_NR; i++) {
|
||||
prt_printf(out, "%s iops:", bch2_iops_measurements[i]);
|
||||
prt_tab(out);
|
||||
prt_printf(out, "%u", le32_to_cpu(m.iops[i]));
|
||||
prt_newline(out);
|
||||
}
|
||||
for (unsigned i = 0; i < BCH_IOPS_NR; i++)
|
||||
prt_printf(out, "%s iops:\t%u\n", bch2_iops_measurements[i], le32_to_cpu(m.iops[i]));
|
||||
|
||||
prt_printf(out, "Bucket size:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "Bucket size:\t");
|
||||
prt_units_u64(out, bucket_size << 9);
|
||||
prt_newline(out);
|
||||
|
||||
prt_printf(out, "First bucket:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "%u", le16_to_cpu(m.first_bucket));
|
||||
prt_newline(out);
|
||||
prt_printf(out, "First bucket:\t%u\n", le16_to_cpu(m.first_bucket));
|
||||
prt_printf(out, "Buckets:\t%llu\n", le64_to_cpu(m.nbuckets));
|
||||
|
||||
prt_printf(out, "Buckets:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "%llu", le64_to_cpu(m.nbuckets));
|
||||
prt_newline(out);
|
||||
|
||||
prt_printf(out, "Last mount:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "Last mount:\t");
|
||||
if (m.last_mount)
|
||||
bch2_prt_datetime(out, le64_to_cpu(m.last_mount));
|
||||
else
|
||||
prt_printf(out, "(never)");
|
||||
prt_newline(out);
|
||||
|
||||
prt_printf(out, "Last superblock write:");
|
||||
prt_tab(out);
|
||||
prt_u64(out, le64_to_cpu(m.seq));
|
||||
prt_newline(out);
|
||||
prt_printf(out, "Last superblock write:\t%llu\n", le64_to_cpu(m.seq));
|
||||
|
||||
prt_printf(out, "State:");
|
||||
prt_tab(out);
|
||||
prt_printf(out, "%s",
|
||||
	prt_printf(out, "State:\t%s\n",
		   BCH_MEMBER_STATE(&m) < BCH_MEMBER_STATE_NR
		   ? bch2_member_states[BCH_MEMBER_STATE(&m)]
		   : "unknown");
	prt_newline(out);

	prt_printf(out, "Data allowed:");
	prt_tab(out);
	prt_printf(out, "Data allowed:\t");
	if (BCH_MEMBER_DATA_ALLOWED(&m))
		prt_bitflags(out, __bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m));
	else
		prt_printf(out, "(none)");
	prt_newline(out);

	prt_printf(out, "Has data:");
	prt_tab(out);
	prt_printf(out, "Has data:\t");
	if (data_have)
		prt_bitflags(out, __bch2_data_types, data_have);
	else
		prt_printf(out, "(none)");
	prt_newline(out);

	prt_str(out, "Durability:");
	prt_tab(out);
	prt_printf(out, "%llu", BCH_MEMBER_DURABILITY(&m) ? BCH_MEMBER_DURABILITY(&m) - 1 : 1);
	prt_newline(out);
	prt_printf(out, "Durability:\t%llu\n", BCH_MEMBER_DURABILITY(&m) ? BCH_MEMBER_DURABILITY(&m) - 1 : 1);

	prt_printf(out, "Discard:");
	prt_tab(out);
	prt_printf(out, "%llu", BCH_MEMBER_DISCARD(&m));
	prt_newline(out);

	prt_printf(out, "Freespace initialized:");
	prt_tab(out);
	prt_printf(out, "%llu", BCH_MEMBER_FREESPACE_INITIALIZED(&m));
	prt_newline(out);
	prt_printf(out, "Discard:\t%llu\n", BCH_MEMBER_DISCARD(&m));
	prt_printf(out, "Freespace initialized:\t%llu\n", BCH_MEMBER_FREESPACE_INITIALIZED(&m));

	printbuf_indent_sub(out, 2);
}

@@ -390,12 +349,8 @@ void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
		prt_printf(out, "%s:", bch2_member_error_strs[i]);
		prt_tab(out);
		prt_u64(out, atomic64_read(&ca->errors[i]));
		prt_newline(out);
	}
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
		prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i], atomic64_read(&ca->errors[i]));
	printbuf_indent_sub(out, 2);

	prt_str(out, "IO errors since ");

@@ -404,12 +359,9 @@ void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
		prt_printf(out, "%s:", bch2_member_error_strs[i]);
		prt_tab(out);
		prt_u64(out, atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
		prt_newline(out);
	}
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++)
		prt_printf(out, "%s:\t%llu\n", bch2_member_error_strs[i],
			   atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
	printbuf_indent_sub(out, 2);
}

@@ -438,7 +390,7 @@ void bch2_dev_errors_reset(struct bch_dev *ca)
bool bch2_dev_btree_bitmap_marked(struct bch_fs *c, struct bkey_s_c k)
{
	bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr)
		if (!bch2_dev_btree_bitmap_marked_sectors(bch_dev_bkey_exists(c, ptr->dev),
		if (!bch2_dev_btree_bitmap_marked_sectors(bch2_dev_bkey_exists(c, ptr->dev),
							  ptr->offset, btree_sectors(c)))
			return false;
	return true;

@@ -463,6 +415,9 @@ static void __bch2_dev_btree_bitmap_mark(struct bch_sb_field_members_v2 *mi, uns
		m->btree_bitmap_shift += resize;
	}

	BUG_ON(m->btree_bitmap_shift > 57);
	BUG_ON(end > 64ULL << m->btree_bitmap_shift);

	for (unsigned bit = sectors >> m->btree_bitmap_shift;
	     bit << m->btree_bitmap_shift < end;
	     bit++)
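The durability value printed above is stored with a bias of one: a raw superblock value of 0 means "unset, treat as 1", and any other raw value v encodes a durability of v - 1. A tiny standalone sketch of that decoding; the helper name is ours, not bcachefs's:

#include <stdio.h>

/* Models the BCH_MEMBER_DURABILITY encoding used in the hunks above:
 * 0 means "unset, default to 1", otherwise the stored value is the
 * durability plus one. */
static unsigned durability_decode(unsigned stored)
{
	return stored ? stored - 1 : 1;
}

int main(void)
{
	/* stored=0 -> 1 (default), stored=1 -> 0, stored=3 -> 2 */
	for (unsigned stored = 0; stored < 4; stored++)
		printf("stored=%u durability=%u\n", stored, durability_decode(stored));
	return 0;
}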
@@ -158,26 +158,38 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
#define for_each_readable_member(c, ca) \
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))

static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
	return dev < c->sb.nr_devices && c->devs[dev];
}

/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
static inline struct bch_dev *bch2_dev_bkey_exists(const struct bch_fs *c, unsigned dev)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_check(c->devs[idx], 1);
	return rcu_dereference_check(c->devs[dev], 1);
}

static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_protected(c->devs[idx],
	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

static inline struct bch_dev *bch2_dev_safe(struct bch_fs *c, unsigned dev)
{
	return c && dev < c->sb.nr_devices
		? rcu_dereference(c->devs[dev])
		: NULL;
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{

@@ -192,16 +204,16 @@ static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_exists(struct bch_member *m)
static inline bool bch2_member_alive(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_exists(&m);
		return bch2_member_alive(&m);
	}
	return false;
}

@@ -210,6 +222,8 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets = le64_to_cpu(mi->nbuckets),
		.nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -
			le16_to_cpu(mi->first_bucket),
		.first_bucket = le16_to_cpu(mi->first_bucket),
		.bucket_size = le16_to_cpu(mi->bucket_size),
		.group = BCH_MEMBER_GROUP(mi),

@@ -220,7 +234,7 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid = bch2_member_exists(mi),
		.valid = bch2_member_alive(mi),
		.btree_bitmap_shift = mi->btree_bitmap_shift,
		.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
	};
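The comment above states the lifetime rule these helpers rely on: an extant key pins its devices, so bch2_dev_bkey_exists() may skip rcu_read_lock(). A minimal sketch of a caller in that style; the function itself is hypothetical, though bkey_for_each_ptr() and ca->mi both appear elsewhere in this commit:

/* Sketch only: sums per-device durability over a key's pointers.
 * No rcu_read_lock() is taken, per the rule quoted above. */
static unsigned total_durability(struct bch_fs *c, struct bkey_s_c k)
{
	unsigned ret = 0;

	bkey_for_each_ptr(bch2_bkey_ptrs_c(k), ptr)
		ret += bch2_dev_bkey_exists(c, ptr->dev)->mi.durability;

	return ret;
}

By contrast, a caller holding no key and no lock would reach for bch2_dev_safe(), which tolerates an out-of-range index and a NULL slot.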
libbcachefs/sb-members_types.h (new file, 21 lines)
@@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_TYPES_H
#define _BCACHEFS_SB_MEMBERS_TYPES_H

struct bch_member_cpu {
	u64 nbuckets; /* device size */
	u64 nbuckets_minus_first;
	u16 first_bucket; /* index of first bucket used */
	u16 bucket_size; /* sectors */
	u16 group;
	u8 state;
	u8 discard;
	u8 data_allowed;
	u8 durability;
	u8 freespace_initialized;
	u8 valid;
	u8 btree_bitmap_shift;
	u64 btree_allocated_bitmap;
};

#endif /* _BCACHEFS_SB_MEMBERS_TYPES_H */
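A short sketch of how this struct is typically populated, assuming a context where the superblock is stable and dev is a valid member index; bch2_sb_member_get() and bch2_mi_to_cpu() are both visible in the hunks above:

/* Sketch: read the on-disk member and convert it to its CPU form. */
struct bch_member m = bch2_sb_member_get(sb, dev);
struct bch_member_cpu mi = bch2_mi_to_cpu(&m);

if (mi.valid && mi.freespace_initialized)
	pr_info("dev %u: %llu buckets of %u sectors\n",
		dev, mi.nbuckets, mi.bucket_size);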
@@ -49,7 +49,7 @@ int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
			      struct bch_snapshot_tree *s)
{
	int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
					  BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
					  BTREE_ITER_with_updates, snapshot_tree, s);

	if (bch2_err_matches(ret, ENOENT))
		ret = -BCH_ERR_ENOENT_snapshot_tree;

@@ -361,7 +361,7 @@ int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
			 struct bch_snapshot *s)
{
	return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots, POS(0, id),
				       BTREE_ITER_WITH_UPDATES, snapshot, s);
				       BTREE_ITER_with_updates, snapshot, s);
}

static int bch2_snapshot_live(struct btree_trans *trans, u32 id)

@@ -618,7 +618,7 @@ int bch2_check_snapshot_trees(struct bch_fs *c)
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_snapshot_trees, POS_MIN,
			BTREE_ITER_PREFETCH, k,
			BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_snapshot_tree(trans, &iter, k)));
	bch_err_fn(c, ret);

@@ -695,7 +695,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,

	root = bch2_bkey_get_iter_typed(trans, &root_iter,
			       BTREE_ID_snapshots, POS(0, root_id),
			       BTREE_ITER_WITH_UPDATES, snapshot);
			       BTREE_ITER_with_updates, snapshot);
	ret = bkey_err(root);
	if (ret)
		goto err;

@@ -886,7 +886,7 @@ int bch2_check_snapshots(struct bch_fs *c)
	int ret = bch2_trans_run(c,
		for_each_btree_key_reverse_commit(trans, iter,
				BTREE_ID_snapshots, POS_MAX,
				BTREE_ITER_PREFETCH, k,
				BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_snapshot(trans, &iter, k)));
	bch_err_fn(c, ret);

@@ -1001,7 +1001,7 @@ int bch2_reconstruct_snapshots(struct bch_fs *c)
		r.btree = btree;

		ret = for_each_btree_key(trans, iter, btree, POS_MIN,
				BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_PREFETCH, k, ({
				BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({
			get_snapshot_trees(c, &r, k.k->p);
		}));
		if (ret)

@@ -1018,7 +1018,7 @@ int bch2_reconstruct_snapshots(struct bch_fs *c)
		darray_for_each(*t, id) {
			if (fsck_err_on(!bch2_snapshot_equiv(c, *id),
					c, snapshot_node_missing,
					"snapshot node %u from tree %s missing", *id, buf.buf)) {
					"snapshot node %u from tree %s missing, recreate?", *id, buf.buf)) {
				if (t->nr > 1) {
					bch_err(c, "cannot reconstruct snapshot trees with multiple nodes");
					ret = -BCH_ERR_fsck_repair_unimplemented;

@@ -1090,7 +1090,7 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
	int ret = 0;

	s = bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_snapshots, POS(0, id),
				     BTREE_ITER_INTENT, snapshot);
				     BTREE_ITER_intent, snapshot);
	ret = bkey_err(s);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
				"missing snapshot %u", id);

@@ -1199,7 +1199,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
	int ret;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots,
			     POS_MIN, BTREE_ITER_INTENT);
			     POS_MIN, BTREE_ITER_intent);
	k = bch2_btree_iter_peek(&iter);
	ret = bkey_err(k);
	if (ret)

@@ -1367,7 +1367,7 @@ static int snapshot_delete_key(struct btree_trans *trans,
	if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
	    snapshot_list_has_id(equiv_seen, equiv)) {
		return bch2_btree_delete_at(trans, iter,
					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
					    BTREE_UPDATE_internal_snapshot_node);
	} else {
		return snapshot_list_add(c, equiv_seen, equiv);
	}

@@ -1404,15 +1404,15 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
	new->k.p.snapshot = equiv;

	bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
			     BTREE_ITER_ALL_SNAPSHOTS|
			     BTREE_ITER_CACHED|
			     BTREE_ITER_INTENT);
			     BTREE_ITER_all_snapshots|
			     BTREE_ITER_cached|
			     BTREE_ITER_intent);

	ret = bch2_btree_iter_traverse(&new_iter) ?:
		bch2_trans_update(trans, &new_iter, new,
				  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
				  BTREE_UPDATE_internal_snapshot_node) ?:
		bch2_btree_delete_at(trans, iter,
				     BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
				     BTREE_UPDATE_internal_snapshot_node);
	bch2_trans_iter_exit(trans, &new_iter);
	if (ret)
		return ret;

@@ -1603,12 +1603,12 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)

		ret = for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
			snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
		      for_each_btree_key_commit(trans, iter,
				id, POS_MIN,
				BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
			move_key_to_correct_snapshot(trans, &iter, k));

@@ -1643,7 +1643,7 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
	 * nodes some depth fields will be off:
	 */
	ret = for_each_btree_key_commit(trans, iter, BTREE_ID_snapshots, POS_MIN,
				BTREE_ITER_INTENT, k,
				BTREE_ITER_intent, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_fix_child_of_deleted_snapshot(trans, &iter, k, &deleted_interior));
	if (ret)

@@ -1699,8 +1699,8 @@ int __bch2_key_has_snapshot_overwrites(struct btree_trans *trans,
	int ret;

	bch2_trans_iter_init(trans, &iter, id, pos,
			     BTREE_ITER_NOT_EXTENTS|
			     BTREE_ITER_ALL_SNAPSHOTS);
			     BTREE_ITER_not_extents|
			     BTREE_ITER_all_snapshots);
	while (1) {
		k = bch2_btree_iter_prev(&iter);
		ret = bkey_err(k);

@@ -1752,7 +1752,7 @@ static int bch2_propagate_key_to_snapshot_leaf(struct btree_trans *trans,

	pos.snapshot = leaf_id;

	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_INTENT);
	bch2_trans_iter_init(trans, &iter, btree, pos, BTREE_ITER_intent);
	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
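The renames running through these hunks (BTREE_ITER_INTENT to BTREE_ITER_intent, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE to BTREE_UPDATE_internal_snapshot_node, and so on) follow from folding the iterator, update, and trigger flags into one enum btree_iter_update_trigger_flags. The real definition lives in btree_types.h and is not part of the hunks shown here, so the sketch below is illustrative only; the names match this diff, but the bit positions are guesses:

/* Hypothetical sketch of the shape of the unified flags enum;
 * actual bit assignments are defined in btree_types.h, not here. */
enum btree_iter_update_trigger_flags {
	BTREE_ITER_slots		= 1U << 0,
	BTREE_ITER_intent		= 1U << 1,
	BTREE_ITER_prefetch		= 1U << 2,
	BTREE_ITER_cached		= 1U << 3,
	BTREE_ITER_not_extents		= 1U << 4,
	BTREE_ITER_all_snapshots	= 1U << 5,
	BTREE_ITER_with_updates		= 1U << 6,
};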
@@ -77,7 +77,7 @@ static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
		return 0;

	u32 parent = s->parent;
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBU) &&
	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
	    parent &&
	    s->depth != snapshot_t(c, parent)->depth + 1)
		panic("id %u depth=%u parent %u depth=%u\n",
@@ -15,16 +15,6 @@
#include <crypto/hash.h>
#include <crypto/sha2.h>

typedef unsigned __bitwise bch_str_hash_flags_t;

enum bch_str_hash_flags {
	__BCH_HASH_SET_MUST_CREATE,
	__BCH_HASH_SET_MUST_REPLACE,
};

#define BCH_HASH_SET_MUST_CREATE (__force bch_str_hash_flags_t) BIT(__BCH_HASH_SET_MUST_CREATE)
#define BCH_HASH_SET_MUST_REPLACE (__force bch_str_hash_flags_t) BIT(__BCH_HASH_SET_MUST_REPLACE)

static inline enum bch_str_hash_type
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
{

@@ -159,13 +149,14 @@ static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, s
			desc.is_visible(inum, k));
}

static __always_inline int
static __always_inline struct bkey_s_c
bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
			     struct btree_iter *iter,
			     const struct bch_hash_desc desc,
			     const struct bch_hash_info *info,
			     subvol_inum inum, const void *key,
			     unsigned flags, u32 snapshot)
			     enum btree_iter_update_trigger_flags flags,
			     u32 snapshot)
{
	struct bkey_s_c k;
	int ret;

@@ -173,10 +164,10 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
	for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
			   SPOS(inum.inum, desc.hash_key(info, key), snapshot),
			   POS(inum.inum, U64_MAX),
			   BTREE_ITER_SLOTS|flags, k, ret) {
			   BTREE_ITER_slots|flags, k, ret) {
		if (is_visible_key(desc, inum, k)) {
			if (!desc.cmp_key(k, key))
				return 0;
				return k;
		} else if (k.k->type == KEY_TYPE_hash_whiteout) {
			;
		} else {

@@ -186,20 +177,23 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
	}
	bch2_trans_iter_exit(trans, iter);

	return ret ?: -BCH_ERR_ENOENT_str_hash_lookup;
	return bkey_s_c_err(ret ?: -BCH_ERR_ENOENT_str_hash_lookup);
}

static __always_inline int
static __always_inline struct bkey_s_c
bch2_hash_lookup(struct btree_trans *trans,
		 struct btree_iter *iter,
		 const struct bch_hash_desc desc,
		 const struct bch_hash_info *info,
		 subvol_inum inum, const void *key,
		 unsigned flags)
		 enum btree_iter_update_trigger_flags flags)
{
	u32 snapshot;
	return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
		bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
	int ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		return bkey_s_c_err(ret);

	return bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
}

static __always_inline int

@@ -220,7 +214,7 @@ bch2_hash_hole(struct btree_trans *trans,
	for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
			   SPOS(inum.inum, desc.hash_key(info, key), snapshot),
			   POS(inum.inum, U64_MAX),
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret)
			   BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
		if (!is_visible_key(desc, inum, k))
			return 0;
	bch2_trans_iter_exit(trans, iter);

@@ -242,7 +236,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,

	bch2_btree_iter_advance(&iter);

	for_each_btree_key_continue_norestart(iter, BTREE_ITER_SLOTS, k, ret) {
	for_each_btree_key_continue_norestart(iter, BTREE_ITER_slots, k, ret) {
		if (k.k->type != desc.key_type &&
		    k.k->type != KEY_TYPE_hash_whiteout)
			break;

@@ -264,8 +258,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
			      const struct bch_hash_info *info,
			      subvol_inum inum, u32 snapshot,
			      struct bkey_i *insert,
			      bch_str_hash_flags_t str_hash_flags,
			      int update_flags)
			      enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter, slot = { NULL };
	struct bkey_s_c k;

@@ -277,7 +270,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
			   desc.hash_bkey(info, bkey_i_to_s_c(insert)),
			   snapshot),
			   POS(insert->k.p.inode, U64_MAX),
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
			   BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
		if (is_visible_key(desc, inum, k)) {
			if (!desc.cmp_bkey(k, bkey_i_to_s_c(insert)))
				goto found;

@@ -286,8 +279,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
			continue;
		}

		if (!slot.path &&
		    !(str_hash_flags & BCH_HASH_SET_MUST_REPLACE))
		if (!slot.path && !(flags & STR_HASH_must_replace))
			bch2_trans_copy_iter(&slot, &iter);

		if (k.k->type != KEY_TYPE_hash_whiteout)

@@ -305,16 +297,16 @@ found:
	found = true;
not_found:

	if (!found && (str_hash_flags & BCH_HASH_SET_MUST_REPLACE)) {
	if (!found && (flags & STR_HASH_must_replace)) {
		ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
	} else if (found && (str_hash_flags & BCH_HASH_SET_MUST_CREATE)) {
	} else if (found && (flags & STR_HASH_must_create)) {
		ret = -EEXIST;
	} else {
		if (!found && slot.path)
			swap(iter, slot);

		insert->k.p = iter.pos;
		ret = bch2_trans_update(trans, &iter, insert, update_flags);
		ret = bch2_trans_update(trans, &iter, insert, flags);
	}

	goto out;

@@ -326,14 +318,14 @@ int bch2_hash_set(struct btree_trans *trans,
		  const struct bch_hash_info *info,
		  subvol_inum inum,
		  struct bkey_i *insert,
		  bch_str_hash_flags_t str_hash_flags)
		  enum btree_iter_update_trigger_flags flags)
{
	insert->k.p.inode = inum.inum;

	u32 snapshot;
	return bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
		bch2_hash_set_in_snapshot(trans, desc, info, inum,
					  snapshot, insert, str_hash_flags, 0);
					  snapshot, insert, flags);
}

static __always_inline

@@ -341,7 +333,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
			const struct bch_hash_desc desc,
			const struct bch_hash_info *info,
			struct btree_iter *iter,
			unsigned update_flags)
			enum btree_iter_update_trigger_flags flags)
{
	struct bkey_i *delete;
	int ret;

@@ -359,7 +351,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
	delete->k.p = iter->pos;
	delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted;

	return bch2_trans_update(trans, iter, delete, update_flags);
	return bch2_trans_update(trans, iter, delete, flags);
}

static __always_inline

@@ -369,14 +361,10 @@ int bch2_hash_delete(struct btree_trans *trans,
		     subvol_inum inum, const void *key)
{
	struct btree_iter iter;
	int ret;

	ret = bch2_hash_lookup(trans, &iter, desc, info, inum, key,
			       BTREE_ITER_INTENT);
	if (ret)
		return ret;

	ret = bch2_hash_delete_at(trans, desc, info, &iter, 0);
	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info, inum, key,
					     BTREE_ITER_intent);
	int ret = bkey_err(k) ?:
		bch2_hash_delete_at(trans, desc, info, &iter, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}
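With bch2_hash_lookup() now returning struct bkey_s_c, the error travels inside the returned key and is unpacked with bkey_err(), so callers no longer need the separate bch2_btree_iter_peek_slot() step. A hedged sketch of the new caller shape, mirroring the xattr.c conversion later in this commit; desc, info, inum, and key are stand-ins for whatever the caller already has:

/* Sketch of the new calling convention for hash lookups: */
struct btree_iter iter;
struct bkey_s_c k = bch2_hash_lookup(trans, &iter, desc, info, inum, key, 0);
int ret = bkey_err(k);
if (!ret) {
	/* use k; iter is positioned on the found key */
}
bch2_trans_iter_exit(trans, &iter);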
@@ -162,7 +162,7 @@ int bch2_check_subvols(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
			BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_subvol(trans, &iter, k)));
	bch_err_fn(c, ret);

@@ -198,7 +198,7 @@ int bch2_check_subvol_children(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
			BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_PREFETCH, k,
			BTREE_ID_subvolume_children, POS_MIN, BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			check_subvol_child(trans, &iter, k)));
	bch_err_fn(c, ret);

@@ -247,7 +247,7 @@ int bch2_subvolume_trigger(struct btree_trans *trans,
			   struct bkey_s_c old, struct bkey_s new,
			   unsigned flags)
{
	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
	if (flags & BTREE_TRIGGER_transactional) {
		struct bpos children_pos_old = subvolume_children_pos(old);
		struct bpos children_pos_new = subvolume_children_pos(new.s_c);

@@ -333,7 +333,7 @@ int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvolid,

	subvol = bch2_bkey_get_iter_typed(trans, &iter,
					  BTREE_ID_subvolumes, POS(0, subvolid),
					  BTREE_ITER_CACHED|BTREE_ITER_WITH_UPDATES,
					  BTREE_ITER_cached|BTREE_ITER_with_updates,
					  subvolume);
	ret = bkey_err(subvol);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,

@@ -383,9 +383,9 @@ static int bch2_subvolumes_reparent(struct btree_trans *trans, u32 subvolid_to_d

	return lockrestart_do(trans,
			bch2_subvolume_get(trans, subvolid_to_delete, true,
					   BTREE_ITER_CACHED, &s)) ?:
					   BTREE_ITER_cached, &s)) ?:
		for_each_btree_key_commit(trans, iter,
				BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_PREFETCH, k,
				BTREE_ID_subvolumes, POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_subvolume_reparent(trans, &iter, k,
					subvolid_to_delete, le32_to_cpu(s.creation_parent)));

@@ -404,7 +404,7 @@ static int __bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)

	subvol = bch2_bkey_get_iter_typed(trans, &iter,
				BTREE_ID_subvolumes, POS(0, subvolid),
				BTREE_ITER_CACHED|BTREE_ITER_INTENT,
				BTREE_ITER_cached|BTREE_ITER_intent,
				subvolume);
	ret = bkey_err(subvol);
	bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,

@@ -505,7 +505,7 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)

	n = bch2_bkey_get_mut_typed(trans, &iter,
			BTREE_ID_subvolumes, POS(0, subvolid),
			BTREE_ITER_CACHED, subvolume);
			BTREE_ITER_cached, subvolume);
	ret = PTR_ERR_OR_ZERO(n);
	if (unlikely(ret)) {
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), trans->c,

@@ -547,7 +547,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,

	src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter,
				BTREE_ID_subvolumes, POS(0, src_subvolid),
				BTREE_ITER_CACHED, subvolume);
				BTREE_ITER_cached, subvolume);
	ret = PTR_ERR_OR_ZERO(src_subvol);
	if (unlikely(ret)) {
		bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
@@ -1150,8 +1150,7 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
{
	struct bch_sb_field_ext *e = field_to_type(f, ext);

	prt_printf(out, "Recovery passes required:");
	prt_tab(out);
	prt_printf(out, "Recovery passes required:\t");
	prt_bitflags(out, bch2_recovery_passes,
		     bch2_recovery_passes_from_stable(le64_to_cpu(e->recovery_passes_required[0])));
	prt_newline(out);

@@ -1160,16 +1159,14 @@ static void bch2_sb_ext_to_text(struct printbuf *out, struct bch_sb *sb,
	if (errors_silent) {
		le_bitvector_to_cpu(errors_silent, (void *) e->errors_silent, sizeof(e->errors_silent) * 8);

		prt_printf(out, "Errors to silently fix:");
		prt_tab(out);
		prt_printf(out, "Errors to silently fix:\t");
		prt_bitflags_vector(out, bch2_sb_error_strs, errors_silent, sizeof(e->errors_silent) * 8);
		prt_newline(out);

		kfree(errors_silent);
	}

	prt_printf(out, "Btrees with missing data:");
	prt_tab(out);
	prt_printf(out, "Btrees with missing data:\t");
	prt_bitflags(out, __bch2_btree_ids, le64_to_cpu(e->btrees_lost_data));
	prt_newline(out);
}

@@ -1277,97 +1274,73 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
	printbuf_tabstop_push(out, 44);

	for (int i = 0; i < sb->nr_devices; i++)
		nr_devices += bch2_dev_exists(sb, i);
		nr_devices += bch2_member_exists(sb, i);

	prt_printf(out, "External UUID:");
	prt_tab(out);
	prt_printf(out, "External UUID:\t");
	pr_uuid(out, sb->user_uuid.b);
	prt_newline(out);

	prt_printf(out, "Internal UUID:");
	prt_tab(out);
	prt_printf(out, "Internal UUID:\t");
	pr_uuid(out, sb->uuid.b);
	prt_newline(out);

	prt_printf(out, "Magic number:");
	prt_tab(out);
	prt_printf(out, "Magic number:\t");
	pr_uuid(out, sb->magic.b);
	prt_newline(out);

	prt_str(out, "Device index:");
	prt_tab(out);
	prt_printf(out, "%u", sb->dev_idx);
	prt_newline(out);
	prt_printf(out, "Device index:\t%u\n", sb->dev_idx);

	prt_str(out, "Label:");
	prt_tab(out);
	prt_str(out, "Label:\t");
	prt_printf(out, "%.*s", (int) sizeof(sb->label), sb->label);
	prt_newline(out);

	prt_str(out, "Version:");
	prt_tab(out);
	prt_str(out, "Version:\t");
	bch2_version_to_text(out, le16_to_cpu(sb->version));
	prt_newline(out);

	prt_str(out, "Version upgrade complete:");
	prt_tab(out);
	prt_str(out, "Version upgrade complete:\t");
	bch2_version_to_text(out, BCH_SB_VERSION_UPGRADE_COMPLETE(sb));
	prt_newline(out);

	prt_printf(out, "Oldest version on disk:");
	prt_tab(out);
	prt_printf(out, "Oldest version on disk:\t");
	bch2_version_to_text(out, le16_to_cpu(sb->version_min));
	prt_newline(out);

	prt_printf(out, "Created:");
	prt_tab(out);
	prt_printf(out, "Created:\t");
	if (sb->time_base_lo)
		bch2_prt_datetime(out, div_u64(le64_to_cpu(sb->time_base_lo), NSEC_PER_SEC));
	else
		prt_printf(out, "(not set)");
	prt_newline(out);

	prt_printf(out, "Sequence number:");
	prt_tab(out);
	prt_printf(out, "Sequence number:\t");
	prt_printf(out, "%llu", le64_to_cpu(sb->seq));
	prt_newline(out);

	prt_printf(out, "Time of last write:");
	prt_tab(out);
	prt_printf(out, "Time of last write:\t");
	bch2_prt_datetime(out, le64_to_cpu(sb->write_time));
	prt_newline(out);

	prt_printf(out, "Superblock size:");
	prt_tab(out);
	prt_printf(out, "Superblock size:\t");
	prt_units_u64(out, vstruct_bytes(sb));
	prt_str(out, "/");
	prt_units_u64(out, 512ULL << sb->layout.sb_max_size_bits);
	prt_newline(out);

	prt_printf(out, "Clean:");
	prt_tab(out);
	prt_printf(out, "%llu", BCH_SB_CLEAN(sb));
	prt_newline(out);
	prt_printf(out, "Clean:\t%llu\n", BCH_SB_CLEAN(sb));
	prt_printf(out, "Devices:\t%u\n", nr_devices);

	prt_printf(out, "Devices:");
	prt_tab(out);
	prt_printf(out, "%u", nr_devices);
	prt_newline(out);

	prt_printf(out, "Sections:");
	prt_printf(out, "Sections:\t");
	vstruct_for_each(sb, f)
		fields_have |= 1 << le32_to_cpu(f->type);
	prt_tab(out);
	prt_bitflags(out, bch2_sb_fields, fields_have);
	prt_newline(out);

	prt_printf(out, "Features:");
	prt_tab(out);
	prt_printf(out, "Features:\t");
	prt_bitflags(out, bch2_sb_features, le64_to_cpu(sb->features[0]));
	prt_newline(out);

	prt_printf(out, "Compat features:");
	prt_tab(out);
	prt_printf(out, "Compat features:\t");
	prt_bitflags(out, bch2_sb_compat, le64_to_cpu(sb->compat[0]));
	prt_newline(out);

@@ -1384,8 +1357,7 @@ void bch2_sb_to_text(struct printbuf *out, struct bch_sb *sb,
	if (opt->get_sb != BCH2_NO_SB_OPT) {
		u64 v = bch2_opt_from_sb(sb, id);

		prt_printf(out, "%s:", opt->attr.name);
		prt_tab(out);
		prt_printf(out, "%s:\t", opt->attr.name);
		bch2_opt_to_text(out, NULL, sb, opt, v,
				 OPT_HUMAN_READABLE|OPT_SHOW_FULL_LIST);
		prt_newline(out);
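These conversions lean on printbuf's format-string handling, where, as the surrounding hunks suggest, '\t' advances to the next tabstop, '\r' right-justifies the field just printed against that tabstop, and '\n' ends the line; printbuf.c is among the files touched by this commit, and this reading of the semantics is ours. A sketch of the short form that replaces a prt_tab()/prt_newline() sequence:

/* Sketch: with a tabstop pushed, one format string produces an
 * aligned "label: value" line instead of four printbuf calls. */
printbuf_tabstop_push(out, 32);
prt_printf(out, "Sequence number:\t%llu\n", le64_to_cpu(sb->seq));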
@@ -468,6 +468,7 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
	 * at least one non-flush write in the journal or recovery will fail:
	 */
	set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
	set_bit(JOURNAL_RUNNING, &c->journal.flags);

	for_each_rw_member(c, ca)
		bch2_dev_allocator_add(c, ca);

@@ -571,6 +572,7 @@ static void __bch2_fs_free(struct bch_fs *c)
	BUG_ON(atomic_read(&c->journal_keys.ref));
	bch2_fs_btree_write_buffer_exit(c);
	percpu_free_rwsem(&c->mark_lock);
	EBUG_ON(percpu_u64_get(c->online_reserved));
	free_percpu(c->online_reserved);

	darray_exit(&c->btree_roots_extra);

@@ -939,7 +941,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
		goto err;

	for (i = 0; i < c->sb.nr_devices; i++)
		if (bch2_dev_exists(c->disk_sb.sb, i) &&
		if (bch2_member_exists(c->disk_sb.sb, i) &&
		    bch2_dev_alloc(c, i)) {
			ret = -EEXIST;
			goto err;

@@ -1100,7 +1102,7 @@ static int bch2_dev_in_fs(struct bch_sb_handle *fs,
	if (!uuid_equal(&fs->sb->uuid, &sb->sb->uuid))
		return -BCH_ERR_device_not_a_member_of_filesystem;

	if (!bch2_dev_exists(fs->sb, sb->sb->dev_idx))
	if (!bch2_member_exists(fs->sb, sb->sb->dev_idx))
		return -BCH_ERR_device_has_been_removed;

	if (fs->sb->block_size != sb->sb->block_size)

@@ -1410,10 +1412,9 @@ static int bch2_dev_attach_bdev(struct bch_fs *c, struct bch_sb_handle *sb)
	    le64_to_cpu(c->disk_sb.sb->seq))
		bch2_sb_to_fs(c, sb->sb);

	BUG_ON(sb->sb->dev_idx >= c->sb.nr_devices ||
	       !c->devs[sb->sb->dev_idx]);
	BUG_ON(!bch2_dev_exists(c, sb->sb->dev_idx));

	ca = bch_dev_locked(c, sb->sb->dev_idx);
	ca = bch2_dev_locked(c, sb->sb->dev_idx);

	ret = __bch2_dev_attach_bdev(ca, sb);
	if (ret)

@@ -1505,10 +1506,10 @@ static bool bch2_fs_may_start(struct bch_fs *c)
		mutex_lock(&c->sb_lock);

		for (i = 0; i < c->disk_sb.sb->nr_devices; i++) {
			if (!bch2_dev_exists(c->disk_sb.sb, i))
			if (!bch2_member_exists(c->disk_sb.sb, i))
				continue;

			ca = bch_dev_locked(c, i);
			ca = bch2_dev_locked(c, i);

			if (!bch2_dev_is_online(ca) &&
			    (ca->mi.state == BCH_MEMBER_STATE_rw ||

@@ -1598,17 +1599,17 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
	 * with bch2_do_invalidates() and bch2_do_discards()
	 */
	ret = bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
				      BTREE_TRIGGER_NORUN, NULL) ?:
				      BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
					BTREE_TRIGGER_NORUN, NULL) ?:
					BTREE_TRIGGER_norun, NULL) ?:
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
					BTREE_TRIGGER_NORUN, NULL);
					BTREE_TRIGGER_norun, NULL);
	bch_err_msg(c, ret, "removing dev alloc info");
	return ret;
}

@@ -1777,7 +1778,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
		goto no_slot;

	for (dev_idx = 0; dev_idx < BCH_SB_MEMBERS_MAX; dev_idx++)
		if (!bch2_dev_exists(c->disk_sb.sb, dev_idx))
		if (!bch2_member_exists(c->disk_sb.sb, dev_idx))
			goto have_slot;
no_slot:
	ret = -BCH_ERR_ENOSPC_sb_members;

@@ -1820,7 +1821,7 @@ have_slot:

	bch2_dev_usage_journal_reserve(c);

	ret = bch2_trans_mark_dev_sb(c, ca);
	ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
	bch_err_msg(ca, ret, "marking new superblock");
	if (ret)
		goto err_late;

@@ -1883,9 +1884,9 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
	if (ret)
		goto err;

	ca = bch_dev_locked(c, dev_idx);
	ca = bch2_dev_locked(c, dev_idx);

	ret = bch2_trans_mark_dev_sb(c, ca);
	ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
	bch_err_msg(c, ret, "bringing %s online: error from bch2_trans_mark_dev_sb", path);
	if (ret)
		goto err;

@@ -1971,7 +1972,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
	if (ret)
		goto err;

	ret = bch2_trans_mark_dev_sb(c, ca);
	ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
	if (ret)
		goto err;
@@ -26,19 +26,4 @@ struct bch_devs_list {
	u8 data[BCH_BKEY_PTRS_MAX];
};

struct bch_member_cpu {
	u64 nbuckets; /* device size */
	u16 first_bucket; /* index of first bucket used */
	u16 bucket_size; /* sectors */
	u16 group;
	u8 state;
	u8 discard;
	u8 data_allowed;
	u8 durability;
	u8 freespace_initialized;
	u8 valid;
	u8 btree_bitmap_shift;
	u64 btree_allocated_bitmap;
};

#endif /* _BCACHEFS_SUPER_TYPES_H */
@@ -189,12 +189,8 @@ static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
{
	bch2_printbuf_tabstop_push(out, 24);

	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++) {
		prt_str(out, bch2_write_refs[i]);
		prt_tab(out);
		prt_printf(out, "%li", atomic_long_read(&c->writes[i]));
		prt_newline(out);
	}
	for (unsigned i = 0; i < ARRAY_SIZE(c->writes); i++)
		prt_printf(out, "%s\t%li\n", bch2_write_refs[i], atomic_long_read(&c->writes[i]));
}
#endif

@@ -278,7 +274,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
			continue;

		ret = for_each_btree_key(trans, iter, id, POS_MIN,
					 BTREE_ITER_ALL_SNAPSHOTS, k, ({
					 BTREE_ITER_all_snapshots, k, ({
			struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
			struct bch_extent_crc_unpacked crc;
			const union bch_extent_entry *entry;

@@ -313,22 +309,11 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
	if (ret)
		return ret;

	prt_str(out, "type");
	printbuf_tabstop_push(out, 12);
	prt_tab(out);

	prt_str(out, "compressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "uncompressed");
	printbuf_tabstop_push(out, 16);
	prt_tab_rjust(out);

	prt_str(out, "average extent size");
	printbuf_tabstop_push(out, 24);
	prt_tab_rjust(out);
	prt_newline(out);
	prt_printf(out, "type\tcompressed\runcompressed\raverage extent size\r\n");

	for (unsigned i = 0; i < ARRAY_SIZE(s); i++) {
		bch2_prt_compression_type(out, i);

@@ -377,6 +362,34 @@ static void bch2_btree_wakeup_all(struct bch_fs *c)
	seqmutex_unlock(&c->btree_trans_lock);
}

static void fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
{
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 24);
	prt_printf(out, "hidden\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.hidden));
	prt_printf(out, "btree\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.btree));
	prt_printf(out, "data\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.data));
	prt_printf(out, "cached\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.cached));
	prt_printf(out, "reserved\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.reserved));
	prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved));
	prt_printf(out, "nr_inodes\t%llu\n", bch2_fs_usage_read_one(c, &c->usage_base->b.nr_inodes));

	prt_newline(out);
	prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_printf(out, "open buckets total\t%u\n", OPEN_BUCKETS_COUNT);
	prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open_buckets_btree\t%u\n", nr[BCH_DATA_btree]);
	prt_printf(out, "open_buckets_user\t%u\n", nr[BCH_DATA_user]);
	prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr);
}

SHOW(bch2_fs)
{
	struct bch_fs *c = container_of(kobj, struct bch_fs, kobj);

@@ -459,6 +472,9 @@ SHOW(bch2_fs)
	if (attr == &sysfs_disk_groups)
		bch2_disk_groups_to_text(out, c);

	if (attr == &sysfs_alloc_debug)
		fs_alloc_debug_to_text(out, c);

	return 0;
}

@@ -516,18 +532,8 @@ STORE(bch2_fs)
	if (attr == &sysfs_btree_wakeup)
		bch2_btree_wakeup_all(c);

	if (attr == &sysfs_trigger_gc) {
		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		down_read(&c->state_lock);
		bch2_gc(c, false, false);
		up_read(&c->state_lock);
#else
	if (attr == &sysfs_trigger_gc)
		bch2_gc_gens(c);
#endif
	}

	if (attr == &sysfs_trigger_discards)
		bch2_do_discards(c);

@@ -594,13 +600,11 @@ SHOW(bch2_fs_counters)
	if (attr == &sysfs_##t) { \
		counter = percpu_u64_get(&c->counters[BCH_COUNTER_##t]); \
		counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t]; \
		prt_printf(out, "since mount:"); \
		prt_tab(out); \
		prt_printf(out, "since mount:\t"); \
		prt_human_readable_u64(out, counter_since_mount); \
		prt_newline(out); \
		\
		prt_printf(out, "since filesystem creation:"); \
		prt_tab(out); \
		prt_printf(out, "since filesystem creation:\t"); \
		prt_human_readable_u64(out, counter); \
		prt_newline(out); \
	}

@@ -677,6 +681,7 @@ struct attribute *bch2_fs_internal_files[] = {
	&sysfs_internal_uuid,

	&sysfs_disk_groups,
	&sysfs_alloc_debug,
	NULL
};

@@ -796,11 +801,11 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_dev_usage stats = bch2_dev_usage_read(ca);
	unsigned i, nr[BCH_DATA_NR];
	unsigned nr[BCH_DATA_NR];

	memset(nr, 0, sizeof(nr));

	for (i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

	printbuf_tabstop_push(out, 8);

@@ -813,65 +818,24 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)

	prt_newline(out);

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
		prt_newline(out);
	}
	prt_printf(out, "reserves:\n");
	for (unsigned i = 0; i < BCH_WATERMARK_NR; i++)
		prt_printf(out, "%s\t%llu\r\n", bch2_watermarks[i], bch2_dev_buckets_reserved(ca, i));

	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, 24);

	prt_str(out, "freelist_wait");
	prt_tab(out);
	prt_str(out, c->freelist_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open buckets allocated");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_newline(out);

	prt_str(out, "open buckets this dev");
	prt_tab(out);
	prt_u64(out, ca->nr_open_buckets);
	prt_newline(out);

	prt_str(out, "open buckets total");
	prt_tab(out);
	prt_u64(out, OPEN_BUCKETS_COUNT);
	prt_newline(out);

	prt_str(out, "open_buckets_wait");
	prt_tab(out);
	prt_str(out, c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_newline(out);

	prt_str(out, "open_buckets_btree");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_btree]);
	prt_newline(out);

	prt_str(out, "open_buckets_user");
	prt_tab(out);
	prt_u64(out, nr[BCH_DATA_user]);
	prt_newline(out);

	prt_str(out, "buckets_to_invalidate");
	prt_tab(out);
	prt_u64(out, should_invalidate_buckets(ca, stats));
	prt_newline(out);

	prt_str(out, "btree reserve cache");
	prt_tab(out);
	prt_u64(out, c->btree_reserve_cache_nr);
	prt_newline(out);
	prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open buckets allocated\t%i\n", OPEN_BUCKETS_COUNT - c->open_buckets_nr_free);
	prt_printf(out, "open buckets this dev\t%i\n", ca->nr_open_buckets);
	prt_printf(out, "open buckets total\t%u\n", OPEN_BUCKETS_COUNT);
	prt_printf(out, "open_buckets_wait\t%s\n", c->open_buckets_wait.list.first ? "waiting" : "empty");
	prt_printf(out, "open_buckets_btree\t%u\n", nr[BCH_DATA_btree]);
	prt_printf(out, "open_buckets_user\t%u\n", nr[BCH_DATA_user]);
	prt_printf(out, "buckets_to_invalidate\t%llu\n", should_invalidate_buckets(ca, stats));
	prt_printf(out, "btree reserve cache\t%u\n", c->btree_reserve_cache_nr);
}

static const char * const bch2_rw[] = {
@@ -40,7 +40,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_INTENT);
			     BTREE_ITER_intent);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:

@@ -81,7 +81,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
	k.k.p.snapshot = U32_MAX;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, k.k.p,
			     BTREE_ITER_INTENT);
			     BTREE_ITER_intent);

	ret = commit_do(trans, NULL, NULL, 0,
		bch2_btree_iter_traverse(&iter) ?:

@@ -261,7 +261,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					BTREE_ITER_SLOTS, k, ({
					BTREE_ITER_slots, k, ({
			if (i >= nr * 2)
				break;

@@ -322,7 +322,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter, BTREE_ID_extents,
					SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
					BTREE_ITER_SLOTS, k, ({
					BTREE_ITER_slots, k, ({
			if (i == nr)
				break;
			BUG_ON(bkey_deleted(k.k) != !(i % 16));

@@ -452,7 +452,7 @@ static int insert_test_overlapping_extent(struct bch_fs *c, u64 inum, u64 start,

	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_btree_insert_nonextent(trans, BTREE_ID_extents, &k.k_i,
					    BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE));
					    BTREE_UPDATE_internal_snapshot_node));
	bch_err_fn(c, ret);
	return ret;
}

@@ -671,7 +671,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
			     BTREE_ITER_INTENT);
			     BTREE_ITER_intent);
	k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX));
	ret = bkey_err(k);
	if (ret)

@@ -714,7 +714,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					  SPOS(0, 0, U32_MAX),
					  BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
					  BTREE_ITER_slots|BTREE_ITER_intent, k,
					  NULL, NULL, 0, ({
			if (iter.pos.offset >= nr)
				break;

@@ -737,7 +737,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_xattrs,
					  SPOS(0, 0, U32_MAX),
					  BTREE_ITER_INTENT, k,
					  BTREE_ITER_intent, k,
					  NULL, NULL, 0, ({
			struct bkey_i_cookie u;
@@ -348,15 +348,12 @@ static void bch2_pr_time_units_aligned(struct printbuf *out, u64 ns)
{
	const struct time_unit *u = bch2_pick_time_units(ns);

	prt_printf(out, "%llu ", div64_u64(ns, u->nsecs));
	prt_tab_rjust(out);
	prt_printf(out, "%s", u->name);
	prt_printf(out, "%llu \r%s", div64_u64(ns, u->nsecs), u->name);
}

static inline void pr_name_and_units(struct printbuf *out, const char *name, u64 ns)
{
	prt_str(out, name);
	prt_tab(out);
	prt_printf(out, "%s\t", name);
	bch2_pr_time_units_aligned(out, ns);
	prt_newline(out);
}

@@ -389,12 +386,8 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
	}

	printbuf_tabstop_push(out, out->indent + TABSTOP_SIZE);
	prt_printf(out, "count:");
	prt_tab(out);
	prt_printf(out, "%llu ",
		   stats->duration_stats.n);
	prt_printf(out, "count:\t%llu\n", stats->duration_stats.n);
	printbuf_tabstop_pop(out);
	prt_newline(out);

	printbuf_tabstops_reset(out);

@@ -403,13 +396,8 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
	printbuf_tabstop_push(out, 0);
	printbuf_tabstop_push(out, TABSTOP_SIZE + 2);

	prt_tab(out);
	prt_printf(out, "since mount");
	prt_tab_rjust(out);
	prt_tab(out);
	prt_printf(out, "\tsince mount\r\trecent\r\n");
	prt_printf(out, "recent");
	prt_tab_rjust(out);
	prt_newline(out);

	printbuf_tabstops_reset(out);
	printbuf_tabstop_push(out, out->indent + 20);

@@ -417,23 +405,20 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
	printbuf_tabstop_push(out, 2);
	printbuf_tabstop_push(out, TABSTOP_SIZE);

	prt_printf(out, "duration of events");
	prt_newline(out);
	prt_printf(out, "duration of events\n");
	printbuf_indent_add(out, 2);

	pr_name_and_units(out, "min:", stats->min_duration);
	pr_name_and_units(out, "max:", stats->max_duration);
	pr_name_and_units(out, "total:", stats->total_duration);

	prt_printf(out, "mean:");
	prt_tab(out);
	prt_printf(out, "mean:\t");
	bch2_pr_time_units_aligned(out, d_mean);
	prt_tab(out);
	bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->duration_stats_weighted, TIME_STATS_MV_WEIGHT));
	prt_newline(out);

	prt_printf(out, "stddev:");
	prt_tab(out);
	prt_printf(out, "stddev:\t");
	bch2_pr_time_units_aligned(out, d_stddev);
	prt_tab(out);
	bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->duration_stats_weighted, TIME_STATS_MV_WEIGHT));

@@ -441,22 +426,19 @@ void bch2_time_stats_to_text(struct printbuf *out, struct bch2_time_stats *stats
	printbuf_indent_sub(out, 2);
	prt_newline(out);

	prt_printf(out, "time between events");
	prt_newline(out);
	prt_printf(out, "time between events\n");
	printbuf_indent_add(out, 2);

	pr_name_and_units(out, "min:", stats->min_freq);
	pr_name_and_units(out, "max:", stats->max_freq);

	prt_printf(out, "mean:");
	prt_tab(out);
	prt_printf(out, "mean:\t");
	bch2_pr_time_units_aligned(out, f_mean);
	prt_tab(out);
	bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_mean(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT));
	prt_newline(out);

	prt_printf(out, "stddev:");
	prt_tab(out);
	prt_printf(out, "stddev:\t");
	bch2_pr_time_units_aligned(out, f_stddev);
	prt_tab(out);
	bch2_pr_time_units_aligned(out, mean_and_variance_weighted_get_stddev(stats->freq_stats_weighted, TIME_STATS_MV_WEIGHT));

@@ -589,40 +571,31 @@ void bch2_pd_controller_debug_to_text(struct printbuf *out, struct bch_pd_contro
	if (!out->nr_tabstops)
		printbuf_tabstop_push(out, 20);

	prt_printf(out, "rate:");
	prt_tab(out);
	prt_printf(out, "rate:\t");
	prt_human_readable_s64(out, pd->rate.rate);
	prt_newline(out);

	prt_printf(out, "target:");
	prt_tab(out);
	prt_printf(out, "target:\t");
	prt_human_readable_u64(out, pd->last_target);
	prt_newline(out);

	prt_printf(out, "actual:");
	prt_tab(out);
	prt_printf(out, "actual:\t");
	prt_human_readable_u64(out, pd->last_actual);
	prt_newline(out);

	prt_printf(out, "proportional:");
	prt_tab(out);
	prt_printf(out, "proportional:\t");
	prt_human_readable_s64(out, pd->last_proportional);
	prt_newline(out);

	prt_printf(out, "derivative:");
	prt_tab(out);
	prt_printf(out, "derivative:\t");
	prt_human_readable_s64(out, pd->last_derivative);
	prt_newline(out);

	prt_printf(out, "change:");
	prt_tab(out);
	prt_printf(out, "change:\t");
	prt_human_readable_s64(out, pd->last_change);
	prt_newline(out);

	prt_printf(out, "next io:");
	prt_tab(out);
	prt_printf(out, "%llims", div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC));
	prt_newline(out);
	prt_printf(out, "next io:\t%llims\n", div64_s64(pd->rate.next - local_clock(), NSEC_PER_MSEC));
}

/* misc: */
@@ -138,21 +138,13 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
	struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
	struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
	struct btree_iter iter;
	struct bkey_s_c_xattr xattr;
	struct bkey_s_c k;
	int ret;

	ret = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
			       inode_inum(inode), &search, 0);
	struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
					     inode_inum(inode), &search, 0);
	int ret = bkey_err(k);
	if (ret)
		goto err1;
		return ret;

	k = bch2_btree_iter_peek_slot(&iter);
	ret = bkey_err(k);
	if (ret)
		goto err2;

	xattr = bkey_s_c_to_xattr(k);
	struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
	ret = le16_to_cpu(xattr.v->x_val_len);
	if (buffer) {
		if (ret > size)

@@ -160,10 +152,8 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info
		else
			memcpy(buffer, xattr_val(xattr.v), ret);
	}
err2:
	bch2_trans_iter_exit(trans, &iter);
err1:
	return ret < 0 && bch2_err_matches(ret, ENOENT) ? -ENODATA : ret;
	return ret;
}

int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,

@@ -177,7 +167,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
	int ret;

	ret = bch2_subvol_is_ro_trans(trans, inum.subvol) ?:
		bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_INTENT);
		bch2_inode_peek(trans, &inode_iter, inode_u, inum, BTREE_ITER_intent);
	if (ret)
		return ret;

@@ -212,8 +202,8 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,

		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
				    inum, &xattr->k_i,
				    (flags & XATTR_CREATE ? BCH_HASH_SET_MUST_CREATE : 0)|
				    (flags & XATTR_REPLACE ? BCH_HASH_SET_MUST_REPLACE : 0));
				    (flags & XATTR_CREATE ? STR_HASH_must_create : 0)|
				    (flags & XATTR_REPLACE ? STR_HASH_must_replace : 0));
	} else {
		struct xattr_search_key search =
			X_SEARCH(type, name, strlen(name));

@@ -359,6 +349,9 @@ static int bch2_xattr_get_handler(const struct xattr_handler *handler,
	int ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_xattr_get_trans(trans, inode, name, buffer, size, handler->flags));

	if (ret < 0 && bch2_err_matches(ret, ENOENT))
		ret = -ENODATA;

	return bch2_err_class(ret);
}
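The last hunk moves the error translation out to the outermost layer: internal code keeps returning bcachefs's private ENOENT-class codes, and only the xattr handler converts them into what the VFS expects before stripping private codes entirely. A sketch of that boundary convention, with the surrounding handler context assumed from the hunk above:

/* Sketch: classify once at the edge, then normalize for userspace. */
if (ret < 0 && bch2_err_matches(ret, ENOENT))	/* any ENOENT-class error */
	ret = -ENODATA;				/* missing xattr, per VFS convention */
return bch2_err_class(ret);			/* map private -BCH_ERR_* codes to standard errnos */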