Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-12-08 00:00:12 +03:00)

Update bcachefs sources to 99a43760af01 bcachefs: ret_fsck_err()

parent cafd061641
commit 5e19f0d66f
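
This update pulls in the upstream ret_fsck_err() conversion: fsck checks that used to accumulate an error in a local ret and unwind through a fsck_err: label now call ret_fsck_err()/ret_fsck_err_on(), which return unexpected errors directly from the enclosing function and otherwise evaluate to "was this error fixed?". A rough sketch of the calling pattern, for orientation only — check_example_old()/check_example_new() and their message are invented here, while ret_fsck_err(), try() and the backpointer helpers are taken from the hunks below:

/* Before: error accumulated in ret, unwound through a fsck_err: label
 * (the old fsck_err() macro jumps there on unexpected errors). */
static int check_example_old(struct btree_trans *trans, struct bkey_s_c k)
{
	int ret = 0;

	if (fsck_err(trans, backpointer_to_missing_device,
		     "backpointer for missing device"))
		ret = bch2_backpointer_del(trans, k.k->p);
fsck_err:
	return ret;
}

/* After: ret_fsck_err() hides the early return for unexpected errors,
 * so the label and the ret variable go away. */
static int check_example_new(struct btree_trans *trans, struct bkey_s_c k)
{
	if (ret_fsck_err(trans, backpointer_to_missing_device,
			 "backpointer for missing device"))
		try(bch2_backpointer_del(trans, k.k->p));

	return 0;
}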
@@ -1 +1 @@
f4a2c8cad65c14cd6feb8c79aae2d708b7928d9b
99a43760af01b64e736624b984240eefcc821148
@@ -673,9 +673,8 @@ static int disk_accounting_invalid_dev(struct btree_trans *trans,
{
CLASS(printbuf, buf)();
bch2_accounting_key_to_text(&buf, acc);
int ret = 0;

if (fsck_err(trans, accounting_to_invalid_device,
if (ret_fsck_err(trans, accounting_to_invalid_device,
"accounting entry points to invalid device %u\n%s",
dev, buf.buf)) {
bch2_u64s_neg(v, nr);

@@ -686,8 +685,6 @@ static int disk_accounting_invalid_dev(struct btree_trans *trans,
} else {
return bch_err_throw(trans->c, remove_disk_accounting_entry);
}
fsck_err:
return ret;
}
@@ -536,53 +536,55 @@ void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bke
}
}

static int bucket_gens_init_iter(struct btree_trans *trans, struct bkey_s_c k,
struct bkey_i_bucket_gens *g,
bool *have_bucket_gens_key)
{
/*
* Not a fsck error because this is checked/repaired by
* bch2_check_alloc_key() which runs later:
*/
if (!bch2_dev_bucket_exists(trans->c, k.k->p))
return 0;

unsigned offset;
struct bpos pos = alloc_gens_pos(k.k->p, &offset);

if (*have_bucket_gens_key && !bkey_eq(g->k.p, pos)) {
try(bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g->k_i, 0));
try(bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc));

*have_bucket_gens_key = false;
}

if (!*have_bucket_gens_key) {
bkey_bucket_gens_init(&g->k_i);
g->k.p = pos;
*have_bucket_gens_key = true;
}

struct bch_alloc_v4 a;
g->v.gens[offset] = bch2_alloc_to_v4(k, &a)->gen;
return 0;
}
int bch2_bucket_gens_init(struct bch_fs *c)
{
struct bkey_i_bucket_gens g;
bool have_bucket_gens_key = false;
int ret;

CLASS(btree_trans, trans)(c);
ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
try(for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_prefetch, k, ({
/*
* Not a fsck error because this is checked/repaired by
* bch2_check_alloc_key() which runs later:
*/
if (!bch2_dev_bucket_exists(c, k.k->p))
continue;
bucket_gens_init_iter(trans, k, &g, &have_bucket_gens_key);
})));

struct bch_alloc_v4 a;
u8 gen = bch2_alloc_to_v4(k, &a)->gen;
unsigned offset;
struct bpos pos = alloc_gens_pos(iter.pos, &offset);
int ret2 = 0;

if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
ret2 = bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
if (ret2)
goto iter_err;
have_bucket_gens_key = false;
}

if (!have_bucket_gens_key) {
bkey_bucket_gens_init(&g.k_i);
g.k.p = pos;
have_bucket_gens_key = true;
}

g.v.gens[offset] = gen;
iter_err:
ret2;
}));

if (have_bucket_gens_key && !ret)
ret = commit_do(trans, NULL, NULL,
if (have_bucket_gens_key)
try(commit_do(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0)));

return ret;
return 0;
}

int bch2_alloc_read(struct bch_fs *c)
@@ -766,13 +768,21 @@ int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca
return 0;
}

static noinline int inval_bucket_key(struct btree_trans *trans, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
return bch_err_throw(c, trigger_alloc);
}

int bch2_trigger_alloc(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s new,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
int ret = 0;

CLASS(bch2_dev_bucket_tryget, ca)(c, new.k->p);
@@ -849,6 +859,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
u64 transaction_seq = trans->journal_res.seq;
BUG_ON(!transaction_seq);

CLASS(printbuf, buf)();
if (log_fsck_err_on(transaction_seq && new_a->journal_seq_nonempty > transaction_seq,
trans, alloc_key_journal_seq_in_future,
"bucket journal seq in future (currently at %llu)\n%s",

@@ -901,7 +912,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
guard(rcu)();
u8 *gen = bucket_gen(ca, new.k->p.offset);
if (unlikely(!gen))
goto invalid_bucket;
return inval_bucket_key(trans, new.s_c);
*gen = new_a->gen;
}

@@ -931,16 +942,12 @@ int bch2_trigger_alloc(struct btree_trans *trans,
guard(rcu)();
struct bucket *g = gc_bucket(ca, new.k->p.offset);
if (unlikely(!g))
goto invalid_bucket;
return inval_bucket_key(trans, new.s_c);
g->gen_valid = 1;
g->gen = new_a->gen;
}
fsck_err:
return ret;
invalid_bucket:
bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
(bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
return bch_err_throw(c, trigger_alloc);
}

static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
@@ -1278,13 +1285,12 @@ static int invalidate_one_bucket(struct btree_trans *trans,
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
int ret = 0;

if (*nr_to_invalidate <= 0)
return 1;

if (!bch2_dev_bucket_exists(c, bucket)) {
if (fsck_err(trans, lru_entry_to_invalid_bucket,
if (ret_fsck_err(trans, lru_entry_to_invalid_bucket,
"lru key points to nonexistent device:bucket %llu:%llu",
bucket.inode, bucket.offset))
return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);

@@ -1329,23 +1335,21 @@ static int invalidate_one_bucket(struct btree_trans *trans,
trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
--*nr_to_invalidate;
}
fsck_err:
return ret;

return 0;
}

static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
struct bch_dev *ca, bool *wrapped)
{
struct bkey_s_c k;
again:
k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
if (!k.k && !*wrapped) {
while (true) {
struct bkey_s_c k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
if (k.k || *wrapped)
return k;

bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
*wrapped = true;
goto again;
}

return k;
}

static void __bch2_do_invalidates(struct bch_dev *ca)
@@ -201,7 +201,6 @@ static int backpointer_target_not_found(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
int ret = 0;

/*
* If we're using the btree write buffer, the backpointer we were

@@ -230,7 +229,7 @@ static int backpointer_target_not_found(struct btree_trans *trans,
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&bp2.k_i));
}

if (fsck_err(trans, backpointer_to_missing_ptr,
if (ret_fsck_err(trans, backpointer_to_missing_ptr,
"%s", buf.buf)) {
try(bch2_backpointer_del(trans, bp.k->p));

@@ -252,8 +251,8 @@ static int backpointer_target_not_found(struct btree_trans *trans,
? bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc)
: 0);
}
fsck_err:
return ret;

return 0;
}

static struct btree *__bch2_backpointer_get_node(struct btree_trans *trans,
@@ -378,35 +377,33 @@ static int bch2_check_backpointer_has_valid_bucket(struct btree_trans *trans, st

struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
int ret = 0;

struct bpos bucket;
if (!bp_pos_to_bucket_nodev_noerror(c, k.k->p, &bucket)) {
try(bch2_backpointers_maybe_flush(trans, k, last_flushed));

if (fsck_err(trans, backpointer_to_missing_device,
if (ret_fsck_err(trans, backpointer_to_missing_device,
"backpointer for missing device:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_backpointer_del(trans, k.k->p);
return ret;
try(bch2_backpointer_del(trans, k.k->p));

return 0;
}

{
CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, 0);
struct bkey_s_c alloc_k = bkey_try(bch2_btree_iter_peek_slot(&alloc_iter));
CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket, 0);
struct bkey_s_c alloc_k = bkey_try(bch2_btree_iter_peek_slot(&alloc_iter));

if (alloc_k.k->type != KEY_TYPE_alloc_v4) {
try(bch2_backpointers_maybe_flush(trans, k, last_flushed));
if (alloc_k.k->type != KEY_TYPE_alloc_v4) {
try(bch2_backpointers_maybe_flush(trans, k, last_flushed));

if (fsck_err(trans, backpointer_to_missing_alloc,
"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
alloc_iter.pos.inode, alloc_iter.pos.offset,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_backpointer_del(trans, k.k->p);
}
if (ret_fsck_err(trans, backpointer_to_missing_alloc,
"backpointer for nonexistent alloc key: %llu:%llu:0\n%s",
alloc_iter.pos.inode, alloc_iter.pos.offset,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
try(bch2_backpointer_del(trans, k.k->p));
}
fsck_err:
return ret;

return 0;
}

/* verify that every backpointer has a corresponding alloc key */
@@ -518,7 +515,6 @@ static int bp_missing(struct btree_trans *trans,
struct bkey_s_c bp_found)
{
struct bch_fs *c = trans->c;
int ret = 0;

CLASS(printbuf, buf)();
prt_str(&buf, "missing backpointer\nfor: ");

@@ -531,10 +527,10 @@ static int bp_missing(struct btree_trans *trans,
bch2_bkey_val_to_text(&buf, c, bp_found);
}

if (fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf))
if (ret_fsck_err(trans, ptr_to_missing_backpointer, "%s", buf.buf))
try(bch2_bucket_backpointer_mod(trans, extent, bp, true));
fsck_err:
return ret;

return 0;
}

static bool bkey_dev_ptr_stale(struct bch_fs *c, struct bkey_s_c k, unsigned dev)
@@ -112,7 +112,6 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
int ret = 0;

CLASS(bch2_dev_tryget_noerror, ca)(c, p.ptr.dev);
if (!ca) {

@@ -120,7 +119,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
return 0;

if (test_bit(p.ptr.dev, c->devs_removed.d)) {
if (fsck_err(trans, ptr_to_removed_device,
if (ret_fsck_err(trans, ptr_to_removed_device,
"pointer to removed device %u\n"
"while marking %s",
p.ptr.dev,

@@ -128,7 +127,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;
} else {
if (fsck_err(trans, ptr_to_invalid_device,
if (ret_fsck_err(trans, ptr_to_invalid_device,
"pointer to missing device %u\n"
"while marking %s",
p.ptr.dev,

@@ -141,7 +140,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,

struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
if (!g) {
if (fsck_err(trans, ptr_to_invalid_device,
if (ret_fsck_err(trans, ptr_to_invalid_device,
"pointer to invalid bucket on device %u\n"
"while marking %s",
p.ptr.dev,
@@ -153,7 +152,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,

enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);

if (fsck_err_on(!g->gen_valid,
if (ret_fsck_err_on(!g->gen_valid,
trans, ptr_to_missing_alloc_key,
"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
"while marking %s",

@@ -174,7 +173,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,

/* g->gen_valid == true */

if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
if (ret_fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
trans, ptr_gen_newer_than_bucket_gen,
"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
"while marking %s",

@@ -195,7 +194,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
*do_update = true;
}

if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
if (ret_fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
trans, ptr_gen_newer_than_bucket_gen,
"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
"while marking %s",

@@ -206,7 +205,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;

if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
if (ret_fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
trans, stale_dirty_ptr,
"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
"while marking %s",

@@ -220,7 +219,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
return 0;

if (fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
if (ret_fsck_err_on(bucket_data_type_mismatch(g->data_type, data_type),
trans, ptr_bucket_data_type_mismatch,
"bucket %u:%zu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
@@ -236,11 +235,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
bch_err(c, "btree and superblock in the same bucket - cannot repair");
return bch_err_throw(c, fsck_repair_unimplemented);
case BCH_DATA_journal:
ret = bch2_dev_journal_bucket_delete(ca, PTR_BUCKET_NR(ca, &p.ptr));
bch_err_msg(c, ret, "error deleting journal bucket %zu",
PTR_BUCKET_NR(ca, &p.ptr));
if (ret)
return ret;
try(bch2_dev_journal_bucket_delete(ca, PTR_BUCKET_NR(ca, &p.ptr)));
break;
}

@@ -256,7 +251,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
if (p.has_ec) {
struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

if (fsck_err_on(!m || !m->alive,
if (ret_fsck_err_on(!m || !m->alive,
trans, ptr_to_missing_stripe,
"pointer to nonexistent stripe %llu\n"
"while marking %s",

@@ -265,7 +260,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;

if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
if (ret_fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p),
trans, ptr_to_incorrect_stripe,
"pointer does not match stripe %llu\n"
"while marking %s",

@@ -274,8 +269,8 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;
}
fsck_err:
return ret;

return 0;
}

static bool should_drop_ptr(struct bch_fs *c, struct bkey_s_c k,
@@ -466,7 +461,6 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
size_t bucket_nr = PTR_BUCKET_NR(ca, ptr);
CLASS(printbuf, buf)();
bool inserting = sectors > 0;
int ret = 0;

BUG_ON(!sectors);

@@ -495,7 +489,7 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
}

if (b_gen != ptr->gen && ptr->cached) {
if (fsck_err_on(c->sb.compat & BIT_ULL(BCH_COMPAT_no_stale_ptrs),
if (ret_fsck_err_on(c->sb.compat & BIT_ULL(BCH_COMPAT_no_stale_ptrs),
trans, stale_ptr_with_no_stale_ptrs_feature,
"stale cached ptr, but have no_stale_ptrs feature\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {

@@ -544,8 +538,7 @@ int bch2_bucket_ref_update(struct btree_trans *trans, struct bch_dev *ca,
}

*bucket_sectors += sectors;
fsck_err:
return ret;
return 0;
}

void bch2_trans_account_disk_usage_change(struct btree_trans *trans)
@@ -83,31 +83,31 @@ static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *buck
static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
struct bch_dev **ca, struct bkey *hole)
{
struct bch_fs *c = iter->trans->c;
struct bkey_s_c k;
again:
k = bch2_get_key_or_hole(iter, POS_MAX, hole);
if (bkey_err(k))
return k;
while (true) {
struct bch_fs *c = iter->trans->c;
struct bkey_s_c k = bch2_get_key_or_hole(iter, POS_MAX, hole);
if (bkey_err(k))
return k;

*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);
*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);

if (!k.k->type) {
struct bpos hole_start = bkey_start_pos(k.k);
if (!k.k->type) {
struct bpos hole_start = bkey_start_pos(k.k);

if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
if (!next_bucket(c, ca, &hole_start))
return bkey_s_c_null;
if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
if (!next_bucket(c, ca, &hole_start))
return bkey_s_c_null;

bch2_btree_iter_set_pos(iter, hole_start);
goto again;
bch2_btree_iter_set_pos(iter, hole_start);
continue;
}

if (k.k->p.offset > (*ca)->mi.nbuckets)
bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
}

if (k.k->p.offset > (*ca)->mi.nbuckets)
bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
return k;
}

return k;
}

int bch2_need_discard_or_freespace_err(struct btree_trans *trans,
@@ -152,13 +152,13 @@ int bch2_check_alloc_key(struct btree_trans *trans,
int ret = 0;

CLASS(bch2_dev_bucket_tryget_noerror, ca)(c, alloc_k.k->p);
if (fsck_err_on(!ca,
if (ret_fsck_err_on(!ca,
trans, alloc_key_to_missing_dev_bucket,
"alloc key for invalid device:bucket %llu:%llu",
alloc_k.k->p.inode, alloc_k.k->p.offset))
ret = bch2_btree_delete_at(trans, alloc_iter, 0);
try(bch2_btree_delete_at(trans, alloc_iter, 0));
if (!ca)
return ret;
return 0;

if (!ca->mi.freespace_initialized)
return 0;

@@ -184,7 +184,7 @@ int bch2_check_alloc_key(struct btree_trans *trans,
bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
k = bkey_try(bch2_btree_iter_peek_slot(bucket_gens_iter));

if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
if (ret_fsck_err_on(a->gen != alloc_gen(k, gens_offset),
trans, bucket_gens_key_wrong,
"incorrect gen in bucket_gens btree (got %u should be %u)\n%s",
alloc_gen(k, gens_offset), a->gen,

@@ -216,7 +216,6 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
struct btree_iter *freespace_iter)
{
CLASS(printbuf, buf)();
int ret = 0;

if (!ca->mi.freespace_initialized)
return 0;

@@ -227,7 +226,7 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,

*end = bkey_min(k.k->p, *end);

if (fsck_err_on(k.k->type != KEY_TYPE_set,
if (ret_fsck_err_on(k.k->type != KEY_TYPE_set,
trans, freespace_hole_missing,
"hole in alloc btree missing in freespace btree\n"
"device %llu buckets %llu-%llu",

@@ -246,8 +245,8 @@ int bch2_check_alloc_hole_freespace(struct btree_trans *trans,

try(bch2_trans_update(trans, freespace_iter, update, 0));
}
fsck_err:
return ret;

return 0;
}

static noinline_for_stack
@@ -258,7 +257,6 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
{
CLASS(printbuf, buf)();
unsigned gens_offset, gens_end_offset;
int ret = 0;

bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

@@ -275,7 +273,7 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
bkey_reassemble(&g.k_i, k);

for (unsigned i = gens_offset; i < gens_end_offset; i++) {
if (fsck_err_on(g.v.gens[i], trans,
if (ret_fsck_err_on(g.v.gens[i], trans,
bucket_gens_hole_wrong,
"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
bucket_gens_pos_to_alloc(k.k->p, i).inode,

@@ -296,8 +294,7 @@ int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
}

*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
fsck_err:
return ret;
return 0;
}

struct check_discard_freespace_key_async {
@@ -446,21 +443,20 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
u64 b;
bool need_update = false;
CLASS(printbuf, buf)();
int ret = 0;

BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
bkey_reassemble(&g.k_i, k);

CLASS(bch2_dev_tryget_noerror, ca)(c, k.k->p.inode);
if (!ca) {
if (fsck_err(trans, bucket_gens_to_invalid_dev,
if (ret_fsck_err(trans, bucket_gens_to_invalid_dev,
"bucket_gens key for invalid device:\n%s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
return bch2_btree_delete_at(trans, iter, 0);
return 0;
}

if (fsck_err_on(end <= ca->mi.first_bucket ||
if (ret_fsck_err_on(end <= ca->mi.first_bucket ||
start >= ca->mi.nbuckets,
trans, bucket_gens_to_invalid_buckets,
"bucket_gens key for invalid buckets:\n%s",

@@ -469,7 +465,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
}

for (b = start; b < ca->mi.first_bucket; b++)
if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
if (ret_fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
trans, bucket_gens_nonzero_for_invalid_buckets,
"bucket_gens key has nonzero gen for invalid bucket")) {
g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;

@@ -477,7 +473,7 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
}

for (b = ca->mi.nbuckets; b < end; b++)
if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
if (ret_fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
trans, bucket_gens_nonzero_for_invalid_buckets,
"bucket_gens key has nonzero gen for invalid bucket")) {
g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
@@ -488,10 +484,53 @@ int bch2_check_bucket_gens_key(struct btree_trans *trans,
struct bkey_i *u = errptr_try(bch2_trans_kmalloc(trans, sizeof(g)));

memcpy(u, &g, sizeof(g));
return bch2_trans_update(trans, iter, u, 0);
try(bch2_trans_update(trans, iter, u, 0));
}
fsck_err:
return ret;

return 0;
}

static int check_btree_alloc_iter(struct btree_trans *trans,
struct bch_dev **ca,
struct btree_iter *iter,
struct btree_iter *discard_iter,
struct btree_iter *freespace_iter,
struct btree_iter *bucket_gens_iter,
struct progress_indicator *progress)
{
struct bkey hole;
struct bkey_s_c k = bkey_try(bch2_get_key_or_real_bucket_hole(iter, ca, &hole));

if (!k.k)
return 1;

try(progress_update_iter(trans, progress, iter));

struct bpos next;
if (k.k->type) {
next = bpos_nosnap_successor(k.k->p);

try(bch2_check_alloc_key(trans, k, iter,
discard_iter,
freespace_iter,
bucket_gens_iter));
} else {
next = k.k->p;

try(bch2_check_alloc_hole_freespace(trans, *ca,
bkey_start_pos(k.k),
&next,
freespace_iter));
try(bch2_check_alloc_hole_bucket_gens(trans,
bkey_start_pos(k.k),
&next,
bucket_gens_iter));
}

try(bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc));

bch2_btree_iter_set_pos(iter, next);
return 0;
}

static int check_btree_alloc(struct btree_trans *trans)
@@ -507,62 +546,13 @@ static int check_btree_alloc(struct btree_trans *trans)
struct bch_dev *ca __free(bch2_dev_put) = NULL;
int ret = 0;

while (1) {
bch2_trans_begin(trans);

struct bkey hole;
struct bkey_s_c k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;

if (!k.k)
break;

ret = progress_update_iter(trans, &progress, &iter);
if (ret)
break;

struct bpos next;
if (k.k->type) {
next = bpos_nosnap_successor(k.k->p);

ret = bch2_check_alloc_key(trans,
k, &iter,
&discard_iter,
&freespace_iter,
&bucket_gens_iter);
BUG_ON(ret > 0);
if (ret)
goto bkey_err;
} else {
next = k.k->p;

ret = bch2_check_alloc_hole_freespace(trans, ca,
bkey_start_pos(k.k),
&next,
&freespace_iter) ?:
bch2_check_alloc_hole_bucket_gens(trans,
bkey_start_pos(k.k),
&next,
&bucket_gens_iter);
BUG_ON(ret > 0);
if (ret)
goto bkey_err;
}

ret = bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
if (ret)
goto bkey_err;

bch2_btree_iter_set_pos(&iter, next);
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
}
while (!(ret = lockrestart_do(trans,
check_btree_alloc_iter(trans, &ca, &iter,
&discard_iter,
&freespace_iter,
&bucket_gens_iter,
&progress))))
;

return min(0, ret);
}
@@ -625,7 +615,6 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a;
CLASS(printbuf, buf)();
int ret = 0;

struct bkey_s_c alloc_k = bkey_try(bch2_btree_iter_peek(alloc_iter));
if (!alloc_k.k)

@@ -644,7 +633,7 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
lru_idx, alloc_k, last_flushed));

if (a->data_type == BCH_DATA_cached) {
if (fsck_err_on(!a->io_time[READ],
if (ret_fsck_err_on(!a->io_time[READ],
trans, alloc_key_cached_but_read_time_zero,
"cached bucket with read_time 0\n%s",
(printbuf_reset(&buf),

@@ -659,13 +648,13 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
a = &a_mut->v;
}

ret = bch2_lru_check_set(trans, alloc_k.k->p.inode,
bucket_to_u64(alloc_k.k->p),
a->io_time[READ],
alloc_k, last_flushed);
try(bch2_lru_check_set(trans, alloc_k.k->p.inode,
bucket_to_u64(alloc_k.k->p),
a->io_time[READ],
alloc_k, last_flushed));
}
fsck_err:
return ret;

return 0;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
@@ -686,14 +675,46 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
}))?: bch2_check_stripe_to_lru_refs(trans);
}

static int dev_freespace_init_iter(struct btree_trans *trans, struct bch_dev *ca,
struct btree_iter *iter, struct bpos end)
{
struct bkey hole;
struct bkey_s_c k = bkey_try(bch2_get_key_or_hole(iter, end, &hole));

if (k.k->type) {
/*
* We process live keys in the alloc btree one at a
* time:
*/
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

try(bch2_bucket_do_index(trans, ca, k, a, true));
try(bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc));

bch2_btree_iter_advance(iter);
} else {
struct bkey_i *freespace = errptr_try(bch2_trans_kmalloc(trans, sizeof(*freespace)));

bkey_init(&freespace->k);
freespace->k.type = KEY_TYPE_set;
freespace->k.p = k.k->p;
freespace->k.size = k.k->size;

try(bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0));
try(bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc));

bch2_btree_iter_set_pos(iter, k.k->p);
}

return 0;
}

int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
u64 bucket_start, u64 bucket_end)
{
struct bkey_s_c k;
struct bkey hole;
struct bpos end = POS(ca->dev_idx, bucket_end);
unsigned long last_updated = jiffies;
int ret;

BUG_ON(bucket_start > bucket_end);
BUG_ON(bucket_end > ca->mi.nbuckets);
@@ -706,71 +727,14 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
* Scan the alloc btree for every bucket on @ca, and add buckets to the
* freespace/need_discard/need_gc_gens btrees as needed:
*/
while (1) {
while (bkey_lt(iter.pos, end)) {
if (time_after(jiffies, last_updated + HZ * 10)) {
bch_info(ca, "%s: currently at %llu/%llu",
__func__, iter.pos.offset, ca->mi.nbuckets);
last_updated = jiffies;
}

bch2_trans_begin(trans);

if (bkey_ge(iter.pos, end)) {
ret = 0;
break;
}

k = bch2_get_key_or_hole(&iter, end, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;

if (k.k->type) {
/*
* We process live keys in the alloc btree one at a
* time:
*/
struct bch_alloc_v4 a_convert;
const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
if (ret)
goto bkey_err;

bch2_btree_iter_advance(&iter);
} else {
struct bkey_i *freespace;

freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
ret = PTR_ERR_OR_ZERO(freespace);
if (ret)
goto bkey_err;

bkey_init(&freespace->k);
freespace->k.type = KEY_TYPE_set;
freespace->k.p = k.k->p;
freespace->k.size = k.k->size;

ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc);
if (ret)
goto bkey_err;

bch2_btree_iter_set_pos(&iter, k.k->p);
}
bkey_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
}

if (ret < 0) {
bch_err_msg(ca, ret, "initializing free space");
return ret;
try(lockrestart_do(trans, dev_freespace_init_iter(trans, ca, &iter, end)));
}

scoped_guard(mutex, &c->sb_lock) {
@@ -83,7 +83,6 @@ int bch2_lru_check_set(struct btree_trans *trans,
struct wb_maybe_flush *last_flushed)
{
struct bch_fs *c = trans->c;
int ret = 0;

CLASS(btree_iter, lru_iter)(trans, BTREE_ID_lru, lru_pos(lru_id, dev_bucket, time), 0);
struct bkey_s_c lru_k = bkey_try(bch2_btree_iter_peek_slot(&lru_iter));

@@ -97,11 +96,11 @@ int bch2_lru_check_set(struct btree_trans *trans,
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, referring_k);

if (fsck_err(trans, alloc_key_to_missing_lru_entry, "%s", buf.buf))
if (ret_fsck_err(trans, alloc_key_to_missing_lru_entry, "%s", buf.buf))
try(bch2_lru_set(trans, lru_id, dev_bucket, time));
}
fsck_err:
return ret;

return 0;
}

static struct bbpos lru_pos_to_bp(struct bkey_s_c lru_k)

@@ -173,7 +172,6 @@ static int bch2_check_lru_key(struct btree_trans *trans,
struct bch_fs *c = trans->c;
CLASS(printbuf, buf1)();
CLASS(printbuf, buf2)();
int ret = 0;

struct bbpos bp = lru_pos_to_bp(lru_k);

@@ -186,7 +184,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
if (lru_pos_time(lru_k.k->p) != idx) {
try(bch2_btree_write_buffer_maybe_flush(trans, lru_k, last_flushed));

if (fsck_err(trans, lru_entry_bad,
if (ret_fsck_err(trans, lru_entry_bad,
"incorrect lru entry: lru %s time %llu\n"
"%s\n"
"for %s",

@@ -196,8 +194,8 @@ static int bch2_check_lru_key(struct btree_trans *trans,
(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf)))
return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
}
fsck_err:
return ret;

return 0;
}

int bch2_check_lrus(struct bch_fs *c)
@@ -820,7 +820,6 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
struct bkey_i_alloc_v4 *a;
struct bch_alloc_v4 old_gc, gc, old_convert, new;
const struct bch_alloc_v4 *old;
int ret;

if (!bucket_valid(ca, k.k->p.offset))
return 0;

@@ -858,7 +857,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
gc_m->dirty_sectors = gc.dirty_sectors;
}

if (fsck_err_on(new.data_type != gc.data_type,
if (ret_fsck_err_on(new.data_type != gc.data_type,
trans, alloc_key_data_type_wrong,
"bucket %llu:%llu gen %u has wrong data_type"
": got %s, should be %s",

@@ -869,7 +868,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
new.data_type = gc.data_type;

#define copy_bucket_field(_errtype, _f) \
if (fsck_err_on(new._f != gc._f, \
if (ret_fsck_err_on(new._f != gc._f, \
trans, _errtype, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
": got %llu, should be %llu", \

@@ -900,9 +899,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun);
fsck_err:
return ret;
try(bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_norun));
return 0;
}

static int bch2_gc_alloc_done(struct bch_fs *c)
@@ -935,21 +933,16 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
const struct bch_stripe *s;
struct gc_stripe *m;
bool bad = false;
unsigned i;
int ret = 0;

if (k.k->type != KEY_TYPE_stripe)
return 0;

s = bkey_s_c_to_stripe(k).v;
m = genradix_ptr(&c->gc_stripes, k.k->p.offset);
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
struct gc_stripe *m = genradix_ptr(&c->gc_stripes, k.k->p.offset);

for (i = 0; i < s->nr_blocks; i++) {
bool bad = false;
for (unsigned i = 0; i < s->nr_blocks; i++) {
u32 old = stripe_blockcount_get(s, i);
u32 new = (m ? m->block_sectors[i] : 0);

@@ -963,7 +956,7 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans,
if (bad)
bch2_bkey_val_to_text(&buf, c, k);

if (fsck_err_on(bad,
if (ret_fsck_err_on(bad,
trans, stripe_sector_count_wrong,
"%s", buf.buf)) {
struct bkey_i_stripe *new =

@@ -971,13 +964,13 @@ static int bch2_gc_write_stripes_key(struct btree_trans *trans,

bkey_reassemble(&new->k_i, k);

for (i = 0; i < new->v.nr_blocks; i++)
for (unsigned i = 0; i < new->v.nr_blocks; i++)
stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);

ret = bch2_trans_update(trans, iter, &new->k_i, 0);
try(bch2_trans_update(trans, iter, &new->k_i, 0));
}
fsck_err:
return ret;

return 0;
}

static int bch2_gc_stripes_done(struct bch_fs *c)
@@ -186,12 +186,12 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
size_t src_len = src->bi_iter.bi_size;
size_t dst_len = crc.uncompressed_size << 9;
void *workspace;
int ret = 0, ret2;
int ret2;

enum bch_compression_opts opt = bch2_compression_type_to_opt(crc.compression_type);
mempool_t *workspace_pool = &c->compress_workspace[opt];
if (unlikely(!mempool_initialized(workspace_pool))) {
if (fsck_err(c, compression_type_not_marked_in_sb,
if (ret_fsck_err(c, compression_type_not_marked_in_sb,
"compression type %s set but not marked in superblock",
__bch2_compression_types[crc.compression_type]))
try(bch2_check_set_has_compressed_data(c, opt));

@@ -252,8 +252,8 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
default:
BUG();
}
fsck_err:
return ret;

return 0;
}

int bch2_bio_uncompress_inplace(struct bch_write_op *op,

@@ -428,7 +428,7 @@ static unsigned __bio_compress(struct bch_fs *c,

mempool_t *workspace_pool = &c->compress_workspace[compression.type];
if (unlikely(!mempool_initialized(workspace_pool))) {
if (fsck_err(c, compression_opt_not_marked_in_sb,
if (ret_fsck_err(c, compression_opt_not_marked_in_sb,
"compression opt %s set but not marked in superblock",
bch2_compression_opts[compression.type])) {
ret = bch2_check_set_has_compressed_data(c, compression.type);

@@ -512,8 +512,6 @@ static unsigned __bio_compress(struct bch_fs *c,
BUG_ON(*dst_len & (block_bytes(c) - 1));
BUG_ON(*src_len & (block_bytes(c) - 1));
return compression_type;
fsck_err:
return BCH_COMPRESSION_TYPE_none;
}

unsigned bch2_bio_compress(struct bch_fs *c,
@@ -188,7 +188,6 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans,
u64 refd_start = live_start - le32_to_cpu(p.v->front_pad);
u64 refd_end = live_end + le32_to_cpu(p.v->back_pad);
CLASS(printbuf, buf)();
int ret = 0;

BUG_ON(missing_start < refd_start);
BUG_ON(missing_end > refd_end);

@@ -205,7 +204,7 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans,
prt_printf(&buf, "\nmissing reflink btree range %llu-%llu",
missing_start, missing_end);

if (fsck_err(trans, reflink_p_to_missing_reflink_v, "%s", buf.buf)) {
if (ret_fsck_err(trans, reflink_p_to_missing_reflink_v, "%s", buf.buf)) {
struct bkey_i_reflink_p *new =
errptr_try(bch2_bkey_make_mut_noupdate_typed(trans, p.s_c, reflink_p));

@@ -239,11 +238,10 @@ static int bch2_indirect_extent_missing_error(struct btree_trans *trans,
try(bch2_btree_insert_trans(trans, BTREE_ID_extents, &new->k_i, BTREE_TRIGGER_norun));

if (should_commit)
ret = bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
bch_err_throw(c, transaction_restart_nested);
try(bch2_trans_commit_lazy(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc));
}
fsck_err:
return ret;

return 0;
}

/*
@@ -723,7 +721,6 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
const __le64 *refcount = bkey_refcount_c(k);
CLASS(printbuf, buf)();
struct reflink_gc *r;
int ret = 0;

if (!refcount)
return 0;

@@ -739,7 +736,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
return -EINVAL;
}

if (fsck_err_on(r->refcount != le64_to_cpu(*refcount),
if (ret_fsck_err_on(r->refcount != le64_to_cpu(*refcount),
trans, reflink_v_refcount_wrong,
"reflink key has wrong refcount:\n"
"%s\n"

@@ -752,10 +749,10 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
new->k.type = KEY_TYPE_deleted;
else
*bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
ret = bch2_trans_update(trans, iter, new, 0);
try(bch2_trans_update(trans, iter, new, 0));
}
fsck_err:
return ret;

return 0;
}

int bch2_gc_reflink_done(struct bch_fs *c)
@@ -153,6 +153,34 @@ void bch2_free_fsck_errs(struct bch_fs *);
_ret; \
})

#define ret_fsck_err_wrap(_do) \
({ \
int _ret = _do; \
if (!bch2_err_matches(_ret, BCH_ERR_fsck_fix) && \
!bch2_err_matches(_ret, BCH_ERR_fsck_ignore)) \
return _ret; \
\
bch2_err_matches(_ret, BCH_ERR_fsck_fix); \
})

#define __ret_fsck_err(...) ret_fsck_err_wrap(bch2_fsck_err(__VA_ARGS__))

#define __ret_fsck_err_on(cond, c, _flags, _err_type, ...) \
({ \
might_sleep(); \
\
if (type_is(c, struct bch_fs *)) \
WARN_ON(bch2_current_has_btree_trans((struct bch_fs *) c));\
\
(unlikely(cond) ? __ret_fsck_err(c, _flags, _err_type, __VA_ARGS__) : false);\
})

#define ret_fsck_err(c, _err_type, ...) \
__ret_fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)

#define ret_fsck_err_on(cond, c, _err_type, ...) \
__ret_fsck_err_on(cond, c, FSCK_CAN_FIX|FSCK_CAN_IGNORE, _err_type, __VA_ARGS__)

enum bch_validate_flags;
__printf(5, 6)
int __bch2_bkey_fsck_err(struct bch_fs *,
@@ -696,6 +696,7 @@ static unsigned live_child(struct bch_fs *c, u32 start)
id && id != start;
id = bch2_snapshot_tree_next(t, id))
if (bch2_snapshot_is_leaf(c, id) &&
bch2_snapshot_exists(c, id) &&
!snapshot_list_has_id(&d->delete_leaves, id) &&
!interior_delete_has_id(&d->delete_interior, id))
return id;

@@ -896,10 +897,9 @@ static int check_should_delete_snapshot(struct btree_trans *trans, struct bkey_s
struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(k);
unsigned live_children = 0;

if (BCH_SNAPSHOT_SUBVOL(s.v))
return 0;

if (BCH_SNAPSHOT_DELETED(s.v))
if (BCH_SNAPSHOT_SUBVOL(s.v) ||
BCH_SNAPSHOT_NO_KEYS(s.v) ||
BCH_SNAPSHOT_DELETED(s.v))
return 0;

guard(mutex)(&d->progress_lock);