Update bcachefs sources to ca748d1945bf bcachefs: moving_ctxt_flush_all() between reconcile iters

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2025-11-17 09:02:09 -05:00
parent 070b9ab882
commit 9e9942f9cf
23 changed files with 267 additions and 240 deletions

View File

@@ -1 +1 @@
-f595b42bf8eae730a95de7636238556ef9e86cee
+ca748d1945bfa1208b1d32e5a246a352b09ad271

View File

@@ -103,6 +103,8 @@ do {									\
 			   TASK_UNINTERRUPTIBLE, 0, timeout,		\
 			   __ret = schedule_timeout(__ret))
+#define wait_event_freezable_timeout(wq, condition, timeout)	wait_event_timeout(wq, condition, timeout)
+
 #define wait_event_timeout(wq, condition, timeout)			\
 ({									\
 	long __ret = timeout;						\

View File

@@ -72,6 +72,12 @@ static const char * const disk_accounting_type_strs[] = {
 	NULL
 };
 
+static const unsigned bch2_accounting_type_nr_counters[] = {
+#define x(f, id, nr)	[BCH_DISK_ACCOUNTING_##f] = nr,
+	BCH_DISK_ACCOUNTING_TYPES()
+#undef x
+};
+
 static inline void __accounting_key_init(struct bkey_i *k, struct bpos pos,
 					 s64 *d, unsigned nr)
 {
@@ -97,6 +103,9 @@ int bch2_disk_accounting_mod(struct btree_trans *trans,
 {
 	BUG_ON(nr > BCH_ACCOUNTING_MAX_COUNTERS);
+	BUG_ON(k->type >= BCH_DISK_ACCOUNTING_TYPE_NR);
+	EBUG_ON(nr != bch2_accounting_type_nr_counters[k->type]);
+
 	/* Normalize: */
 	switch (k->type) {
 	case BCH_DISK_ACCOUNTING_replicas:
@@ -171,12 +180,6 @@ static inline bool is_zero(char *start, char *end)
 #define field_end(p, member)	(((void *) (&p.member)) + sizeof(p.member))
 
-static const unsigned bch2_accounting_type_nr_counters[] = {
-#define x(f, id, nr)	[BCH_DISK_ACCOUNTING_##f] = nr,
-	BCH_DISK_ACCOUNTING_TYPES()
-#undef x
-};
-
 int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
 			     struct bkey_validate_context from)
 {

View File

@@ -111,7 +111,7 @@ static inline bool data_type_is_hidden(enum bch_data_type type)
 	x(btree,		6,	3)	\
 	x(rebalance_work,	7,	1)	\
 	x(inum,			8,	3)	\
-	x(reconcile_work,	9,	1)	\
+	x(reconcile_work,	9,	2)	\
 	x(dev_leaving,		10,	1)
 
 enum disk_accounting_type {

View File

@@ -899,6 +899,8 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 
+	EBUG_ON(path && level + 1 != path->level);
+
 	if (unlikely(level >= BTREE_MAX_DEPTH)) {
 		int ret = bch2_fs_topology_error(c, "attempting to get btree node at level %u, >= max depth %u",
 						 level, BTREE_MAX_DEPTH);
@@ -925,9 +927,10 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 	 * Parent node must be locked, else we could read in a btree node that's
 	 * been freed:
 	 */
-	if (path && !bch2_btree_node_relock(trans, path, level + 1)) {
-		trace_and_count(c, trans_restart_relock_parent_for_fill, trans, _THIS_IP_, path);
-		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_relock));
+	if (path) {
+		int ret = bch2_btree_path_relock(trans, path, _THIS_IP_);
+		if (ret)
+			return ERR_PTR(ret);
 	}
 
 	b = bch2_btree_node_mem_alloc(trans, level != 0);
@@ -972,7 +975,8 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 	bch2_btree_node_read(trans, b, sync);
 
-	int ret = bch2_trans_relock(trans);
+	int ret = bch2_trans_relock(trans) ?:
+		bch2_btree_path_relock(trans, path, _THIS_IP_);
 	if (ret)
 		return ERR_PTR(ret);
@@ -1032,7 +1036,6 @@ static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btr
 	struct bch_fs *c = trans->c;
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
-	bool need_relock = false;
 	int ret;
 
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
@@ -1046,7 +1049,6 @@ retry:
 	 */
 	b = bch2_btree_node_fill(trans, path, k, path->btree_id,
 				 level, lock_type, true);
-	need_relock = true;
 
 	/* We raced and found the btree node in the cache */
 	if (!b)
@@ -1085,11 +1087,11 @@ retry:
 		six_unlock_type(&b->c.lock, lock_type);
 		bch2_trans_unlock(trans);
-		need_relock = true;
 
 		bch2_btree_node_wait_on_read(b);
 
-		ret = bch2_trans_relock(trans);
+		ret = bch2_trans_relock(trans) ?:
+			bch2_btree_path_relock(trans, path, _THIS_IP_);
 		if (ret)
 			return ERR_PTR(ret);
@@ -1101,15 +1103,6 @@ retry:
 			goto retry;
 	}
 
-	if (unlikely(need_relock)) {
-		ret = bch2_trans_relock(trans) ?:
-			bch2_btree_path_relock_intent(trans, path);
-		if (ret) {
-			six_unlock_type(&b->c.lock, lock_type);
-			return ERR_PTR(ret);
-		}
-	}
-
 	prefetch(b->aux_data);
 
 	for_each_bset(b, t) {
@@ -1158,6 +1151,7 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
 	int ret;
 
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
+	EBUG_ON(level + 1 != path->level);
 
 	b = btree_node_mem_ptr(k);

View File

@@ -1200,6 +1200,69 @@ void bch2_gc_gens_async(struct bch_fs *c)
 	enumerated_ref_put(&c->writes, BCH_WRITE_REF_gc_gens);
 }
 
+static int merge_btree_node_one(struct btree_trans *trans,
+				struct progress_indicator *progress,
+				struct btree_iter *iter,
+				u64 *merge_count)
+{
+	try(bch2_btree_iter_traverse(iter));
+
+	struct btree_path *path = btree_iter_path(trans, iter);
+	struct btree *b = path->l[path->level].b;
+	if (!b)
+		return 1;
+
+	try(bch2_progress_update_iter(trans, progress, iter, "merge_btree_nodes"));
+
+	if (!btree_node_needs_merge(trans, b, 0)) {
+		if (bpos_eq(b->key.k.p, SPOS_MAX))
+			return 1;
+		bch2_btree_iter_set_pos(iter, bpos_successor(b->key.k.p));
+		return 0;
+	}
+
+	try(bch2_btree_path_upgrade(trans, path, path->level + 1));
+	try(bch2_foreground_maybe_merge(trans, iter->path, path->level, 0, 0, merge_count));
+	return 0;
+}
+
+int bch2_merge_btree_nodes(struct bch_fs *c)
+{
+	struct progress_indicator progress;
+	bch2_progress_init_inner(&progress, c, ~0ULL, ~0ULL);
+
+	CLASS(btree_trans, trans)(c);
+
+	for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
+		u64 merge_count = 0;
+
+		for (unsigned level = 0; level < BTREE_MAX_DEPTH; level++) {
+			CLASS(btree_node_iter, iter)(trans, i, POS_MIN, 0, level, BTREE_ITER_prefetch);
+
+			while (true) {
+				int ret = lockrestart_do(trans, merge_btree_node_one(trans, &progress,
+										     &iter, &merge_count));
+				if (ret < 0)
+					return ret;
+				if (ret)
+					break;
+			}
+		}
+
+		if (merge_count) {
+			CLASS(printbuf, buf)();
+			prt_printf(&buf, "merge_btree_nodes: %llu merges in ", merge_count);
+			bch2_btree_id_to_text(&buf, i);
+			prt_str(&buf, " btree");
+			bch_info(c, "%s", buf.buf);
+		}
+	}
+	return 0;
+}
+
 void bch2_fs_btree_gc_init_early(struct bch_fs *c)
 {
 	seqcount_init(&c->gc_pos_lock);

View File

@@ -83,6 +83,8 @@ void bch2_gc_pos_to_text(struct printbuf *, struct gc_pos *);
 int bch2_gc_gens(struct bch_fs *);
 void bch2_gc_gens_async(struct bch_fs *);
 
+int bch2_merge_btree_nodes(struct bch_fs *c);
+
 void bch2_fs_btree_gc_init_early(struct bch_fs *);
 
 #endif /* _BCACHEFS_BTREE_GC_H */

View File

@@ -821,7 +821,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 				       unsigned long trace_ip)
 {
 	struct bch_fs *c = trans->c;
 	int u64s_delta = 0;
 
 	for (unsigned idx = 0; idx < trans->nr_updates; idx++) {
 		struct btree_insert_entry *i = trans->updates + idx;
@@ -832,8 +832,8 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
 		u64s_delta -= i->old_btree_u64s;
 
 		if (!same_leaf_as_next(trans, i)) {
-			if (u64s_delta <= 0)
-				try(bch2_foreground_maybe_merge(trans, i->path, i->level, flags));
+			try(bch2_foreground_maybe_merge(trans, i->path, i->level,
+							flags, u64s_delta, NULL));
 			u64s_delta = 0;
 		}
@@ -909,8 +909,13 @@ static int __bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
 		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
 		track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], true);
 
-		wait_event_freezable(c->journal.reclaim_wait,
-				     (ret = journal_reclaim_wait_done(c)));
+		if (!wait_event_freezable_timeout(c->journal.reclaim_wait,
+						  (ret = journal_reclaim_wait_done(c)),
+						  HZ)) {
+			bch2_trans_unlock_long(trans);
+			wait_event_freezable(c->journal.reclaim_wait,
+					     (ret = journal_reclaim_wait_done(c)));
+		}
+
 		track_event_change(&c->times[BCH_TIME_blocked_key_cache_flush], false);

View File

@@ -1879,7 +1879,7 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
 		prt_printf(&buf, "%s(): node not locked at level %u\n",
 			   __func__, b->c.level);
 		bch2_btree_update_to_text(&buf, as);
-		bch2_btree_path_to_text(&buf, trans, path_idx);
+		bch2_btree_path_to_text(&buf, trans, path_idx, path);
 		bch2_fs_emergency_read_only2(c, &buf);
 
 		bch2_print_str(c, KERN_ERR, buf.buf);
@@ -1963,7 +1963,7 @@ int bch2_btree_split_leaf(struct btree_trans *trans,
 	for (l = trans->paths[path].level + 1;
 	     btree_node_intent_locked(&trans->paths[path], l) && !ret;
 	     l++)
-		ret = bch2_foreground_maybe_merge(trans, path, l, flags);
+		ret = bch2_foreground_maybe_merge(trans, path, l, flags, 0, NULL);
 
 	return ret;
 }
@@ -2032,6 +2032,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 				  btree_path_idx_t path,
 				  unsigned level,
 				  unsigned flags,
+				  u64 *merge_count,
 				  enum btree_node_sibling sib)
 {
 	struct bch_fs *c = trans->c;
@@ -2214,6 +2215,9 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	bch2_btree_update_done(as, trans);
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_merge], start_time);
+
+	if (merge_count)
+		(*merge_count)++;
 out:
 err:
 	if (new_path)

View File

@@ -131,39 +131,35 @@ int bch2_btree_split_leaf(struct btree_trans *, btree_path_idx_t, unsigned);
 int bch2_btree_increase_depth(struct btree_trans *, btree_path_idx_t, unsigned);
 
 int __bch2_foreground_maybe_merge(struct btree_trans *, btree_path_idx_t,
-				  unsigned, unsigned, enum btree_node_sibling);
+				  unsigned, unsigned, u64 *, enum btree_node_sibling);
 
-static inline int bch2_foreground_maybe_merge_sibling(struct btree_trans *trans,
-				btree_path_idx_t path_idx,
-				unsigned level, unsigned flags,
-				enum btree_node_sibling sib)
+static inline bool btree_node_needs_merge(struct btree_trans *trans, struct btree *b, int d)
 {
-	struct btree_path *path = trans->paths + path_idx;
-	struct btree *b;
-
-	EBUG_ON(!btree_node_locked(path, level));
-
 	if (static_branch_unlikely(&bch2_btree_node_merging_disabled))
-		return 0;
+		return false;
 
-	b = path->l[level].b;
-	if (b->sib_u64s[sib] > trans->c->btree_foreground_merge_threshold)
-		return 0;
-
-	return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, sib);
+	return (int) min(b->sib_u64s[0], b->sib_u64s[1]) + d <=
+		(int) trans->c->btree_foreground_merge_threshold;
 }
 
 static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
-					      btree_path_idx_t path,
-					      unsigned level,
-					      unsigned flags)
+					      btree_path_idx_t path_idx,
+					      unsigned level, unsigned flags,
+					      int u64s_delta,
+					      u64 *merge_count)
 {
 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
 
-	return bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
-						   btree_prev_sib) ?:
-		bch2_foreground_maybe_merge_sibling(trans, path, level, flags,
-						    btree_next_sib);
+	struct btree_path *path = trans->paths + path_idx;
+	struct btree *b = path->l[level].b;
+
+	EBUG_ON(!btree_node_locked(path, level));
+
+	if (likely(!btree_node_needs_merge(trans, b, u64s_delta)))
+		return 0;
+
+	return __bch2_foreground_maybe_merge(trans, path_idx, level, flags, merge_count, btree_prev_sib) ?:
+		__bch2_foreground_maybe_merge(trans, path_idx, level, flags, merge_count, btree_next_sib);
 }
 
 int bch2_btree_node_get_iter(struct btree_trans *, struct btree_iter *, struct btree *);

View File

@@ -31,6 +31,8 @@
 static inline void btree_path_list_remove(struct btree_trans *, struct btree_path *);
 static inline void btree_path_list_add(struct btree_trans *,
 				       btree_path_idx_t, btree_path_idx_t);
+static void bch2_btree_path_to_text_short(struct printbuf *, struct btree_trans *,
+					  btree_path_idx_t, struct btree_path *);
 
 static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
 {
@@ -820,34 +822,27 @@ static int btree_path_prefetch(struct btree_trans *trans, struct btree_path *pat
 	struct bch_fs *c = trans->c;
 	struct btree_path_level *l = path_l(path);
 	struct btree_node_iter node_iter = l->iter;
-	struct bkey_packed *k;
 	unsigned nr = test_bit(BCH_FS_started, &c->flags)
 		? (path->level > 1 ? 0 :  2)
 		: (path->level > 1 ? 1 : 16);
-	bool was_locked = btree_node_locked(path, path->level);
-	int ret = 0;
 
 	struct bkey_buf tmp __cleanup(bch2_bkey_buf_exit);
 	bch2_bkey_buf_init(&tmp);
 
-	while (nr-- && !ret) {
-		if (!bch2_btree_node_relock(trans, path, path->level))
-			break;
+	while (nr--) {
+		BUG_ON(!btree_node_locked(path, path->level));
 
 		bch2_btree_node_iter_advance(&node_iter, l->b);
-		k = bch2_btree_node_iter_peek(&node_iter, l->b);
+		struct bkey_packed *k = bch2_btree_node_iter_peek(&node_iter, l->b);
 		if (!k)
 			break;
 
 		bch2_bkey_buf_unpack(&tmp, l->b, k);
-		ret = bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
-					       path->level - 1);
+		try(bch2_btree_node_prefetch(trans, path, tmp.k, path->btree_id,
+					     path->level - 1));
 	}
 
-	if (!was_locked)
-		btree_node_unlock(trans, path, path->level);
-	return ret;
+	return 0;
 }
 
 static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path, static int btree_path_prefetch_j(struct btree_trans *trans, struct btree_path *path,
@@ -1312,18 +1307,15 @@ btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
 	return path;
 }
 
-btree_path_idx_t __must_check
-__bch2_btree_path_set_pos(struct btree_trans *trans,
-			  btree_path_idx_t path_idx, struct bpos new_pos,
-			  bool intent, unsigned long ip)
+static btree_path_idx_t path_set_pos_trace(struct btree_trans *trans,
+					   btree_path_idx_t path_idx, struct bpos new_pos,
+					   bool intent, unsigned long ip)
 {
 	int cmp = bpos_cmp(new_pos, trans->paths[path_idx].pos);
 
 	bch2_trans_verify_not_unlocked_or_in_restart(trans);
 	EBUG_ON(!trans->paths[path_idx].ref);
 
-	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
-
 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
 	struct btree_path *path = trans->paths + path_idx;
@@ -1369,6 +1361,33 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
 	return path_idx;
 }
 
+btree_path_idx_t __must_check
+__bch2_btree_path_set_pos(struct btree_trans *trans,
+			  btree_path_idx_t path_idx, struct bpos new_pos,
+			  bool intent, unsigned long ip)
+{
+	if (!trace_btree_path_set_pos_enabled()) {
+		return path_set_pos_trace(trans, path_idx, new_pos, intent, ip);
+	} else {
+		CLASS(printbuf, buf)();
+		guard(printbuf_indent_nextline)(&buf);
+
+		prt_newline(&buf);
+		bch2_btree_path_to_text(&buf, trans, path_idx, trans->paths + path_idx);
+
+		path_idx = path_set_pos_trace(trans, path_idx, new_pos, intent, ip);
+
+		prt_newline(&buf);
+		bch2_btree_path_to_text(&buf, trans, path_idx, trans->paths + path_idx);
+		prt_newline(&buf);
+
+		trace_btree_path_set_pos(trans, ip, buf.buf);
+		return path_idx;
+	}
+}
+
 /* Btree path: main interface: */
 
 static struct btree_path *have_path_at_pos(struct btree_trans *trans, struct btree_path *path)
@@ -1539,10 +1558,9 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 	}
 }
 
-static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
+static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_trans *trans,
+					  btree_path_idx_t path_idx, struct btree_path *path)
 {
-	struct btree_path *path = trans->paths + path_idx;
-
 	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
 		   path_idx, path->ref, path->intent_ref,
 		   path->preserve ? 'P' : ' ',
@@ -1581,14 +1599,14 @@ static const char *btree_node_locked_str(enum btree_node_locked_type t)
 	}
 }
 
-void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans, btree_path_idx_t path_idx)
+void bch2_btree_path_to_text(struct printbuf *out, struct btree_trans *trans,
+			     btree_path_idx_t path_idx, struct btree_path *path)
 {
-	bch2_btree_path_to_text_short(out, trans, path_idx);
-
-	struct btree_path *path = trans->paths + path_idx;
-
-	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
+	bch2_btree_path_to_text_short(out, trans, path_idx, path);
 
 	prt_newline(out);
+	prt_newline(out);
+	prt_printf(out, " uptodate %u locks_want %u", path->uptodate, path->locks_want);
 
 	guard(printbuf_indent)(out);
 	for (unsigned l = 0; l < BTREE_MAX_DEPTH; l++) {
@@ -1615,7 +1633,7 @@ void __bch2_trans_paths_to_text(struct printbuf *out, struct btree_trans *trans,
 	btree_trans_sort_paths(trans);
 
 	trans_for_each_path_idx_inorder(trans, iter) {
-		bch2_btree_path_to_text_short(out, trans, iter.path_idx);
+		bch2_btree_path_to_text_short(out, trans, iter.path_idx, trans->paths + iter.path_idx);
 		prt_newline(out);
 	}
 }

View File

@@ -6,7 +6,8 @@
 #include "btree/types.h"
 
 void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
-void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
+void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *,
+			     btree_path_idx_t, struct btree_path *);
 void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
 void bch2_dump_trans_paths_updates(struct btree_trans *);
@@ -550,9 +551,9 @@ static inline void __bch2_trans_iter_init(struct btree_trans *trans,
 	    __builtin_constant_p(flags))
 		bch2_trans_iter_init_common(trans, iter, btree, pos, 0, 0,
 				bch2_btree_iter_flags(trans, btree, 0, flags),
-				_RET_IP_);
+				_THIS_IP_);
 	else
-		bch2_trans_iter_init_outlined(trans, iter, btree, pos, flags, _RET_IP_);
+		bch2_trans_iter_init_outlined(trans, iter, btree, pos, flags, _THIS_IP_);
 }
 
 static inline void bch2_trans_iter_init(struct btree_trans *trans,
@@ -564,6 +565,13 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans,
 	__bch2_trans_iter_init(trans, iter, btree, pos, flags);
 }
 
+#define DEFINE_CLASS2(_name, _type, _exit, _init, _init_args...)	\
+typedef _type class_##_name##_t;					\
+static __always_inline void class_##_name##_destructor(_type *p)	\
+{ _type _T = *p; _exit; }						\
+static __always_inline _type class_##_name##_constructor(_init_args)	\
+{ _type t = _init; return t; }
+
 #define bch2_trans_iter_class_init(_trans, _btree, _pos, _flags)	\
 ({									\
 	struct btree_iter iter;						\
@@ -571,7 +579,7 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans,
 	iter;								\
 })
 
-DEFINE_CLASS(btree_iter, struct btree_iter,
+DEFINE_CLASS2(btree_iter, struct btree_iter,
 	bch2_trans_iter_exit(&_T),
 	bch2_trans_iter_class_init(trans, btree, pos, flags),
 	struct btree_trans *trans,

View File

@@ -773,41 +773,6 @@ static inline void __bch2_trans_unlock(struct btree_trans *trans)
 		__bch2_btree_path_unlock(trans, path);
 }
 
-static noinline __cold void bch2_trans_relock_fail(struct btree_trans *trans, struct btree_path *path,
-						   struct get_locks_fail *f, bool trace, ulong ip)
-{
-	if (!trace)
-		goto out;
-
-	if (trace_trans_restart_relock_enabled()) {
-		CLASS(printbuf, buf)();
-
-		bch2_bpos_to_text(&buf, path->pos);
-		prt_printf(&buf, " %s l=%u seq=%u node seq=",
-			   bch2_btree_id_str(path->btree_id),
-			   f->l, path->l[f->l].lock_seq);
-
-		if (IS_ERR_OR_NULL(f->b)) {
-			prt_str(&buf, bch2_err_str(PTR_ERR(f->b)));
-		} else {
-			prt_printf(&buf, "%u", f->b->c.lock.seq);
-
-			struct six_lock_count c =
-				bch2_btree_node_lock_counts(trans, NULL, &f->b->c, f->l);
-			prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
-
-			c = six_lock_counts(&f->b->c.lock);
-			prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
-		}
-
-		trace_trans_restart_relock(trans, ip, buf.buf);
-	}
-
-	count_event(trans->c, trans_restart_relock);
-out:
-	__bch2_trans_unlock(trans);
-	bch2_trans_verify_locks(trans);
-}
-
 static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace, ulong ip)
 {
 	bch2_trans_verify_locks(trans);
@@ -821,14 +786,54 @@ static inline int __bch2_trans_relock(struct btree_trans *trans, bool trace, ulo
 	unsigned i;
 
 	trans_for_each_path(trans, path, i) {
-		struct get_locks_fail f;
-		int ret;
+		if (!path->should_be_locked)
+			continue;
 
-		if (path->should_be_locked &&
-		    (ret = btree_path_get_locks(trans, path, false, &f,
-					BCH_ERR_transaction_restart_relock))) {
-			bch2_trans_relock_fail(trans, path, &f, trace, ip);
-			return ret;
+		if (likely(!trace_trans_restart_relock_enabled() || !trace)) {
+			int ret = btree_path_get_locks(trans, path, false, NULL,
+						       BCH_ERR_transaction_restart_relock);
+			if (ret) {
+				if (trace)
+					count_event(trans->c, trans_restart_relock);
+
+				__bch2_trans_unlock(trans);
+				bch2_trans_verify_locks(trans);
+				return ret;
+			}
+		} else {
+			struct get_locks_fail f;
+			struct btree_path old_path = *path;
+
+			int ret = btree_path_get_locks(trans, path, false, &f,
+						       BCH_ERR_transaction_restart_relock);
+			if (ret) {
+				CLASS(printbuf, buf)();
+				guard(printbuf_indent)(&buf);
+
+				bch2_bpos_to_text(&buf, path->pos);
+				prt_printf(&buf, " %s l=%u seq=%u node seq=",
+					   bch2_btree_id_str(path->btree_id),
+					   f.l, path->l[f.l].lock_seq);
+
+				if (IS_ERR_OR_NULL(f.b)) {
+					prt_str(&buf, bch2_err_str(PTR_ERR(f.b)));
+				} else {
+					prt_printf(&buf, "%u", f.b->c.lock.seq);
+
+					struct six_lock_count c =
+						bch2_btree_node_lock_counts(trans, NULL, &f.b->c, f.l);
+					prt_printf(&buf, " self locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+
+					c = six_lock_counts(&f.b->c.lock);
+					prt_printf(&buf, " total locked %u.%u.%u", c.n[0], c.n[1], c.n[2]);
+				}
+
+				prt_newline(&buf);
+				bch2_btree_path_to_text(&buf, trans, path - trans->paths, &old_path);
+
+				trace_trans_restart_relock(trans, ip, buf.buf);
+				count_event(trans->c, trans_restart_relock);
+
+				__bch2_trans_unlock(trans);
+				bch2_trans_verify_locks(trans);
+				return ret;
+			}
 		}
 	}

View File

@@ -388,7 +388,8 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
 					    BCH_WATERMARK_reclaim|
 					    BCH_TRANS_COMMIT_journal_reclaim|
 					    BCH_TRANS_COMMIT_no_check_rw|
-					    BCH_TRANS_COMMIT_no_enospc));
+					    BCH_TRANS_COMMIT_no_enospc,
+					    0, NULL));
 		if (ret)
 			goto err;
 	}

View File

@@ -524,16 +524,17 @@ int __bch2_trigger_extent_reconcile(struct btree_trans *trans,
 	unsigned delta = old.k->size == new.k->size
 		? old_a ^ new_a
 		: old_a | new_a;
+	bool metadata = level != 0;
 
 	while (delta) {
 		unsigned c = __ffs(delta);
 		delta ^= BIT(c);
 
-		s64 v[1] = { 0 };
+		s64 v[2] = { 0, 0 };
 		if (old_a & BIT(c))
-			v[0] -= (s64) old.k->size;
+			v[metadata] -= (s64) old.k->size;
 		if (new_a & BIT(c))
-			v[0] += (s64) new.k->size;
+			v[metadata] += (s64) new.k->size;
 
 		try(bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc, v, reconcile_work, c));
 	}
@@ -1713,6 +1714,7 @@ static int do_reconcile(struct moving_context *ctxt)
 	struct bkey_i_cookie pending_cookie;
 	bkey_init(&pending_cookie.k);
 
+	bch2_moving_ctxt_flush_all(ctxt);
 	bch2_btree_write_buffer_flush_sync(trans);
 
 	while (!bch2_move_ratelimit(ctxt)) {
@@ -2105,7 +2107,7 @@ static int check_reconcile_work_data_btree(struct btree_trans *trans,
 	while (true) {
 		bch2_disk_reservation_put(c, &res.r);
 
-		try(progress_update_iter(trans, progress, &data_iter));
+		try(bch2_progress_update_iter(trans, progress, &data_iter, "check_reconcile_work"));
 
 		try(commit_do(trans, &res.r, NULL, BCH_TRANS_COMMIT_no_enospc,
 			      check_reconcile_work_one(trans, &data_iter, rb_w, rb_h, rb_p,
 						       snapshot_io_opts, last_flushed, &cur_pos)));

View File

@@ -66,7 +66,7 @@ DECLARE_EVENT_CLASS(trans_str,
 		__assign_str(str);
 	),
 
-	TP_printk("%d,%d %s %pS %s",
+	TP_printk("%d,%d %s %pS\n%s",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->trans_fn, (void *) __entry->caller_ip, __get_str(str))
 );
@@ -1105,13 +1105,6 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_next_node,
 	TP_ARGS(trans, caller_ip, path)
 );
 
-DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_parent_for_fill,
-	TP_PROTO(struct btree_trans *trans,
-		 unsigned long caller_ip,
-		 struct btree_path *path),
-	TP_ARGS(trans, caller_ip, path)
-);
-
 DEFINE_EVENT(transaction_restart_iter, trans_restart_relock_key_cache_fill,
 	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip,
@@ -1667,78 +1660,9 @@ DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
 	TP_ARGS(trans, path)
 );
 
-TRACE_EVENT(btree_path_set_pos,
-	TP_PROTO(struct btree_trans *trans,
-		 struct btree_path *path,
-		 struct bpos *new_pos),
-	TP_ARGS(trans, path, new_pos),
-
-	TP_STRUCT__entry(
-		__field(btree_path_idx_t,	idx		)
-		__field(u8,			ref		)
-		__field(u8,			preserve	)
-		__field(u8,			btree_id	)
-		TRACE_BPOS_entries(old_pos)
-		TRACE_BPOS_entries(new_pos)
-		__field(u8,			locks_want	)
-		__field(u8,			nodes_locked	)
-		__array(char,			node0,	24	)
-		__array(char,			node1,	24	)
-		__array(char,			node2,	24	)
-		__array(char,			node3,	24	)
-	),
-
-	TP_fast_assign(
-		__entry->idx		= path - trans->paths;
-		__entry->ref		= path->ref;
-		__entry->preserve	= path->preserve;
-		__entry->btree_id	= path->btree_id;
-		TRACE_BPOS_assign(old_pos, path->pos);
-		TRACE_BPOS_assign(new_pos, *new_pos);
-		__entry->nodes_locked	= path->nodes_locked;
-
-		struct btree *b = path->l[0].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
-		else
-			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
-
-		b = path->l[1].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
-		else
-			scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
-
-		b = path->l[2].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
-		else
-			scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
-
-		b = path->l[3].b;
-		if (IS_ERR(b))
-			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
-		else
-			scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
-	),
-
-	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
-		  "locks %u %u %u %u node %s %s %s %s",
-		  __entry->idx,
-		  __entry->ref,
-		  __entry->preserve,
-		  bch2_btree_id_str(__entry->btree_id),
-		  __entry->old_pos_inode,
-		  __entry->old_pos_offset,
-		  __entry->old_pos_snapshot,
-		  __entry->new_pos_inode,
-		  __entry->new_pos_offset,
-		  __entry->new_pos_snapshot,
-		  (__entry->nodes_locked >> 6) & 3,
-		  (__entry->nodes_locked >> 4) & 3,
-		  (__entry->nodes_locked >> 2) & 3,
-		  (__entry->nodes_locked >> 0) & 3,
-		  __entry->node3,
-		  __entry->node2,
-		  __entry->node1,
-		  __entry->node0)
+DEFINE_EVENT(trans_str, btree_path_set_pos,
+	TP_PROTO(struct btree_trans *trans, unsigned long caller_ip, const char *str),
+	TP_ARGS(trans, caller_ip, str)
 );
 
 TRACE_EVENT(btree_path_free,
@@ -1783,9 +1707,11 @@ static inline void trace_btree_path_clone(struct btree_trans *trans, struct btre
 static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
 static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
 static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
-static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_set_pos(struct btree_trans *trans, unsigned long ip, const char *str) {}
 static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
+static inline bool trace_btree_path_set_pos_enabled(void) { return false; }
+
 #endif
 
 #endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */

View File

@@ -1999,7 +1999,10 @@ static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
 	c->opts.fsck = true;
 	set_bit(BCH_FS_in_fsck, &c->flags);
 
-	int ret = bch2_run_online_recovery_passes(c, ~0ULL);
+	int ret = bch2_run_recovery_passes(c,
+				bch2_recovery_passes_match(PASS_FSCK) &
+				bch2_recovery_passes_match(PASS_ONLINE),
+				true);
 
 	clear_bit(BCH_FS_in_fsck, &c->flags);
 	bch_err_fn(c, ret);

View File

@@ -911,7 +911,7 @@ bch2_inode_alloc_cursor_get(struct btree_trans *trans, u64 cpu, u64 *min, u64 *m
 	CLASS(btree_iter, iter)(trans, BTREE_ID_logged_ops,
 				POS(LOGGED_OPS_INUM_inode_cursors, cursor_idx),
-				BTREE_ITER_cached);
+				BTREE_ITER_intent|BTREE_ITER_cached);
 	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
 	int ret = bkey_err(k);
 	if (ret)

View File

@@ -266,7 +266,7 @@ static struct recovery_pass_fn recovery_pass_fns[] = {
 #undef x
 };
 
-static u64 bch2_recovery_passes_match(unsigned flags)
+u64 bch2_recovery_passes_match(unsigned flags)
 {
 	u64 ret = 0;
@@ -515,17 +515,13 @@ static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
 	return 0;
 }
 
-static int __bch2_run_recovery_passes(struct bch_fs *c, u64 orig_passes_to_run,
-				      bool online)
+int bch2_run_recovery_passes(struct bch_fs *c, u64 orig_passes_to_run, bool failfast)
 {
 	struct bch_fs_recovery *r = &c->recovery;
 	int ret = 0;
 
 	spin_lock_irq(&r->lock);
 
-	if (online)
-		orig_passes_to_run &= bch2_recovery_passes_match(PASS_ONLINE);
-
 	if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))
 		orig_passes_to_run &= ~bch2_recovery_passes_match(PASS_ALLOC);
@@ -565,7 +561,7 @@ static int __bch2_run_recovery_passes(struct bch_fs *c, u64 orig_passes_to_run,
 			ret = ret2;
 		}
 
-		if (ret && !online)
+		if (ret && failfast)
 			break;
 
 		if (prev_done <= BCH_RECOVERY_PASS_check_snapshots &&
@@ -586,20 +582,17 @@ static void bch2_async_recovery_passes_work(struct work_struct *work)
 	struct bch_fs *c = container_of(work, struct bch_fs, recovery.work);
 	struct bch_fs_recovery *r = &c->recovery;
 
-	__bch2_run_recovery_passes(c,
-		c->sb.recovery_passes_required & ~r->passes_ratelimiting,
-		true);
+	bch2_run_recovery_passes(c,
+		c->sb.recovery_passes_required &
+		~r->passes_ratelimiting &
+		bch2_recovery_passes_match(PASS_ONLINE),
+		false);
 
 	up(&r->run_lock);
 	enumerated_ref_put(&c->writes, BCH_WRITE_REF_async_recovery_passes);
 }
 
-int bch2_run_online_recovery_passes(struct bch_fs *c, u64 passes)
-{
-	return __bch2_run_recovery_passes(c, c->sb.recovery_passes_required|passes, true);
-}
-
-int bch2_run_recovery_passes(struct bch_fs *c, enum bch_recovery_pass from)
+int bch2_run_recovery_passes_startup(struct bch_fs *c, enum bch_recovery_pass from)
 {
 	u64 passes =
 		bch2_recovery_passes_match(PASS_ALWAYS) |
@@ -621,7 +614,7 @@ int bch2_run_recovery_passes(struct bch_fs *c, enum bch_recovery_pass from)
 	passes &= ~(BIT_ULL(from) - 1);
 
 	down(&c->recovery.run_lock);
-	int ret = __bch2_run_recovery_passes(c, passes, false);
+	int ret = bch2_run_recovery_passes(c, passes, true);
 	up(&c->recovery.run_lock);
 
 	return ret;

View File

@@ -59,8 +59,9 @@ int bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *,
 int bch2_require_recovery_pass(struct bch_fs *, struct printbuf *,
 			       enum bch_recovery_pass);
 
-int bch2_run_online_recovery_passes(struct bch_fs *, u64);
-int bch2_run_recovery_passes(struct bch_fs *, enum bch_recovery_pass);
+u64 bch2_recovery_passes_match(unsigned);
+int bch2_run_recovery_passes(struct bch_fs *, u64, bool);
+int bch2_run_recovery_passes_startup(struct bch_fs *, enum bch_recovery_pass);
 
 void bch2_recovery_pass_status_to_text(struct printbuf *, struct bch_fs *);

View File

@@ -35,6 +35,7 @@
 	x(fs_journal_alloc,		 7, PASS_ALWAYS|PASS_SILENT|PASS_ALLOC) \
 	x(set_may_go_rw,		 8, PASS_ALWAYS|PASS_SILENT) \
 	x(journal_replay,		 9, PASS_ALWAYS) \
+	x(merge_btree_nodes,		45, PASS_ONLINE) \
 	x(check_alloc_info,		10, PASS_ONLINE|PASS_FSCK_ALLOC) \
 	x(check_lrus,			11, PASS_ONLINE|PASS_FSCK_ALLOC) \
 	x(check_btree_backpointers,	12, PASS_ONLINE|PASS_FSCK_ALLOC) \

View File

@@ -769,7 +769,7 @@ use_clean:
 	try(bch2_sb_set_upgrade_extra(c));
 
-	try(bch2_run_recovery_passes(c, 0));
+	try(bch2_run_recovery_passes_startup(c, 0));
 
 	/*
 	 * Normally set by the appropriate recovery pass: when cleared, this
@@ -806,7 +806,7 @@ use_clean:
 	clear_bit(BCH_FS_errors_fixed, &c->flags);
 	clear_bit(BCH_FS_errors_fixed_silent, &c->flags);
 
-	try(bch2_run_recovery_passes(c, BCH_RECOVERY_PASS_check_alloc_info));
+	try(bch2_run_recovery_passes_startup(c, BCH_RECOVERY_PASS_check_alloc_info));
 
 	if (errors_fixed ||
 	    test_bit(BCH_FS_errors_not_fixed, &c->flags)) {

View File

@@ -89,7 +89,7 @@ enum counters_flags {
 	x(trans_restart_relock_after_fill,		58,	TYPE_COUNTER)	\
 	x(trans_restart_relock_key_cache_fill,		59,	TYPE_COUNTER)	\
 	x(trans_restart_relock_next_node,		60,	TYPE_COUNTER)	\
-	x(trans_restart_relock_parent_for_fill,		61,	TYPE_COUNTER)	\
+	x(trans_restart_relock_parent_for_fill_obsolete, 61,	TYPE_COUNTER)	\
 	x(trans_restart_relock_path,			62,	TYPE_COUNTER)	\
 	x(trans_restart_relock_path_intent,		63,	TYPE_COUNTER)	\
 	x(trans_restart_too_many_iters,			64,	TYPE_COUNTER)	\