Update bcachefs sources to 9ab6c94002b9 bcachefs: bch2_journal_meta() takes ref on c->writes

Kent Overstreet 2024-10-11 22:54:16 -04:00
parent 1dba682527
commit 4e25d26a81
23 changed files with 177 additions and 116 deletions

View File

@@ -1 +1 @@
- 8aa83b2beeb30185242600116e24d2e6c0c2fce5
+ 9ab6c94002b9def79b0d79d0efd4e5255100feb4

View File

@@ -76,12 +76,11 @@ fsck_err:
void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer *bp)
{
- prt_printf(out, "btree=%s l=%u offset=%llu:%u len=%u pos=",
- bch2_btree_id_str(bp->btree_id),
- bp->level,
- (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
- (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
- bp->bucket_len);
+ bch2_btree_id_level_to_text(out, bp->btree_id, bp->level);
+ prt_printf(out, " offset=%llu:%u len=%u pos=",
+ (u64) (bp->bucket_offset >> MAX_EXTENT_COMPRESS_RATIO_SHIFT),
+ (u32) bp->bucket_offset & ~(~0U << MAX_EXTENT_COMPRESS_RATIO_SHIFT),
+ bp->bucket_len);
bch2_bpos_to_text(out, bp->pos);
}
@@ -496,9 +495,13 @@ found:
goto err;
prt_str(&buf, "extents pointing to same space, but first extent checksum bad:");
- prt_printf(&buf, "\n %s ", bch2_btree_id_str(btree));
+ prt_printf(&buf, "\n ");
+ bch2_btree_id_to_text(&buf, btree);
+ prt_str(&buf, " ");
bch2_bkey_val_to_text(&buf, c, extent);
- prt_printf(&buf, "\n %s ", bch2_btree_id_str(o_btree));
+ prt_printf(&buf, "\n ");
+ bch2_btree_id_to_text(&buf, o_btree);
+ prt_str(&buf, " ");
bch2_bkey_val_to_text(&buf, c, extent2);
struct nonce nonce = extent_nonce(extent.k->bversion, p.crc);
@@ -633,8 +636,9 @@ check_existing_bp:
goto err;
missing:
printbuf_reset(&buf);
- prt_printf(&buf, "missing backpointer for btree=%s l=%u ",
- bch2_btree_id_str(bp.btree_id), bp.level);
+ prt_str(&buf, "missing backpointer for btree=");
+ bch2_btree_id_to_text(&buf, bp.btree_id);
+ prt_printf(&buf, " l=%u ", bp.level);
bch2_bkey_val_to_text(&buf, c, orig_k);
prt_printf(&buf, "\n got: ");
bch2_bkey_val_to_text(&buf, c, bp_k);

View File

@@ -29,7 +29,7 @@ static inline struct bbpos bbpos_successor(struct bbpos pos)
static inline void bch2_bbpos_to_text(struct printbuf *out, struct bbpos pos)
{
- prt_str(out, bch2_btree_id_str(pos.btree));
+ bch2_btree_id_to_text(out, pos.btree);
prt_char(out, ':');
bch2_bpos_to_text(out, pos.pos);
}

View File

@@ -687,6 +687,7 @@ struct btree_trans_buf {
((subvol_inum) { BCACHEFS_ROOT_SUBVOL, BCACHEFS_ROOT_INO })
#define BCH_WRITE_REFS() \
+ x(journal) \
x(trans) \
x(write) \
x(promote) \

View File

@@ -982,16 +982,14 @@ static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
return;
prt_printf(&buf,
- "btree node header doesn't match ptr\n"
- "btree %s level %u\n"
- "ptr: ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
+ "btree node header doesn't match ptr: ");
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_str(&buf, "\nptr: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
- prt_printf(&buf, "\nheader: btree %s level %llu\n"
- "min ",
- bch2_btree_id_str(BTREE_NODE_ID(b->data)),
- BTREE_NODE_LEVEL(b->data));
+ prt_str(&buf, "\nheader: ");
+ bch2_btree_id_level_to_text(&buf, BTREE_NODE_ID(b->data), BTREE_NODE_LEVEL(b->data));
+ prt_str(&buf, "\nmin ");
bch2_bpos_to_text(&buf, b->data->min_key);
prt_printf(&buf, "\nmax ");
@@ -1373,12 +1371,19 @@ void bch2_btree_id_to_text(struct printbuf *out, enum btree_id btree)
prt_printf(out, "(unknown btree %u)", btree);
}
+ void bch2_btree_id_level_to_text(struct printbuf *out, enum btree_id btree, unsigned level)
+ {
+ prt_str(out, "btree=");
+ bch2_btree_id_to_text(out, btree);
+ prt_printf(out, " level=%u", level);
+ }
void bch2_btree_pos_to_text(struct printbuf *out, struct bch_fs *c, const struct btree *b)
{
- prt_printf(out, "%s level %u/%u\n ",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level,
- bch2_btree_id_root(c, b->c.btree_id)->level);
+ bch2_btree_id_to_text(out, b->c.btree_id);
+ prt_printf(out, " level %u/%u\n ",
+ b->c.level,
+ bch2_btree_id_root(c, b->c.btree_id)->level);
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&b->key));
}
@@ -1453,8 +1458,12 @@ void bch2_btree_cache_to_text(struct printbuf *out, const struct btree_cache *bc
prt_printf(out, "cannibalize lock:\t%p\n", bc->alloc_lock);
prt_newline(out);
- for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
- prt_btree_cache_line(out, c, bch2_btree_id_str(i), bc->nr_by_btree[i]);
+ for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++) {
+ bch2_btree_id_to_text(out, i);
+ prt_printf(out, "\t");
+ prt_human_readable_u64(out, bc->nr_by_btree[i] * c->opts.btree_node_size);
+ prt_printf(out, " (%zu)\n", bc->nr_by_btree[i]);
+ }
prt_newline(out);
prt_printf(out, "freed:\t%zu\n", bc->nr_freed);

View File

@@ -136,8 +136,9 @@ static inline struct btree *btree_node_root(struct bch_fs *c, struct btree *b)
return bch2_btree_id_root(c, b->c.btree_id)->b;
}
- const char *bch2_btree_id_str(enum btree_id);
+ const char *bch2_btree_id_str(enum btree_id); /* avoid */
void bch2_btree_id_to_text(struct printbuf *, enum btree_id);
+ void bch2_btree_id_level_to_text(struct printbuf *, enum btree_id, unsigned);
void bch2_btree_pos_to_text(struct printbuf *, struct bch_fs *, const struct btree *);
void bch2_btree_node_to_text(struct printbuf *, struct bch_fs *, const struct btree *);

View File

@@ -56,8 +56,8 @@ void bch2_gc_pos_to_text(struct printbuf *out, struct gc_pos *p)
{
prt_str(out, bch2_gc_phase_strs[p->phase]);
prt_char(out, ' ');
- bch2_btree_id_to_text(out, p->btree);
- prt_printf(out, " l=%u ", p->level);
+ bch2_btree_id_level_to_text(out, p->btree, p->level);
+ prt_char(out, ' ');
bch2_bpos_to_text(out, p->pos);
}
@@ -209,8 +209,9 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
if (bpos_eq(expected_start, cur->data->min_key))
return 0;
- prt_printf(&buf, " at btree %s level %u:\n parent: ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
+ prt_printf(&buf, " at ");
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_printf(&buf, ":\n parent: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
if (prev) {
@@ -277,8 +278,9 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
if (bpos_eq(child->key.k.p, b->key.k.p))
return 0;
- prt_printf(&buf, "at btree %s level %u:\n parent: ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
+ prt_printf(&buf, " at ");
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_printf(&buf, ":\n parent: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
prt_str(&buf, "\n child: ");
@@ -341,14 +343,14 @@ again:
ret = PTR_ERR_OR_ZERO(cur);
printbuf_reset(&buf);
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level - 1);
+ prt_char(&buf, ' ');
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));
if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
trans, btree_node_unreadable,
- "Topology repair: unreadable btree node at btree %s level %u:\n"
+ "Topology repair: unreadable btree node at\n"
" %s",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level - 1,
buf.buf)) {
bch2_btree_node_evict(trans, cur_k.k);
cur = NULL;
@@ -370,7 +372,7 @@ again:
break;
if (bch2_btree_node_is_stale(c, cur)) {
- bch_info(c, "btree node %s older than nodes found by scanning", buf.buf);
+ bch_info(c, "btree node older than nodes found by scanning\n %s", buf.buf);
six_unlock_read(&cur->c.lock);
bch2_btree_node_evict(trans, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
@@ -478,14 +480,13 @@ again:
}
printbuf_reset(&buf);
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
if (mustfix_fsck_err_on(!have_child,
trans, btree_node_topology_interior_node_empty,
- "empty interior btree node at btree %s level %u\n"
- " %s",
- bch2_btree_id_str(b->c.btree_id),
- b->c.level, buf.buf))
+ "empty interior btree node at %s", buf.buf))
ret = DROP_THIS_NODE;
err:
fsck_err:
@@ -511,6 +512,7 @@ int bch2_check_topology(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct bpos pulled_from_scan = POS_MIN;
+ struct printbuf buf = PRINTBUF;
int ret = 0;
bch2_trans_srcu_unlock(trans);
@@ -519,19 +521,21 @@ int bch2_check_topology(struct bch_fs *c)
struct btree_root *r = bch2_btree_id_root(c, i);
bool reconstructed_root = false;
+ bch2_btree_id_to_text(&buf, i);
if (r->error) {
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes);
if (ret)
break;
reconstruct_root:
- bch_info(c, "btree root %s unreadable, must recover from scan", bch2_btree_id_str(i));
+ bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf);
r->alive = false;
r->error = 0;
if (!bch2_btree_has_scanned_nodes(c, i)) {
mustfix_fsck_err(trans, btree_root_unreadable_and_scan_found_nothing,
- "no nodes found for btree %s, continue?", bch2_btree_id_str(i));
+ "no nodes found for btree %s, continue?", buf.buf);
bch2_btree_root_alloc_fake_trans(trans, i, 0);
} else {
bch2_btree_root_alloc_fake_trans(trans, i, 1);
@@ -560,13 +564,14 @@ reconstruct_root:
if (!reconstructed_root)
goto reconstruct_root;
- bch_err(c, "empty btree root %s", bch2_btree_id_str(i));
+ bch_err(c, "empty btree root %s", buf.buf);
bch2_btree_root_alloc_fake_trans(trans, i, 0);
r->alive = false;
ret = 0;
}
}
fsck_err:
+ printbuf_exit(&buf);
bch2_trans_put(trans);
return ret;
}
@@ -713,6 +718,7 @@ static int bch2_gc_btrees(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
enum btree_id ids[BTREE_ID_NR];
+ struct printbuf buf = PRINTBUF;
unsigned i;
int ret = 0;
@@ -731,10 +737,13 @@ static int bch2_gc_btrees(struct bch_fs *c)
if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO),
trans, btree_node_read_error,
"btree node read error for %s",
- bch2_btree_id_str(btree)))
+ (printbuf_reset(&buf),
+ bch2_btree_id_to_text(&buf, btree),
+ buf.buf)))
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
}
fsck_err:
+ printbuf_exit(&buf);
bch2_trans_put(trans);
bch_err_fn(c, ret);
return ret;

View File

@@ -25,9 +25,8 @@
static void bch2_btree_node_header_to_text(struct printbuf *out, struct btree_node *bn)
{
- prt_printf(out, "btree=%s l=%u seq %llux\n",
- bch2_btree_id_str(BTREE_NODE_ID(bn)),
- (unsigned) BTREE_NODE_LEVEL(bn), bn->keys.seq);
+ bch2_btree_id_level_to_text(out, BTREE_NODE_ID(bn), BTREE_NODE_LEVEL(bn));
+ prt_printf(out, " seq %llux\n", bn->keys.seq);
prt_str(out, "min: ");
bch2_bpos_to_text(out, bn->min_key);
prt_newline(out);
@@ -1347,9 +1346,11 @@ start:
!btree_node_read_error(b) &&
c->curr_recovery_pass != BCH_RECOVERY_PASS_scan_for_btree_nodes) {
printbuf_reset(&buf);
- bch2_bpos_to_text(&buf, b->key.k.p);
- bch_err_ratelimited(c, "%s: rewriting btree node at btree=%s level=%u %s due to error",
- __func__, bch2_btree_id_str(b->c.btree_id), b->c.level, buf.buf);
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_str(&buf, " ");
+ bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
+ bch_err_ratelimited(c, "%s: rewriting btree node at due to error\n %s",
+ __func__, buf.buf);
bch2_btree_node_rewrite_async(c, b);
}

View File

@@ -1435,10 +1435,11 @@ void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
trans_for_each_update(trans, i) {
struct bkey_s_c old = { &i->old_k, i->old_v };
- prt_printf(buf, "update: btree=%s cached=%u %pS\n",
- bch2_btree_id_str(i->btree_id),
- i->cached,
- (void *) i->ip_allocated);
+ prt_str(buf, "update: btree=");
+ bch2_btree_id_to_text(buf, i->btree_id);
+ prt_printf(buf, " cached=%u %pS\n",
+ i->cached,
+ (void *) i->ip_allocated);
prt_printf(buf, " old ");
bch2_bkey_val_to_text(buf, trans->c, old);
@@ -1471,13 +1472,13 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
{
struct btree_path *path = trans->paths + path_idx;
- prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
+ prt_printf(out, "path: idx %3u ref %u:%u %c %c %c ",
path_idx, path->ref, path->intent_ref,
path->preserve ? 'P' : ' ',
path->should_be_locked ? 'S' : ' ',
- path->cached ? 'C' : 'B',
- bch2_btree_id_str(path->btree_id),
- path->level);
+ path->cached ? 'C' : 'B');
+ bch2_btree_id_level_to_text(out, path->btree_id, path->level);
+ prt_str(out, " pos ");
bch2_bpos_to_text(out, path->pos);
if (!path->cached && btree_node_locked(path, path->level)) {
@@ -3354,8 +3355,9 @@ bch2_btree_bkey_cached_common_to_text(struct printbuf *out,
pid = owner ? owner->pid : 0;
rcu_read_unlock();
- prt_printf(out, "\t%px %c l=%u %s:", b, b->cached ? 'c' : 'b',
- b->level, bch2_btree_id_str(b->btree_id));
+ prt_printf(out, "\t%px %c ", b, b->cached ? 'c' : 'b');
+ bch2_btree_id_to_text(out, b->btree_id);
+ prt_printf(out, " l=%u:", b->level);
bch2_bpos_to_text(out, btree_node_pos(b));
prt_printf(out, "\t locks %u:%u:%u held by pid %u",
@@ -3394,11 +3396,11 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
if (!path->nodes_locked)
continue;
- prt_printf(out, " path %u %c l=%u %s:",
- idx,
- path->cached ? 'c' : 'b',
- path->level,
- bch2_btree_id_str(path->btree_id));
+ prt_printf(out, " path %u %c ",
+ idx,
+ path->cached ? 'c' : 'b');
+ bch2_btree_id_to_text(out, path->btree_id);
+ prt_printf(out, " l=%u:", path->level);
bch2_bpos_to_text(out, path->pos);
prt_newline(out);

View File

@@ -362,7 +362,7 @@ static int btree_trans_restart(struct btree_trans *trans, int err)
static inline int trans_maybe_inject_restart(struct btree_trans *trans, unsigned long ip)
{
#ifdef CONFIG_BCACHEFS_INJECT_TRANSACTION_RESTARTS
- if (!(ktime_get_ns() & ~(~0UL << min(63, (10 + trans->restart_count_this_trans))))) {
+ if (!(ktime_get_ns() & ~(~0ULL << min(63, (10 + trans->restart_count_this_trans))))) {
trace_and_count(trans->c, trans_restart_injected, trans, ip);
return btree_trans_restart_ip(trans,
BCH_ERR_transaction_restart_fault_inject, ip);

View File

@@ -628,8 +628,11 @@ void bch2_journal_keys_dump(struct bch_fs *c)
darray_for_each(*keys, i) {
printbuf_reset(&buf);
+ prt_printf(&buf, "btree=");
+ bch2_btree_id_to_text(&buf, i->btree_id);
+ prt_printf(&buf, " l=%u ", i->level);
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(i->k));
- pr_err("%s l=%u %s", bch2_btree_id_str(i->btree_id), i->level, buf.buf);
+ pr_err("%s", buf.buf);
}
printbuf_exit(&buf);
}

View File

@@ -22,9 +22,9 @@ struct find_btree_nodes_worker {
static void found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct found_btree_node *n)
{
- prt_printf(out, "%s l=%u seq=%u journal_seq=%llu cookie=%llx ",
- bch2_btree_id_str(n->btree_id), n->level, n->seq,
- n->journal_seq, n->cookie);
+ bch2_btree_id_level_to_text(out, n->btree_id, n->level);
+ prt_printf(out, " seq=%u journal_seq=%llu cookie=%llx ",
+ n->seq, n->journal_seq, n->cookie);
bch2_bpos_to_text(out, n->min_key);
prt_str(out, "-");
bch2_bpos_to_text(out, n->max_key);
@@ -499,7 +499,9 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
if (c->opts.verbose) {
struct printbuf buf = PRINTBUF;
- prt_printf(&buf, "recovering %s l=%u ", bch2_btree_id_str(btree), level);
+ prt_str(&buf, "recovery ");
+ bch2_btree_id_level_to_text(&buf, btree, level);
+ prt_str(&buf, " ");
bch2_bpos_to_text(&buf, node_min);
prt_str(&buf, " - ");
bch2_bpos_to_text(&buf, node_max);

View File

@@ -97,9 +97,9 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
bch2_topology_error(c);
printbuf_reset(&buf);
- prt_str(&buf, "end of prev node doesn't match start of next node\n"),
- prt_printf(&buf, " in btree %s level %u node ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
+ prt_str(&buf, "end of prev node doesn't match start of next node\n in ");
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_str(&buf, " node ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
prt_str(&buf, "\n prev ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
@@ -118,9 +118,9 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
bch2_topology_error(c);
printbuf_reset(&buf);
- prt_str(&buf, "empty interior node\n");
- prt_printf(&buf, " in btree %s level %u node ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
+ prt_str(&buf, "empty interior node\n in ");
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_str(&buf, " node ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
need_fsck_err(trans, btree_node_topology_empty_interior_node, "%s", buf.buf);
@@ -129,9 +129,9 @@ int bch2_btree_node_check_topology(struct btree_trans *trans, struct btree *b)
bch2_topology_error(c);
printbuf_reset(&buf);
- prt_str(&buf, "last child node doesn't end at end of parent node\n");
- prt_printf(&buf, " in btree %s level %u node ",
- bch2_btree_id_str(b->c.btree_id), b->c.level);
+ prt_str(&buf, "last child node doesn't end at end of parent node\n in ");
+ bch2_btree_id_level_to_text(&buf, b->c.btree_id, b->c.level);
+ prt_str(&buf, " node ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));
prt_str(&buf, "\n last key ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(prev.k));
@@ -2570,8 +2570,9 @@ static void bch2_btree_update_to_text(struct printbuf *out, struct btree_update
prt_printf(out, "%ps: ", (void *) as->ip_started);
bch2_trans_commit_flags_to_text(out, as->flags);
- prt_printf(out, " btree=%s l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
- bch2_btree_id_str(as->btree_id),
+ prt_str(out, " ");
+ bch2_btree_id_to_text(out, as->btree_id);
+ prt_printf(out, " l=%u-%u mode=%s nodes_written=%u cl.remaining=%u journal_seq=%llu\n",
as->update_level_start,
as->update_level_end,
bch2_btree_update_modes[as->mode],

View File

@@ -472,7 +472,9 @@ static void bch2_cached_btree_node_to_text(struct printbuf *out, struct bch_fs *
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 32);
- prt_printf(out, "%px btree=%s l=%u\n", b, bch2_btree_id_str(b->c.btree_id), b->c.level);
+ prt_printf(out, "%px ", b);
+ bch2_btree_id_level_to_text(out, b->c.btree_id, b->c.level);
+ prt_printf(out, "\n");
printbuf_indent_add(out, 2);

View File

@@ -217,7 +217,8 @@ void bch2_accounting_key_to_text(struct printbuf *out, struct disk_accounting_po
prt_printf(out, "id=%u", k->snapshot.id);
break;
case BCH_DISK_ACCOUNTING_btree:
- prt_printf(out, "btree=%s", bch2_btree_id_str(k->btree.id));
+ prt_str(out, "btree=");
+ bch2_btree_id_to_text(out, k->btree.id);
break;
}
}

View File

@@ -406,7 +406,7 @@ static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
sync_inodes_sb(c->vfs_sb);
up_read(&c->vfs_sb->s_umount);
}
- retry:
if (arg.src_ptr) {
error = user_path_at(arg.dirfd,
(const char __user *)(unsigned long)arg.src_ptr,
@@ -486,11 +486,6 @@ err3:
err2:
if (arg.src_ptr)
path_put(&src_path);
- if (retry_estale(error, lookup_flags)) {
- lookup_flags |= LOOKUP_REVAL;
- goto retry;
- }
err1:
return error;
}

View File

@@ -829,19 +829,14 @@ out:
return ret;
}
- int bch2_journal_meta(struct journal *j)
+ static int __bch2_journal_meta(struct journal *j)
{
- struct journal_buf *buf;
- struct journal_res res;
- int ret;
- memset(&res, 0, sizeof(res));
- ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
+ struct journal_res res = {};
+ int ret = bch2_journal_res_get(j, &res, jset_u64s(0), 0);
if (ret)
return ret;
- buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
+ struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
buf->must_flush = true;
if (!buf->flush_time) {
@@ -854,6 +849,18 @@ int bch2_journal_meta(struct journal *j)
return bch2_journal_flush_seq(j, res.seq);
}
+ int bch2_journal_meta(struct journal *j)
+ {
+ struct bch_fs *c = container_of(j, struct bch_fs, journal);
+ if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_journal))
+ return -EROFS;
+ int ret = __bch2_journal_meta(j);
+ bch2_write_ref_put(c, BCH_WRITE_REF_journal);
+ return ret;
+ }
/* block/unlock the journal: */
void bch2_journal_unblock(struct journal *j)
@@ -1191,7 +1198,7 @@ void bch2_fs_journal_stop(struct journal *j)
* Always write a new journal entry, to make sure the clock hands are up
* to date (and match the superblock)
*/
- bch2_journal_meta(j);
+ __bch2_journal_meta(j);
journal_quiesce(j);
cancel_delayed_work_sync(&j->write_work);

View File

@@ -421,7 +421,8 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
bch2_prt_jset_entry_type(out, entry->type);
prt_str(out, ": ");
}
- prt_printf(out, "btree=%s l=%u ", bch2_btree_id_str(entry->btree_id), entry->level);
+ bch2_btree_id_level_to_text(out, entry->btree_id, entry->level);
+ prt_char(out, ' ');
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));
first = false;
}

View File

@@ -522,8 +522,9 @@ int bch2_parse_one_mount_opt(struct bch_fs *c, struct bch_opts *opts,
val = "0";
}
+ /* Unknown options are ignored: */
if (id < 0)
- goto bad_opt;
+ return 0;
if (!(bch2_opt_table[id].flags & OPT_MOUNT))
goto bad_opt;
@@ -595,6 +596,9 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
copied_opts_start = copied_opts;
while ((opt = strsep(&copied_opts, ",")) != NULL) {
+ if (!*opt)
+ continue;
name = strsep(&opt, "=");
val = opt;

View File

@@ -42,7 +42,10 @@ int bch2_btree_lost_data(struct bch_fs *c, enum btree_id btree)
mutex_lock(&c->sb_lock);
if (!(c->sb.btrees_lost_data & b)) {
- bch_err(c, "flagging btree %s lost data", bch2_btree_id_str(btree));
+ struct printbuf buf = PRINTBUF;
+ bch2_btree_id_to_text(&buf, btree);
+ bch_err(c, "flagging btree %s lost data", buf.buf);
+ printbuf_exit(&buf);
bch2_sb_field_get(c->disk_sb.sb, ext)->btrees_lost_data |= cpu_to_le64(b);
}
@@ -385,10 +388,13 @@ int bch2_journal_replay(struct bch_fs *c)
? BCH_TRANS_COMMIT_no_journal_res|BCH_WATERMARK_reclaim
: 0),
bch2_journal_replay_key(trans, k));
- bch_err_msg(c, ret, "while replaying key at btree %s level %u:",
- bch2_btree_id_str(k->btree_id), k->level);
- if (ret)
+ if (ret) {
+ struct printbuf buf = PRINTBUF;
+ bch2_btree_id_level_to_text(&buf, k->btree_id, k->level);
+ bch_err_msg(c, ret, "while replaying key at %s:", buf.buf);
+ printbuf_exit(&buf);
goto err;
+ }
BUG_ON(k->btree_id != BTREE_ID_accounting && !k->overwritten);
}
@@ -536,6 +542,7 @@ static int journal_replay_early(struct bch_fs *c,
static int read_btree_roots(struct bch_fs *c)
{
+ struct printbuf buf = PRINTBUF;
int ret = 0;
for (unsigned i = 0; i < btree_id_nr_alive(c); i++) {
@@ -547,14 +554,17 @@ static int read_btree_roots(struct bch_fs *c)
if (btree_id_is_alloc(i) && c->opts.reconstruct_alloc)
continue;
+ printbuf_reset(&buf);
+ bch2_btree_id_level_to_text(&buf, i, r->level);
if (mustfix_fsck_err_on((ret = r->error),
c, btree_root_bkey_invalid,
"invalid btree root %s",
- bch2_btree_id_str(i)) ||
+ buf.buf) ||
mustfix_fsck_err_on((ret = r->error = bch2_btree_root_read(c, i, &r->key, r->level)),
c, btree_root_read_error,
- "error reading btree root %s l=%u: %s",
- bch2_btree_id_str(i), r->level, bch2_err_str(ret))) {
+ "error reading btree root %s: %s",
+ buf.buf, bch2_err_str(ret))) {
if (btree_id_is_alloc(i))
r->error = 0;
@@ -572,6 +582,7 @@ static int read_btree_roots(struct bch_fs *c)
}
}
fsck_err:
+ printbuf_exit(&buf);
return ret;
}
@@ -680,7 +691,7 @@ int bch2_fs_recovery(struct bch_fs *c)
}
if (c->opts.norecovery)
- c->opts.recovery_pass_last = BCH_RECOVERY_PASS_journal_replay - 1;
+ c->opts.recovery_pass_last = BCH_RECOVERY_PASS_snapshots_read;
mutex_lock(&c->sb_lock);
struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
@@ -887,11 +898,9 @@ use_clean:
set_bit(BCH_FS_accounting_replay_done, &c->flags);
/* fsync if we fixed errors */
- if (test_bit(BCH_FS_errors_fixed, &c->flags) &&
- bch2_write_ref_tryget(c, BCH_WRITE_REF_fsync)) {
+ if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
bch2_journal_flush_all_pins(&c->journal);
bch2_journal_meta(&c->journal);
- bch2_write_ref_put(c, BCH_WRITE_REF_fsync);
}
/* If we fixed errors, verify that fs is actually clean now: */

View File

@@ -821,6 +821,11 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
rcu_read_lock();
for (unsigned i = 0; i < e->nr_devs; i++) {
+ if (e->devs[i] == BCH_SB_MEMBER_INVALID) {
+ nr_failed++;
+ continue;
+ }
nr_online += test_bit(e->devs[i], devs.d);
struct bch_dev *ca = bch2_dev_rcu_noerror(c, e->devs[i]);

View File

@@ -247,7 +247,10 @@ static void member_to_text(struct printbuf *out,
prt_newline(out);
prt_printf(out, "Btree allocated bitmap blocksize:\t");
- prt_units_u64(out, 1ULL << m.btree_bitmap_shift);
+ if (m.btree_bitmap_shift < 64)
+ prt_units_u64(out, 1ULL << m.btree_bitmap_shift);
+ else
+ prt_printf(out, "(invalid shift %u)", m.btree_bitmap_shift);
prt_newline(out);
prt_printf(out, "Btree allocated bitmap:\t");

View File

@@ -302,7 +302,8 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
static void bch2_gc_gens_pos_to_text(struct printbuf *out, struct bch_fs *c)
{
- prt_printf(out, "%s: ", bch2_btree_id_str(c->gc_gens_btree));
+ bch2_btree_id_to_text(out, c->gc_gens_btree);
+ prt_printf(out, ": ");
bch2_bpos_to_text(out, c->gc_gens_pos);
prt_printf(out, "\n");
}