Update bcachefs sources to 0eb07772a42c bcachefs: Only show options from mount in /proc/mounts

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2025-11-21 00:21:39 -05:00
parent e0ad02a550
commit fff87dcac0
21 changed files with 304 additions and 208 deletions
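The headline change: options parsed at mount time are now recorded in a bitmask (the struct bch_opts_mask mount_opts added to struct bch_fs below), and bch2_opts_to_text() skips any option whose bit is clear, so /proc/mounts reports only options that actually came from mount, not superblock defaults or later runtime changes (sysfs_opt_store() clears the bit when an option is changed at runtime). A standalone toy model of the mechanism, not from the commit itself, with made-up option IDs standing in for the real BCH_OPTS() x-macro tables:

/* Toy model of the new mount-opts mask; the real code uses the BCH_OPTS()
 * x-macro tables and a BITS_TO_LONGS()-sized bitmap. */
#include <stdio.h>

#define OPTS_NR 3

struct opts_mask { unsigned long d[1]; };	/* cf. struct bch_opts_mask */

static void mask_set(struct opts_mask *m, int id)   { m->d[0] |=  1UL << id; }
static void mask_clear(struct opts_mask *m, int id) { m->d[0] &= ~(1UL << id); }
static int  mask_test(struct opts_mask *m, int id)  { return m->d[0] >> id & 1; }

int main(void)
{
	const char *name[OPTS_NR] = { "degraded", "verbose", "discard" };
	struct opts_mask mount_opts = { { 0 } };

	mask_set(&mount_opts, 0);	/* parsed from mount options */
	mask_set(&mount_opts, 2);	/* parsed from mount options */
	mask_clear(&mount_opts, 2);	/* later changed via sysfs: drop the bit */

	/* analogue of the new "mask && !test_bit(i, mask->d)" filter
	 * in bch2_opts_to_text(): */
	for (int id = 0; id < OPTS_NR; id++)
		if (mask_test(&mount_opts, id))
			printf("%s\n", name[id]);	/* prints only "degraded" */
	return 0;
}

The real filter is the mask test added to bch2_opts_to_text() in opts.c, fed from set_mount_opts() in fs.c; see those hunks below.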

@@ -1 +1 @@
ba3f652e4cdc86313cb13380efd59f1e6e6f484f
0eb07772a42c4617bb788f9fccade67a69c3da81

@@ -821,6 +821,7 @@ struct bch_fs {
struct bch_opts opts;
atomic_t opt_change_cookie;
struct bch_opts_mask mount_opts;
unsigned loglevel;
unsigned prev_loglevel;

@@ -203,23 +203,19 @@ static __always_inline bool bversion_eq(struct bversion l, struct bversion r)
l.lo == r.lo;
}
static inline bool bkey_fields_eq(const struct bkey l, const struct bkey r)
{
return (l.u64s == r.u64s &&
l.type == r.type &&
bpos_eq(l.p, r.p) &&
bversion_eq(l.bversion, r.bversion) &&
l.size == r.size);
}
static inline bool bkey_and_val_eq(struct bkey_s_c l, struct bkey_s_c r)
{
if (!(l.k->u64s == r.k->u64s &&
l.k->type == r.k->type &&
bpos_eq(l.k->p, r.k->p) &&
bversion_eq(l.k->bversion, r.k->bversion) &&
l.k->size == r.k->size))
return false;
if (l.k->type != KEY_TYPE_btree_ptr_v2) {
return !memcmp(l.v, r.v, bkey_val_bytes(l.k));
} else {
/* don't compare bch_btree_ptr_v2.mem_ptr */
return !memcmp((void *) l.v + 8,
(void *) r.v + 8,
bkey_val_bytes(l.k) - 8);
}
return bkey_fields_eq(*l.k, *r.k) &&
!memcmp(l.v, r.v, bkey_val_bytes(l.k));
}
#define ZERO_VERSION ((struct bversion) { .hi = 0, .lo = 0 })

@@ -188,8 +188,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
}
static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *b,
struct btree *prev, struct btree *cur,
struct bpos *pulled_from_scan)
struct btree *prev, struct btree *cur)
{
struct bch_fs *c = trans->c;
struct bpos expected_start = !prev
@@ -219,40 +218,45 @@ static int btree_check_node_boundaries(struct btree_trans *trans, struct btree *
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&cur->key));
if (bpos_lt(expected_start, cur->data->min_key)) { /* gap */
size_t nodes_found = 0;
if (b->c.level == 1 &&
bpos_lt(*pulled_from_scan, cur->data->min_key)) {
btree_id_recovers_from_scan(b->c.btree_id)) {
try(bch2_get_scanned_nodes(c, b->c.btree_id, 0,
expected_start,
bpos_predecessor(cur->data->min_key)));
bpos_predecessor(cur->data->min_key),
&buf, &nodes_found));
if (!nodes_found)
prt_printf(&buf, "btree node scan found no nodes this range\n");
}
*pulled_from_scan = cur->data->min_key;
ret = bch_err_throw(c, topology_repair_did_fill_from_scan);
} else {
if (mustfix_fsck_err(trans, btree_node_topology_gap_between_nodes,
"gap between btree nodes%s", buf.buf))
ret = set_node_min(c, cur, expected_start);
if (mustfix_fsck_err(trans, btree_node_topology_gap_between_nodes,
"gap between btree nodes%s", buf.buf)) {
if (nodes_found)
return bch_err_throw(c, topology_repair_did_fill_from_scan);
else
return set_node_min(c, cur, expected_start);
}
} else { /* overlap */
if (prev && BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) { /* cur overwrites prev */
if (bpos_ge(prev->data->min_key, cur->data->min_key)) { /* fully? */
if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_next_node,
"btree node overwritten by next node%s", buf.buf))
ret = bch_err_throw(c, topology_repair_drop_prev_node);
return bch_err_throw(c, topology_repair_drop_prev_node);
} else {
if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
"btree node with incorrect max_key%s", buf.buf))
ret = set_node_max(c, prev,
bpos_predecessor(cur->data->min_key));
return set_node_max(c, prev, bpos_predecessor(cur->data->min_key));
}
} else {
if (bpos_ge(expected_start, cur->data->max_key)) { /* fully? */
if (mustfix_fsck_err(trans, btree_node_topology_overwritten_by_prev_node,
"btree node overwritten by prev node%s", buf.buf))
ret = bch_err_throw(c, topology_repair_drop_this_node);
return bch_err_throw(c, topology_repair_drop_this_node);
} else {
if (mustfix_fsck_err(trans, btree_node_topology_bad_min_key,
"btree node with incorrect min_key%s", buf.buf))
ret = set_node_min(c, cur, expected_start);
return set_node_min(c, cur, expected_start);
}
}
}
@@ -286,8 +290,7 @@ fsck_err:
return ret;
}
static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
struct btree *child, struct bpos *pulled_from_scan)
static int btree_repair_node_end(struct btree_trans *trans, struct btree *b, struct btree *child)
{
struct bch_fs *c = trans->c;
int ret = 0;
@@ -304,25 +307,25 @@ static int btree_repair_node_end(struct btree_trans *trans, struct btree *b,
prt_str(&buf, "\nchild: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&child->key));
size_t nodes_found = 0;
if (b->c.level == 1)
try(bch2_get_scanned_nodes(c, b->c.btree_id, 0,
bpos_successor(child->key.k.p), b->key.k.p,
&buf, &nodes_found));
if (mustfix_fsck_err(trans, btree_node_topology_bad_max_key,
"btree node with incorrect max_key%s", buf.buf)) {
if (b->c.level == 1 &&
bpos_lt(*pulled_from_scan, b->key.k.p)) {
try(bch2_get_scanned_nodes(c, b->c.btree_id, 0,
bpos_successor(child->key.k.p), b->key.k.p));
*pulled_from_scan = b->key.k.p;
if (nodes_found)
return bch_err_throw(c, topology_repair_did_fill_from_scan);
} else {
try(set_node_max(c, child, b->key.k.p));
}
else
return set_node_max(c, child, b->key.k.p);
}
fsck_err:
return ret;
}
static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b,
struct bpos *pulled_from_scan)
static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b)
{
struct bch_fs *c = trans->c;
struct btree_and_journal_iter iter;
@@ -389,7 +392,7 @@ again:
}
ret = lockrestart_do(trans,
btree_check_node_boundaries(trans, b, prev, cur, pulled_from_scan));
btree_check_node_boundaries(trans, b, prev, cur));
if (ret && !bch2_err_matches(ret, BCH_ERR_topology_repair))
goto err;
@@ -434,7 +437,7 @@ again:
if (!ret && !IS_ERR_OR_NULL(prev)) {
BUG_ON(cur);
ret = lockrestart_do(trans,
btree_repair_node_end(trans, b, prev, pulled_from_scan));
btree_repair_node_end(trans, b, prev));
if (bch2_err_matches(ret, BCH_ERR_topology_repair_did_fill_from_scan)) {
new_pass = true;
ret = 0;
@@ -472,7 +475,7 @@ again:
if (ret)
goto err;
ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
ret = bch2_btree_repair_topology_recurse(trans, cur);
six_unlock_read(&cur->c.lock);
cur = NULL;
@@ -533,45 +536,56 @@ static int bch2_topology_check_root(struct btree_trans *trans, enum btree_id btr
return 0;
CLASS(printbuf, buf)();
bch2_log_msg_start(c, &buf);
prt_printf(&buf, "btree root ");
bch2_btree_id_to_text(&buf, btree);
prt_printf(&buf, " unreadable: %s\n", bch2_err_str(r->error));
int ret = 0;
bool print = true;
if (!btree_id_recovers_from_scan(btree)) {
r->alive = false;
r->error = 0;
bch2_btree_root_alloc_fake_trans(trans, btree, 0);
*reconstructed_root = true;
ret = bch2_btree_lost_data(c, &buf, btree);
bch2_print_str(c, KERN_NOTICE, buf.buf);
goto out;
}
bch2_btree_id_to_text(&buf, btree);
bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf);
ret = bch2_btree_has_scanned_nodes(c, btree);
if (ret < 0)
goto err;
if (!ret) {
__fsck_err(trans,
FSCK_CAN_FIX|(btree_id_can_reconstruct(btree) ? FSCK_AUTOFIX : 0),
btree_root_unreadable_and_scan_found_nothing,
"no nodes found for btree %s, continue?", buf.buf);
r->alive = false;
r->error = 0;
bch2_btree_root_alloc_fake_trans(trans, btree, 0);
} else {
r->alive = false;
r->error = 0;
bch2_btree_root_alloc_fake_trans(trans, btree, 1);
ret = bch2_btree_has_scanned_nodes(c, btree, &buf);
bch2_shoot_down_journal_keys(c, btree, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
try(bch2_get_scanned_nodes(c, btree, 0, POS_MIN, SPOS_MAX));
if (ret < 0) {
/*
* just log our message, we'll be rewinding to run
* btree node scan
*/
} else if (!ret) {
print = false;
__fsck_err(trans,
FSCK_CAN_FIX|(btree_id_can_reconstruct(btree) ? FSCK_AUTOFIX : 0),
btree_root_unreadable_and_scan_found_nothing,
"%sbtree node scan found no nodes, continue?", buf.buf);
r->alive = false;
r->error = 0;
bch2_btree_root_alloc_fake_trans(trans, btree, 0);
*reconstructed_root = true;
} else {
r->alive = false;
r->error = 0;
bch2_btree_root_alloc_fake_trans(trans, btree, 1);
*reconstructed_root = true;
bch2_shoot_down_journal_keys(c, btree, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
size_t nodes_found = 0;
try(bch2_get_scanned_nodes(c, btree, 0, POS_MIN, SPOS_MAX, &buf, &nodes_found));
}
}
out:
*reconstructed_root = true;
return 0;
err:
if (print)
bch2_print_str(c, KERN_NOTICE, buf.buf);
fsck_err:
bch_err_fn(c, ret);
return ret;
@@ -580,7 +594,6 @@ fsck_err:
int bch2_check_topology(struct bch_fs *c)
{
CLASS(btree_trans, trans)(c);
struct bpos pulled_from_scan = POS_MIN;
int ret = 0;
bch2_trans_srcu_unlock(trans);
@@ -597,7 +610,7 @@ recover:
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
ret = btree_check_root_boundaries(trans, b) ?:
bch2_btree_repair_topology_recurse(trans, b, &pulled_from_scan);
bch2_btree_repair_topology_recurse(trans, b);
six_unlock_read(&b->c.lock);
if (bch2_err_matches(ret, BCH_ERR_topology_repair_drop_this_node)) {

@@ -68,15 +68,17 @@ static void verify_update_old_key(struct btree_trans *trans, struct btree_insert
}
/* when updating btree ptrs, mem_ptr may change underneath us, unlocked */
struct bkey_s_c old = { &i->old_k, i->old_v };
if (unlikely(!bkey_and_val_eq(k, old))) {
if (!bkey_fields_eq(*k.k, i->old_k) || k.v != i->old_v) {
CLASS(printbuf, buf)();
prt_str(&buf, "updated cached old key doesn't match");
prt_str(&buf, "\ncached: ");
bch2_bkey_val_to_text(&buf, c, old);
prt_str(&buf, "\nreal: ");
bch2_bkey_val_to_text(&buf, c, k);
prt_str(&buf, "updated cached old key doesn't match\n");
prt_str(&buf, "cached: ");
bch2_bkey_to_text(&buf, &i->old_k);
prt_printf(&buf, " %px\n", i->old_v);
prt_str(&buf, "real: ");
bch2_bkey_to_text(&buf, k.k);
prt_printf(&buf, " %px\n", k.v);
panic("%s\n", buf.buf);
}
#endif

@@ -23,7 +23,7 @@ struct find_btree_nodes_worker {
struct bch_dev *ca;
};
static void found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct found_btree_node *n)
void bch2_found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, const struct found_btree_node *n)
{
bch2_btree_id_level_to_text(out, n->btree_id, n->level);
prt_printf(out, " seq=%u journal_seq=%llu cookie=%llx ",
@@ -34,24 +34,24 @@ static void found_btree_node_to_text(struct printbuf *out, struct bch_fs *c, con
if (n->range_updated)
prt_str(out, " range updated");
prt_newline(out);
guard(printbuf_indent)(out);
guard(printbuf_atomic)(out);
guard(rcu)();
for (unsigned i = 0; i < n->nr_ptrs; i++) {
prt_newline(out);
bch2_extent_ptr_to_text(out, c, n->ptrs + i);
prt_newline(out);
}
}
static void found_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c, found_btree_nodes nodes)
static void found_btree_nodes_to_text(struct printbuf *out, struct bch_fs *c,
darray_found_btree_node nodes)
{
guard(printbuf_indent)(out);
darray_for_each(nodes, i) {
found_btree_node_to_text(out, c, i);
prt_newline(out);
}
darray_for_each(nodes, i)
bch2_found_btree_node_to_text(out, c, i);
}
static void found_btree_node_to_key(struct bkey_i *k, const struct found_btree_node *f)
@@ -328,7 +328,7 @@ static bool nodes_overlap(const struct found_btree_node *l,
static int handle_overwrites(struct bch_fs *c,
struct found_btree_node *l,
found_btree_nodes *nodes_heap)
darray_found_btree_node *nodes_heap)
{
struct found_btree_node *r;
@@ -373,21 +373,19 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
{
struct find_btree_nodes *f = &c->found_btree_nodes;
CLASS(printbuf, buf)();
found_btree_nodes nodes_heap = {};
CLASS(darray_found_btree_node, nodes_heap)();
size_t dst;
int ret = 0;
if (f->nodes.nr)
return 0;
mutex_init(&f->lock);
try(read_btree_nodes(f));
guard(mutex)(&f->lock);
if (!f->nodes.nr) {
bch_err(c, "%s: no btree nodes found", __func__);
ret = -EINVAL;
goto err;
return -EINVAL;
}
if (0 && c->opts.verbose) {
@@ -407,8 +405,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
prev->cookie == i->cookie) {
if (prev->nr_ptrs == ARRAY_SIZE(prev->ptrs)) {
bch_err(c, "%s: found too many replicas for btree node", __func__);
ret = -EINVAL;
goto err;
return -EINVAL;
}
prev->ptrs[prev->nr_ptrs++] = i->ptrs[0];
} else {
@@ -431,33 +428,27 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
{
/* darray must have same layout as a heap */
min_heap_char real_heap;
BUILD_BUG_ON(sizeof(nodes_heap.nr) != sizeof(real_heap.nr));
BUILD_BUG_ON(sizeof(nodes_heap.size) != sizeof(real_heap.size));
BUILD_BUG_ON(offsetof(found_btree_nodes, nr) != offsetof(min_heap_char, nr));
BUILD_BUG_ON(offsetof(found_btree_nodes, size) != offsetof(min_heap_char, size));
BUILD_BUG_ON(sizeof(nodes_heap.nr) != sizeof(real_heap.nr));
BUILD_BUG_ON(sizeof(nodes_heap.size) != sizeof(real_heap.size));
BUILD_BUG_ON(offsetof(darray_found_btree_node, nr) != offsetof(min_heap_char, nr));
BUILD_BUG_ON(offsetof(darray_found_btree_node, size) != offsetof(min_heap_char, size));
}
min_heapify_all(&nodes_heap, &found_btree_node_heap_cbs, NULL);
if (nodes_heap.nr) {
ret = darray_push(&f->nodes, *min_heap_peek(&nodes_heap));
if (ret)
goto err;
try(darray_push(&f->nodes, *min_heap_peek(&nodes_heap)));
min_heap_pop(&nodes_heap, &found_btree_node_heap_cbs, NULL);
}
while (true) {
ret = handle_overwrites(c, &darray_last(f->nodes), &nodes_heap);
if (ret)
goto err;
try(handle_overwrites(c, &darray_last(f->nodes), &nodes_heap));
if (!nodes_heap.nr)
break;
ret = darray_push(&f->nodes, *min_heap_peek(&nodes_heap));
if (ret)
goto err;
try(darray_push(&f->nodes, *min_heap_peek(&nodes_heap)));
min_heap_pop(&nodes_heap, &found_btree_node_heap_cbs, NULL);
}
@@ -475,9 +466,7 @@ int bch2_scan_for_btree_nodes(struct bch_fs *c)
}
eytzinger0_sort(f->nodes.data, f->nodes.nr, sizeof(f->nodes.data[0]), found_btree_node_cmp_pos, NULL);
err:
darray_exit(&nodes_heap);
return ret;
return 0;
}
static int found_btree_node_range_start_cmp(const void *_l, const void *_r)
@@ -517,9 +506,9 @@ bool bch2_btree_node_is_stale(struct bch_fs *c, struct btree *b)
return false;
}
int bch2_btree_has_scanned_nodes(struct bch_fs *c, enum btree_id btree)
int bch2_btree_has_scanned_nodes(struct bch_fs *c, enum btree_id btree, struct printbuf *out)
{
try(bch2_run_print_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes));
try(bch2_run_explicit_recovery_pass(c, out, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0));
struct found_btree_node search = {
.btree_id = btree,
@@ -534,12 +523,13 @@ int bch2_btree_has_scanned_nodes(struct bch_fs *c, enum btree_id btree)
}
int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
unsigned level, struct bpos node_min, struct bpos node_max)
unsigned level, struct bpos node_min, struct bpos node_max,
struct printbuf *out, size_t *nodes_found)
{
if (!btree_id_recovers_from_scan(btree))
return 0;
try(bch2_run_print_explicit_recovery_pass(c, BCH_RECOVERY_PASS_scan_for_btree_nodes));
try(bch2_run_explicit_recovery_pass(c, out, BCH_RECOVERY_PASS_scan_for_btree_nodes, 0));
if (c->opts.verbose) {
CLASS(printbuf, buf)();
@@ -575,12 +565,6 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
found_btree_node_to_key(&tmp.k, &n);
if (c->opts.verbose) {
CLASS(printbuf, buf)();
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&tmp.k));
bch_verbose(c, "%s(): recovering %s", __func__, buf.buf);
}
BUG_ON(bch2_bkey_validate(c, bkey_i_to_s_c(&tmp.k),
(struct bkey_validate_context) {
.from = BKEY_VALIDATE_btree_node,
@@ -588,9 +572,27 @@ int bch2_get_scanned_nodes(struct bch_fs *c, enum btree_id btree,
.btree = btree,
}));
if (!*nodes_found) {
prt_printf(out, "recovering from btree node scan at ");
bch2_btree_id_level_to_text(out, btree, level);
prt_newline(out);
printbuf_indent_add(out, 2);
}
*nodes_found += 1;
if (*nodes_found < 10) {
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&tmp.k));
prt_newline(out);
} else if (*nodes_found == 10)
prt_printf(out, "<many>\n");
try(bch2_journal_key_insert(c, btree, level + 1, &tmp.k));
}
if (*nodes_found)
printbuf_indent_sub(out, 2);
return 0;
}
@@ -598,3 +600,8 @@ void bch2_find_btree_nodes_exit(struct find_btree_nodes *f)
{
darray_exit(&f->nodes);
}
void bch2_find_btree_nodes_init(struct find_btree_nodes *f)
{
mutex_init(&f->lock);
}

@@ -2,10 +2,17 @@
#ifndef _BCACHEFS_BTREE_NODE_SCAN_H
#define _BCACHEFS_BTREE_NODE_SCAN_H
void bch2_found_btree_node_to_text(struct printbuf *, struct bch_fs *,
const struct found_btree_node *);
int bch2_scan_for_btree_nodes(struct bch_fs *);
bool bch2_btree_node_is_stale(struct bch_fs *, struct btree *);
int bch2_btree_has_scanned_nodes(struct bch_fs *, enum btree_id);
int bch2_get_scanned_nodes(struct bch_fs *, enum btree_id, unsigned, struct bpos, struct bpos);
int bch2_btree_has_scanned_nodes(struct bch_fs *, enum btree_id, struct printbuf *);
int bch2_get_scanned_nodes(struct bch_fs *, enum btree_id, unsigned,
struct bpos, struct bpos,
struct printbuf *, size_t *);
void bch2_find_btree_nodes_exit(struct find_btree_nodes *);
void bch2_find_btree_nodes_init(struct find_btree_nodes *);
#endif /* _BCACHEFS_BTREE_NODE_SCAN_H */
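A sketch of how a caller consumes the widened bch2_get_scanned_nodes() signature, modeled on the topology-repair call sites in this commit (not itself part of the diff): log output accumulates in the printbuf, and *nodes_found reports how many scanned nodes were inserted as journal keys.

/* Hypothetical caller, following the pattern in bch2_topology_check_root(): */
CLASS(printbuf, buf)();
size_t nodes_found = 0;

try(bch2_get_scanned_nodes(c, btree, 0, POS_MIN, SPOS_MAX,
			   &buf, &nodes_found));
if (nodes_found)	/* scan filled in the missing range */
	return bch_err_throw(c, topology_repair_did_fill_from_scan);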

@@ -4,7 +4,7 @@
#include "util/darray.h"
struct found_btree_node {
typedef struct found_btree_node {
bool range_updated:1;
u8 btree_id;
u8 level;
@@ -18,14 +18,14 @@ struct found_btree_node {
unsigned nr_ptrs;
struct bch_extent_ptr ptrs[BCH_REPLICAS_MAX];
};
} found_btree_node;
typedef DARRAY(struct found_btree_node) found_btree_nodes;
DEFINE_DARRAY(found_btree_node);
struct find_btree_nodes {
int ret;
struct mutex lock;
found_btree_nodes nodes;
darray_found_btree_node nodes;
};
#endif /* _BCACHEFS_BTREE_NODE_SCAN_TYPES_H */

@@ -730,6 +730,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
btree_node_bad_seq,
"got wrong btree node: got\n%s",
(printbuf_reset(&buf),
printbuf_indent_add(&buf, 2),
bch2_btree_node_header_to_text(&buf, b->data),
buf.buf));
} else {

@@ -16,6 +16,7 @@
#include "btree/interior.h"
#include "btree/iter.h"
#include "btree/locking.h"
#include "btree/node_scan.h"
#include "btree/read.h"
#include "btree/update.h"
@@ -892,6 +893,45 @@ static const struct file_operations write_points_ops = {
.read = bch2_write_points_read,
};
static bool print_next_node_scan_node(struct dump_iter *i)
{
struct find_btree_nodes *f = &i->c->found_btree_nodes;
guard(mutex)(&f->lock);
if (i->iter >= f->nodes.nr)
return false;
size_t idx = inorder_to_eytzinger0(i->iter, f->nodes.nr);
bch2_found_btree_node_to_text(&i->buf, i->c, &f->nodes.data[idx]);
i->iter++;
return true;
}
static ssize_t bch2_btree_node_scan_read(struct file *file, char __user *buf,
size_t size, loff_t *ppos)
{
struct dump_iter *i = file->private_data;
i->ubuf = buf;
i->size = size;
i->ret = 0;
try(bch2_debugfs_flush_buf(i));
while (print_next_node_scan_node(i))
try(bch2_debugfs_flush_buf(i));
return i->ret;
}
static const struct file_operations btree_node_scan_ops = {
.owner = THIS_MODULE,
.open = bch2_dump_open,
.release = bch2_dump_release,
.read = bch2_btree_node_scan_read,
};
void bch2_fs_debug_exit(struct bch_fs *c)
{
if (!IS_ERR_OR_NULL(c->fs_debug_dir))
@@ -900,15 +940,16 @@ void bch2_fs_debug_exit(struct bch_fs *c)
static void bch2_fs_debug_btree_init(struct bch_fs *c, struct btree_debug *bd)
{
struct dentry *d;
struct dentry *btree_dir =
debugfs_create_dir(bch2_btree_id_str(bd->id), c->btree_debug_dir);
if (IS_ERR_OR_NULL(btree_dir))
return;
d = debugfs_create_dir(bch2_btree_id_str(bd->id), c->btree_debug_dir);
debugfs_create_file("keys", 0400, btree_dir, bd, &btree_debug_ops);
debugfs_create_file("keys", 0400, d, bd, &btree_debug_ops);
debugfs_create_file("formats", 0400, btree_dir, bd, &btree_format_debug_ops);
debugfs_create_file("formats", 0400, d, bd, &btree_format_debug_ops);
debugfs_create_file("bfloat-failed", 0400, d, bd,
debugfs_create_file("bfloat-failed", 0400, btree_dir, bd,
&bfloat_failed_debug_ops);
}
@@ -947,6 +988,9 @@ void bch2_fs_debug_init(struct bch_fs *c)
debugfs_create_file("btree_deadlock", 0400, c->fs_debug_dir,
c->btree_debug, &btree_deadlock_ops);
debugfs_create_file("btree_node_scan", 0400, c->fs_debug_dir,
c->btree_debug, &btree_node_scan_ops);
debugfs_create_file("write_points", 0400, c->fs_debug_dir,
c->btree_debug, &write_points_ops);

@@ -701,11 +701,14 @@ static ssize_t sysfs_opt_store(struct bch_fs *c,
BUG();
}
if (!ca)
bch2_opt_set_by_id(&c->opts, id, v);
if (changed) {
if (!ca) {
bch2_opt_set_by_id(&c->opts, id, v);
clear_bit(id, c->mount_opts.d);
}
if (changed)
bch2_opt_hook_post_set(c, ca, 0, id, v);
}
ret = size;
err:

@@ -595,7 +595,6 @@ static void __bch2_fs_free(struct bch_fs *c)
bch2_fs_accounting_exit(c);
bch2_fs_async_obj_exit(c);
bch2_journal_keys_put_initial(c);
bch2_find_btree_nodes_exit(&c->found_btree_nodes);
BUG_ON(atomic_read(&c->journal_keys.ref));
percpu_free_rwsem(&c->mark_lock);
@@ -1072,6 +1071,7 @@ static int bch2_fs_init(struct bch_fs *c, struct bch_sb *sb,
bch2_fs_sb_errors_init_early(c);
bch2_fs_snapshots_init_early(c);
bch2_fs_subvolumes_init_early(c);
bch2_find_btree_nodes_init(&c->found_btree_nodes);
INIT_LIST_HEAD(&c->list);

@@ -476,26 +476,6 @@ int bch2_require_recovery_pass(struct bch_fs *c,
return ret;
}
int bch2_run_print_explicit_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
enum bch_run_recovery_pass_flags flags = 0;
if (!recovery_pass_needs_set(c, pass, &flags))
return 0;
CLASS(printbuf, buf)();
bch2_log_msg_start(c, &buf);
guard(mutex)(&c->sb_lock);
bool write_sb = false;
int ret = __bch2_run_explicit_recovery_pass(c, &buf, pass,
RUN_RECOVERY_PASS_nopersistent,
&write_sb);
bch2_print_str(c, KERN_NOTICE, buf.buf);
return ret;
}
static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
struct bch_fs_recovery *r = &c->recovery;

@@ -48,8 +48,6 @@ static inline int bch2_recovery_cancelled(struct bch_fs *c)
bool bch2_recovery_pass_want_ratelimit(struct bch_fs *, enum bch_recovery_pass, unsigned);
int bch2_run_print_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass);
int __bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *,
enum bch_recovery_pass,
enum bch_run_recovery_pass_flags,

@@ -151,30 +151,28 @@ journal_error_check_stuck(struct journal *j, int error, unsigned flags)
return stuck;
}
void bch2_journal_do_writes_locked(struct journal *j)
{
lockdep_assert_held(&j->lock);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
u64 seq = journal_last_unallocated_seq(j);
struct journal_buf *w = j->buf + (seq & JOURNAL_BUF_MASK);
if (seq &&
!w->write_started &&
!journal_state_seq_count(j, j->reservations, seq)) {
j->seq_write_started = seq;
w->write_started = true;
closure_get(&c->cl);
closure_call(&w->io, bch2_journal_write, j->wq, NULL);
}
}
void bch2_journal_do_writes(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
for (u64 seq = journal_last_unwritten_seq(j);
seq <= journal_cur_seq(j);
seq++) {
unsigned idx = seq & JOURNAL_BUF_MASK;
struct journal_buf *w = j->buf + idx;
if (w->write_started && !w->write_allocated)
break;
if (w->write_started)
continue;
if (!journal_state_seq_count(j, j->reservations, seq)) {
j->seq_write_started = seq;
w->write_started = true;
closure_get(&c->cl);
closure_call(&w->io, bch2_journal_write, j->wq, NULL);
}
break;
}
guard(spinlock)(&j->lock);
bch2_journal_do_writes_locked(j);
}
/*
@@ -188,7 +186,7 @@ void bch2_journal_buf_put_final(struct journal *j, u64 seq)
if (__bch2_journal_pin_put(j, seq))
bch2_journal_update_last_seq(j);
bch2_journal_do_writes(j);
bch2_journal_do_writes_locked(j);
/*
* for __bch2_next_write_buffer_flush_journal_buf(), when quiescing an
@@ -820,11 +818,14 @@ recheck_need_open:
seq = res.seq;
buf = journal_seq_to_buf(j, seq);
buf->must_flush = true;
if (!buf->flush_time) {
buf->flush_time = local_clock() ?: 1;
buf->expires = jiffies;
scoped_guard(spinlock, &j->lock) {
buf->must_flush = true;
if (!buf->flush_time) {
buf->flush_time = local_clock() ?: 1;
buf->expires = jiffies;
}
}
if (parent && !closure_wait(&buf->wait, parent))
@@ -936,11 +937,14 @@ int __bch2_journal_meta(struct journal *j)
try(bch2_journal_res_get(j, &res, jset_u64s(0), 0, NULL));
struct journal_buf *buf = j->buf + (res.seq & JOURNAL_BUF_MASK);
buf->must_flush = true;
if (!buf->flush_time) {
buf->flush_time = local_clock() ?: 1;
buf->expires = jiffies;
scoped_guard(spinlock, &j->lock) {
buf->must_flush = true;
if (!buf->flush_time) {
buf->flush_time = local_clock() ?: 1;
buf->expires = jiffies;
}
}
bch2_journal_res_put(j, &res);

@@ -139,6 +139,16 @@ static inline u64 journal_last_unwritten_seq(struct journal *j)
return j->seq_ondisk + 1;
}
static inline u64 journal_last_unallocated_seq(struct journal *j)
{
for (u64 seq = journal_last_unwritten_seq(j);
seq <= journal_cur_seq(j);
seq++)
if (!j->buf[seq & JOURNAL_BUF_MASK].write_allocated)
return seq;
return 0;
}
static inline bool journal_seq_unwritten(struct journal *j, u64 seq)
{
return seq > j->seq_ondisk;
@@ -295,6 +305,8 @@ static inline union journal_res_state journal_state_buf_put(struct journal *j, u
}
bool bch2_journal_entry_close(struct journal *);
void bch2_journal_do_writes_locked(struct journal *);
void bch2_journal_do_writes(struct journal *);
void bch2_journal_buf_put_final(struct journal *, u64);

@@ -328,7 +328,7 @@ again:
* allocated, in bch2_journal_write() - but the journal write error path
* is special:
*/
bch2_journal_do_writes(j);
bch2_journal_do_writes_locked(j);
spin_unlock(&j->lock);
if (last_seq_ondisk_updated) {
@@ -667,6 +667,7 @@ CLOSURE_CALLBACK(bch2_journal_write)
BUG_ON(!w->write_started);
BUG_ON(w->write_allocated);
BUG_ON(w->write_done);
BUG_ON(journal_last_unallocated_seq(j) != le64_to_cpu(w->data->seq));
j->write_start_time = local_clock();
@@ -719,7 +720,7 @@ CLOSURE_CALLBACK(bch2_journal_write)
* available:
*/
bch2_journal_space_available(j);
bch2_journal_do_writes(j);
bch2_journal_do_writes_locked(j);
}
w->devs_written = bch2_bkey_devs(c, bkey_i_to_s_c(&w->key));

@@ -508,6 +508,7 @@ void bch2_opt_to_text(struct printbuf *out,
void bch2_opts_to_text(struct printbuf *out,
struct bch_opts opts,
struct bch_fs *c, struct bch_sb *sb,
struct bch_opts_mask *mask,
unsigned show_mask, unsigned hide_mask,
unsigned flags)
{
@@ -519,6 +520,9 @@ void bch2_opts_to_text(struct printbuf *out,
if ((opt->flags & hide_mask) || !(opt->flags & show_mask))
continue;
if (mask && !test_bit(i, mask->d))
continue;
u64 v = bch2_opt_get_by_id(&opts, i);
if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
continue;

@@ -558,6 +558,17 @@ enum fsck_err_opts {
NULL, "BTREE_ITER_prefetch causes btree nodes to be\n"\
" prefetched sequentially")
enum bch_opt_id {
#define x(_name, ...) Opt_##_name,
BCH_OPTS()
#undef x
bch2_opts_nr
};
struct bch_opts_mask {
unsigned long d[BITS_TO_LONGS(bch2_opts_nr)];
};
struct bch_opts {
#define x(_name, _bits, ...) unsigned _name##_defined:1;
BCH_OPTS()
@@ -602,13 +613,6 @@ static inline struct bch_opts bch2_opts_empty(void)
void bch2_opts_apply(struct bch_opts *, struct bch_opts);
enum bch_opt_id {
#define x(_name, ...) Opt_##_name,
BCH_OPTS()
#undef x
bch2_opts_nr
};
struct bch_fs;
struct printbuf;
@@ -659,6 +663,7 @@ void bch2_opt_to_text(struct printbuf *, struct bch_fs *, struct bch_sb *,
void bch2_opts_to_text(struct printbuf *,
struct bch_opts,
struct bch_fs *, struct bch_sb *,
struct bch_opts_mask *,
unsigned, unsigned, unsigned);
int bch2_opt_hook_pre_set(struct bch_fs *, struct bch_dev *, u64, enum bch_opt_id, u64, bool);

@@ -63,9 +63,17 @@ static int check_subvol(struct btree_trans *trans,
snapid = le32_to_cpu(subvol.snapshot);
ret = bch2_snapshot_lookup(trans, snapid, &snapshot);
if (bch2_err_matches(ret, ENOENT))
return bch2_run_print_explicit_recovery_pass(c,
BCH_RECOVERY_PASS_reconstruct_snapshots) ?: ret;
if (bch2_err_matches(ret, ENOENT)) {
bch2_log_msg_start(c, &buf);
prt_printf(&buf, "subvolume points to missing snapshot\n");
bch2_bkey_val_to_text(&buf, c, k);
prt_newline(&buf);
ret = bch2_run_explicit_recovery_pass(c, &buf,
BCH_RECOVERY_PASS_reconstruct_snapshots, 0) ?: ret;
bch2_print_str(c, KERN_NOTICE, buf.buf);
return ret;
}
if (ret)
return ret;

@@ -2002,6 +2002,7 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root)
CLASS(printbuf, buf)();
bch2_opts_to_text(&buf, c->opts, c, c->disk_sb.sb,
&c->mount_opts,
OPT_MOUNT, OPT_HIDDEN, OPT_SHOW_MOUNT_STYLE);
printbuf_nul_terminate(&buf);
seq_printf(seq, ",%s", buf.buf);
@@ -2085,6 +2086,13 @@ static int bch2_test_super(struct super_block *s, void *data)
return true;
}
static void set_mount_opts(struct bch_fs *c, struct bch_opts *opts)
{
for (enum bch_opt_id id = 0; id < bch2_opts_nr; id++)
if (bch2_opt_defined_by_id(opts, id))
set_bit(id, c->mount_opts.d);
}
static int bch2_fs_get_tree(struct fs_context *fc)
{
struct bch_fs *c;
@@ -2121,6 +2129,7 @@ static int bch2_fs_get_tree(struct fs_context *fc)
if (opt_defined(opts, discard))
set_bit(BCH_FS_discard_mount_opt_set, &c->flags);
set_mount_opts(c, &opts);
/* Some options can't be parsed until after the fs is started: */
opts = bch2_opts_empty();
@@ -2129,6 +2138,7 @@ static int bch2_fs_get_tree(struct fs_context *fc)
goto err_stop_fs;
bch2_opts_apply(&c->opts, opts);
set_mount_opts(c, &opts);
ret = bch2_fs_start(c);
if (ret)