Update bcachefs sources to 3693b2ca83 fixup! bcachefs: More topology repair code

Kent Overstreet 2021-07-08 12:14:49 -04:00
parent a471a75449
commit 050d5f7bcf
14 changed files with 97 additions and 58 deletions
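In brief, per the hunks below: the old POS_MAX is split into POS_MAX (snapshot field 0) and a new SPOS_MAX (snapshot field KEY_SNAPSHOT_MAX), and most former POS_MAX users are converted to SPOS_MAX; btree_insert_entry.trigger_flags becomes .flags under the renamed enum btree_update_flags, which gains BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE; topology repair now detects btree nodes overwritten by a neighbouring node and reports DROP_PREV_NODE / DROP_THIS_NODE; and a new bch2_d_types[] table lets dirents print their type by name. As a sketch reconstructed from the bkey.h hunk below (not a verbatim excerpt of the file), the resulting position macros are:

        /* Position macros after this update, per the bkey.h hunk below:
         * POS_MAX keeps a snapshot field of 0, SPOS_MAX spans all snapshots. */
        #define POS_MIN                 SPOS(0, 0, 0)
        #define POS_MAX                 SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
        #define SPOS_MAX                SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
        #define POS(_inode, _offset)    SPOS(_inode, _offset, 0)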

@@ -1 +1 @@
-dbee44d5abdad7a2812b1e51c364dd0c1c3f328c
+3693b2ca83ff9eda49660b31299d2bebe3a1075f

@@ -148,7 +148,8 @@ static inline struct bpos SPOS(__u64 inode, __u64 offset, __u32 snapshot)
 }
 #define POS_MIN SPOS(0, 0, 0)
-#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
+#define POS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, 0)
+#define SPOS_MAX SPOS(KEY_INODE_MAX, KEY_OFFSET_MAX, KEY_SNAPSHOT_MAX)
 #define POS(_inode, _offset) SPOS(_inode, _offset, 0)
 /* Empty placeholder struct, for container_of() */

@@ -216,7 +216,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
         return 0;
 }
-static int btree_repair_node_start(struct bch_fs *c, struct btree *b,
+static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
                                         struct btree *prev, struct btree *cur)
 {
         struct bpos expected_start = !prev
@@ -233,31 +233,51 @@ static int btree_repair_node_start(struct bch_fs *c, struct btree *b,
                 bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&prev->key));
         }
-        if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
-                        "btree node with incorrect min_key at btree %s level %u:\n"
-                        " prev %s\n"
-                        " cur %s",
-                        bch2_btree_ids[b->c.btree_id], b->c.level,
-                        buf1,
-                        (bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key)), buf2))) {
+        bch2_bkey_val_to_text(&PBUF(buf2), c, bkey_i_to_s_c(&cur->key));
         if (prev &&
             bpos_cmp(expected_start, cur->data->min_key) > 0 &&
             BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
-                if (bkey_cmp(prev->data->min_key,
-                             cur->data->min_key) <= 0)
+                /* cur overwrites prev: */
+                if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
+                                                 cur->data->min_key) >= 0, c,
+                                "btree node overwritten by next node at btree %s level %u:\n"
+                                " node %s\n"
+                                " next %s",
+                                bch2_btree_ids[b->c.btree_id], b->c.level,
+                                buf1, buf2))
                         return DROP_PREV_NODE;
+                if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
+                                bpos_predecessor(cur->data->min_key)), c,
+                                "btree node with incorrect max_key at btree %s level %u:\n"
+                                " node %s\n"
+                                " next %s",
+                                bch2_btree_ids[b->c.btree_id], b->c.level,
+                                buf1, buf2))
                         ret = set_node_max(c, prev,
                                            bpos_predecessor(cur->data->min_key));
         } else {
-                if (bkey_cmp(expected_start, b->data->max_key) >= 0)
+                /* prev overwrites cur: */
+                if (mustfix_fsck_err_on(bpos_cmp(expected_start,
+                                                 cur->data->max_key) >= 0, c,
+                                "btree node overwritten by prev node at btree %s level %u:\n"
+                                " prev %s\n"
+                                " node %s",
+                                bch2_btree_ids[b->c.btree_id], b->c.level,
+                                buf1, buf2))
                         return DROP_THIS_NODE;
+                if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
+                                "btree node with incorrect min_key at btree %s level %u:\n"
+                                " prev %s\n"
+                                " node %s",
+                                bch2_btree_ids[b->c.btree_id], b->c.level,
+                                buf1, buf2))
                         ret = set_node_min(c, cur, expected_start);
         }
-                if (ret)
-                        return ret;
-        }
 fsck_err:
         return ret;
 }
@@ -334,7 +354,7 @@ again:
                         break;
                 }
-                ret = btree_repair_node_start(c, b, prev, cur);
+                ret = btree_repair_node_boundaries(c, b, prev, cur);
                 if (ret == DROP_THIS_NODE) {
                         six_unlock_read(&cur->c.lock);
@@ -942,7 +962,7 @@ static int bch2_gc_btree_init(struct bch_fs *c,
                 goto fsck_err;
         }
-        if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, POS_MAX), c,
+        if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
                         "btree root with incorrect max_key: %s",
                         (bch2_bpos_to_text(&PBUF(buf), b->data->max_key), buf))) {
                 bch_err(c, "repair unimplemented");
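A hedged, simplified model of the boundary-repair decision in the btree_repair_node_boundaries() hunk above; the struct, helper name, and integer keys below are stand-ins (not bcachefs code), and only the decision shape mirrors the diff:

        /* Stand-in sketch: integer keys model bpos comparisons, seq models
         * BTREE_NODE_SEQ(); the DROP_* values mirror the return codes above. */
        enum { KEEP_NODES, DROP_PREV_NODE, DROP_THIS_NODE };

        struct node { int min_key, max_key, seq; };

        static int repair_boundaries(const struct node *prev, const struct node *cur,
                                     int expected_start)
        {
                if (prev && expected_start > cur->min_key && cur->seq > prev->seq) {
                        /* cur is newer and overlaps prev: drop prev if cur covers all
                         * of it, otherwise prev's max_key gets clamped below cur->min_key */
                        return prev->min_key >= cur->min_key ? DROP_PREV_NODE : KEEP_NODES;
                }
                /* otherwise prev wins: drop cur if prev covers all of it, otherwise
                 * cur's min_key gets bumped up to expected_start */
                return expected_start >= cur->max_key ? DROP_THIS_NODE : KEEP_NODES;
        }

In the @@ -334,7 +354,7 hunk above, the caller checks the DROP_THIS_NODE return and drops its read lock on cur.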

@@ -87,7 +87,7 @@ static inline struct gc_pos gc_pos_btree_node(struct btree *b)
  */
 static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
 {
-        return gc_pos_btree(id, POS_MAX, BTREE_MAX_DEPTH);
+        return gc_pos_btree(id, SPOS_MAX, BTREE_MAX_DEPTH);
 }
 static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)

@@ -1074,7 +1074,7 @@ static inline int btree_iter_lock_root(struct btree_iter *iter,
                 }
                 lock_type = __btree_lock_want(iter, iter->level);
-                if (unlikely(!btree_node_lock(b, POS_MAX, iter->level,
+                if (unlikely(!btree_node_lock(b, SPOS_MAX, iter->level,
                                               iter, lock_type,
                                               lock_root_check_fn, rootp,
                                               trace_ip)))
@@ -1595,7 +1595,7 @@ out:
 inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
         struct bpos pos = iter->k.p;
-        bool ret = bpos_cmp(pos, POS_MAX) != 0;
+        bool ret = bpos_cmp(pos, SPOS_MAX) != 0;
         if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
                 pos = bkey_successor(iter, pos);
@@ -1617,7 +1617,7 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
 {
         struct bpos next_pos = iter->l[0].b->key.k.p;
-        bool ret = bpos_cmp(next_pos, POS_MAX) != 0;
+        bool ret = bpos_cmp(next_pos, SPOS_MAX) != 0;
         /*
          * Typically, we don't want to modify iter->pos here, since that
@@ -1627,7 +1627,7 @@ static inline bool btree_iter_set_pos_to_next_leaf(struct btree_iter *iter)
         if (ret)
                 btree_iter_set_search_pos(iter, bpos_successor(next_pos));
         else
-                bch2_btree_iter_set_pos(iter, POS_MAX);
+                bch2_btree_iter_set_pos(iter, SPOS_MAX);
         return ret;
 }
@@ -1843,7 +1843,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
         switch (btree_iter_type(iter)) {
         case BTREE_ITER_KEYS:
                 k = btree_iter_level_peek_all(iter, &iter->l[0]);
-                EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
+                EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, iter->pos) == 0);
                 break;
         case BTREE_ITER_CACHED:
                 ck = (void *) iter->l[0].b;

@@ -340,7 +340,7 @@ struct bkey_cached {
 };
 struct btree_insert_entry {
-        unsigned trigger_flags;
+        unsigned flags;
         u8 bkey_type;
         enum btree_id btree_id:8;
         u8 level;
@@ -639,7 +639,9 @@ static inline bool btree_type_has_snapshots(enum btree_id id)
         return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
 }
-enum btree_trigger_flags {
+enum btree_update_flags {
+        __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE,
         __BTREE_TRIGGER_NORUN, /* Don't run triggers at all */
         __BTREE_TRIGGER_INSERT,
@@ -650,6 +652,8 @@ enum btree_trigger_flags {
         __BTREE_TRIGGER_NOATOMIC,
 };
+#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
 #define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN)
 #define BTREE_TRIGGER_INSERT (1U << __BTREE_TRIGGER_INSERT)

@@ -77,7 +77,7 @@ int bch2_btree_node_update_key(struct bch_fs *, struct btree_iter *,
                                struct btree *, struct bkey_i *);
 int bch2_trans_update(struct btree_trans *, struct btree_iter *,
-                       struct bkey_i *, enum btree_trigger_flags);
+                       struct bkey_i *, enum btree_update_flags);
 void bch2_trans_commit_hook(struct btree_trans *,
                             struct btree_trans_commit_hook *);
 int __bch2_trans_commit(struct btree_trans *);

@@ -367,7 +367,7 @@ static struct btree *__btree_root_alloc(struct btree_update *as, unsigned level)
         struct btree *b = bch2_btree_node_alloc(as, level);
         btree_set_min(b, POS_MIN);
-        btree_set_max(b, POS_MAX);
+        btree_set_max(b, SPOS_MAX);
         b->data->format = bch2_btree_calc_format(b);
         btree_node_set_format(b, b->data->format);
@@ -1590,7 +1590,7 @@ retry:
         b = iter->l[level].b;
         if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
-            (sib == btree_next_sib && !bpos_cmp(b->data->max_key, POS_MAX))) {
+            (sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
                 b->sib_u64s[sib] = U16_MAX;
                 goto out;
         }
@@ -2014,7 +2014,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
         b->c.btree_id = id;
         bkey_btree_ptr_init(&b->key);
-        b->key.k.p = POS_MAX;
+        b->key.k.p = SPOS_MAX;
         *((u64 *) bkey_i_to_btree_ptr(&b->key)->v.start) = U64_MAX - id;
         bch2_bset_init_first(b, &b->data->keys);
@@ -2022,7 +2022,7 @@ void bch2_btree_root_alloc(struct bch_fs *c, enum btree_id id)
         b->data->flags = 0;
         btree_set_min(b, POS_MIN);
-        btree_set_max(b, POS_MAX);
+        btree_set_max(b, SPOS_MAX);
         b->data->format = bch2_btree_calc_format(b);
         btree_node_set_format(b, b->data->format);

@@ -363,7 +363,7 @@ static noinline void bch2_trans_mark_gc(struct btree_trans *trans)
                 if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
                         bch2_mark_update(trans, i->iter, i->k,
-                                         i->trigger_flags|BTREE_TRIGGER_GC);
+                                         i->flags|BTREE_TRIGGER_GC);
         }
 }
@@ -468,7 +468,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
         trans_for_each_update(trans, i)
                 if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type))
                         bch2_mark_update(trans, i->iter, i->k,
-                                         i->trigger_flags);
+                                         i->flags);
         if (marking && trans->fs_usage_deltas)
                 bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas);
@@ -798,8 +798,7 @@ static int extent_handle_overwrites(struct btree_trans *trans,
                 if (bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(i->k))) {
                         update_iter = bch2_trans_copy_iter(trans, iter);
-                        ret = bch2_btree_delete_at(trans, update_iter,
-                                                   i->trigger_flags);
+                        ret = bch2_btree_delete_at(trans, update_iter, i->flags);
                         bch2_trans_iter_put(trans, update_iter);
                         if (ret)
@@ -840,14 +839,16 @@ static int extent_handle_overwrites(struct btree_trans *trans,
                         if (ret)
                                 goto out;
-                        bch2_trans_update(trans, update_iter, update, i->trigger_flags);
+                        bch2_trans_update(trans, update_iter, update,
+                                          BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+                                          i->flags);
                         bch2_trans_iter_put(trans, update_iter);
                 }
                 if (bkey_cmp(k.k->p, i->k->k.p) <= 0) {
                         update_iter = bch2_trans_copy_iter(trans, iter);
                         ret = bch2_btree_delete_at(trans, update_iter,
-                                                   i->trigger_flags);
+                                                   i->flags);
                         bch2_trans_iter_put(trans, update_iter);
                         if (ret)
@@ -862,7 +863,7 @@ static int extent_handle_overwrites(struct btree_trans *trans,
                         bkey_reassemble(update, k);
                         bch2_cut_front(i->k->k.p, update);
-                        bch2_trans_update(trans, iter, update, i->trigger_flags);
+                        bch2_trans_update(trans, iter, update, i->flags);
                         goto out;
                 }
 next:
@@ -907,7 +908,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
 #ifdef CONFIG_BCACHEFS_DEBUG
         trans_for_each_update(trans, i)
                 if (btree_iter_type(i->iter) != BTREE_ITER_CACHED &&
-                    !(i->trigger_flags & BTREE_TRIGGER_NORUN))
+                    !(i->flags & BTREE_TRIGGER_NORUN))
                         bch2_btree_key_cache_verify_clean(trans,
                                         i->btree_id, i->k->k.p);
 #endif
@@ -925,8 +926,8 @@ int __bch2_trans_commit(struct btree_trans *trans)
                         i->trans_triggers_run = true;
                         trans_trigger_run = true;
-                        ret = bch2_trans_mark_update(trans, i->iter, i->k,
-                                                     i->trigger_flags);
+                        ret = bch2_trans_mark_update(trans, i->iter,
+                                                     i->k, i->flags);
                         if (unlikely(ret)) {
                                 if (ret == -EINTR)
                                         trace_trans_restart_mark(trans->ip, _RET_IP_,
@@ -1009,10 +1010,10 @@ err:
 }
 int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
-                      struct bkey_i *k, enum btree_trigger_flags flags)
+                      struct bkey_i *k, enum btree_update_flags flags)
 {
         struct btree_insert_entry *i, n = (struct btree_insert_entry) {
-                .trigger_flags = flags,
+                .flags = flags,
                 .bkey_type = __btree_node_type(iter->level, iter->btree_id),
                 .btree_id = iter->btree_id,
                 .level = iter->level,
@@ -1110,7 +1111,7 @@ int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
 }
 int bch2_btree_delete_at(struct btree_trans *trans,
-                         struct btree_iter *iter, unsigned trigger_flags)
+                         struct btree_iter *iter, unsigned update_flags)
 {
         struct bkey_i *k;
@@ -1120,7 +1121,7 @@ int bch2_btree_delete_at(struct btree_trans *trans,
         bkey_init(&k->k);
         k->k.p = iter->pos;
-        return bch2_trans_update(trans, iter, k, trigger_flags);
+        return bch2_trans_update(trans, iter, k, update_flags);
 }
 int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,

@@ -313,7 +313,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
         if (err)
                 return err;
-        if (!i->size || !bpos_cmp(POS_MAX, i->from))
+        if (!i->size || !bpos_cmp(SPOS_MAX, i->from))
                 return i->ret;
         bch2_trans_init(&trans, i->c, 0, 0);
@@ -329,7 +329,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
          * can't easily correctly restart a btree node traversal across
          * all nodes, meh
          */
-        i->from = bpos_cmp(POS_MAX, b->key.k.p)
+        i->from = bpos_cmp(SPOS_MAX, b->key.k.p)
                 ? bpos_successor(b->key.k.p)
                 : b->key.k.p;

@@ -112,7 +112,7 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,
         bch_scnmemcpy(out, d.v->d_name,
                       bch2_dirent_name_bytes(d));
-        pr_buf(out, " -> %llu type %u", d.v->d_inum, d.v->d_type);
+        pr_buf(out, " -> %llu type %s", d.v->d_inum, bch2_d_types[d.v->d_type]);
 }
 static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,

@@ -769,7 +769,7 @@ static int bch2_move_btree(struct bch_fs *c,
                         break;
                 if ((cmp_int(id, end_btree_id) ?:
-                     bkey_cmp(b->key.k.p, end_pos)) > 0)
+                     bpos_cmp(b->key.k.p, end_pos)) > 0)
                         break;
                 stats->pos = iter->pos;
@@ -921,7 +921,7 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
         ret = bch2_move_btree(c,
                               0, POS_MIN,
-                              BTREE_ID_NR, POS_MAX,
+                              BTREE_ID_NR, SPOS_MAX,
                               rewrite_old_nodes_pred, c, stats);
         if (!ret) {
                 mutex_lock(&c->sb_lock);

@@ -63,6 +63,18 @@ const char * const bch2_member_states[] = {
 #undef x
+const char * const bch2_d_types[] = {
+        [DT_UNKNOWN] = "unknown",
+        [DT_FIFO] = "fifo",
+        [DT_CHR] = "chr",
+        [DT_DIR] = "dir",
+        [DT_BLK] = "blk",
+        [DT_REG] = "reg",
+        [DT_LNK] = "lnk",
+        [DT_SOCK] = "sock",
+        [DT_WHT] = "whiteout",
+};
 void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)
 {
 #define x(_name, ...) \

@@ -18,6 +18,7 @@ extern const char * const bch2_str_hash_types[];
 extern const char * const bch2_data_types[];
 extern const char * const bch2_cache_replacement_policies[];
 extern const char * const bch2_member_states[];
+extern const char * const bch2_d_types[];
 /*
  * Mount options; we also store defaults in the superblock.