mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to c7defb5793 bcachefs: Split btree_iter_traverse and bch2_btree_iter_traverse()
commit 9f69a652dc (parent 1f0d52aa06)
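The heart of this sync, visible in the btree_iter.c hunks below, is the split named in the commit title: a new static btree_iter_traverse() becomes the internal flavour that re-traverses only when the iterator has gone stale and does not touch the search position, while the exported bch2_btree_iter_traverse() first snaps the search position back to the user-visible pos and then delegates to the internal helper. A minimal self-contained C sketch of that pattern follows; the stub types, enum values, and the __traverse() body are illustrative assumptions, and only the shape of the two wrappers mirrors the real functions in the diff:

/* Sketch only: simplified stand-ins for the real bcachefs types. */
enum iter_state {
	ITER_UPTODATE = 0,	/* pointing at a valid key */
	ITER_NEED_PEEK,		/* node locked, must re-peek */
	ITER_NEED_RELOCK,	/* locks dropped, must relock */
	ITER_NEED_TRAVERSE,	/* must walk down from the root */
};

struct bpos { unsigned long long inode, offset; };

struct btree_iter {
	enum iter_state	uptodate;
	struct bpos	pos;		/* position the caller asked for */
	struct bpos	real_pos;	/* position actually traversed to */
};

/* Assumed stand-in for the real walk down to real_pos. */
static int __traverse(struct btree_iter *iter)
{
	iter->uptodate = ITER_NEED_PEEK;
	return 0;
}

/* Internal flavour: only re-traverse if stale; search position untouched. */
static int btree_iter_traverse(struct btree_iter *iter)
{
	return iter->uptodate >= ITER_NEED_RELOCK ? __traverse(iter) : 0;
}

/* External flavour: reset the search position to pos, then traverse. */
static int bch2_btree_iter_traverse(struct btree_iter *iter)
{
	iter->real_pos = iter->pos;
	return btree_iter_traverse(iter);
}

The same discipline runs through the rest of the diff: callers inside the iterator code switch to the internal btree_iter_traverse(), and the old advance/rewind helpers are renamed to bch2_btree_iter_advance()/bch2_btree_iter_rewind().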
@@ -1 +1 @@
-0a9f0fc68a3cfaaee05a0848673fdb3de3108982
+c7defb5793039b55066e8e9d41e76bae826a7894
@@ -1396,6 +1396,8 @@ enum bch_sb_feature {
 enum bch_sb_compat {
 	BCH_COMPAT_FEAT_ALLOC_INFO	= 0,
 	BCH_COMPAT_FEAT_ALLOC_METADATA	= 1,
+	BCH_COMPAT_FEAT_EXTENTS_ABOVE_BTREE_UPDATES_DONE = 2,
+	BCH_COMPAT_FEAT_BFORMAT_OVERFLOW_DONE = 3,
 };
 
 /* options: */
@@ -551,7 +551,12 @@ void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
 static void set_format_field(struct bkey_format *f, enum bch_bkey_fields i,
			     unsigned bits, u64 offset)
 {
-	offset = bits == 64 ? 0 : min(offset, U64_MAX - ((1ULL << bits) - 1));
+	unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
+	u64 unpacked_max = ~((~0ULL << 1) << (unpacked_bits - 1));
+
+	bits = min(bits, unpacked_bits);
+
+	offset = bits == unpacked_bits ? 0 : min(offset, unpacked_max - ((1ULL << bits) - 1));
 
 	f->bits_per_field[i]	= bits;
 	f->field_offset[i]	= cpu_to_le64(offset);
@@ -169,8 +169,22 @@ void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
		pr_buf(out, "POS_MIN");
	else if (!bkey_cmp(pos, POS_MAX))
		pr_buf(out, "POS_MAX");
-	else
-		pr_buf(out, "%llu:%llu", pos.inode, pos.offset);
+	else {
+		if (pos.inode == U64_MAX)
+			pr_buf(out, "U64_MAX");
+		else
+			pr_buf(out, "%llu", pos.inode);
+		pr_buf(out, ":");
+		if (pos.offset == U64_MAX)
+			pr_buf(out, "U64_MAX");
+		else
+			pr_buf(out, "%llu", pos.offset);
+		pr_buf(out, ":");
+		if (pos.snapshot == U32_MAX)
+			pr_buf(out, "U32_MAX");
+		else
+			pr_buf(out, "%u", pos.snapshot);
+	}
 }
 
 void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
@@ -185,8 +199,7 @@ void bch2_bkey_to_text(struct printbuf *out, const struct bkey *k)
 
		bch2_bpos_to_text(out, k->p);
 
-		pr_buf(out, " snap %u len %u ver %llu",
-		       k->p.snapshot, k->size, k->version.lo);
+		pr_buf(out, " len %u ver %llu", k->size, k->version.lo);
	} else {
		pr_buf(out, "(null)");
	}
@@ -1169,7 +1169,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
			}
		}
 
-		bch2_btree_iter_next(iter);
+		bch2_btree_iter_advance(iter);
	}
 
	bch2_trans_exit(&trans);
@@ -1638,7 +1638,8 @@ int bch2_gc_thread_start(struct bch_fs *c)
 {
	struct task_struct *p;
 
-	BUG_ON(c->gc_thread);
+	if (c->gc_thread)
+		return 0;
 
	p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
	if (IS_ERR(p)) {
@@ -16,6 +16,8 @@
 #include <linux/prefetch.h>
 #include <trace/events/bcachefs.h>
 
+static void btree_iter_set_search_pos(struct btree_iter *, struct bpos);
+
 static inline bool is_btree_node(struct btree_iter *iter, unsigned l)
 {
	return l < BTREE_MAX_DEPTH &&
@@ -492,9 +494,9 @@ static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
 static void bch2_btree_iter_verify_level(struct btree_iter *iter,
					 unsigned level)
 {
-	struct btree_iter_level *l = &iter->l[level];
-	struct btree_node_iter tmp = l->iter;
-	bool locked = btree_node_locked(iter, level);
+	struct btree_iter_level *l;
+	struct btree_node_iter tmp;
+	bool locked;
	struct bkey_packed *p, *k;
	char buf1[100], buf2[100], buf3[100];
	const char *msg;
@@ -502,6 +504,10 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter,
	if (!bch2_debug_check_iterators)
		return;
 
+	l	= &iter->l[level];
+	tmp	= l->iter;
+	locked	= btree_node_locked(iter, level);
+
	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
		if (!level)
			bch2_btree_iter_verify_cached(iter);
@@ -809,7 +815,7 @@ static inline struct bkey_s_c __btree_iter_unpack(struct btree_iter *iter,
 }
 
 /* peek_all() doesn't skip deleted keys */
-static inline struct bkey_s_c __btree_iter_peek_all(struct btree_iter *iter,
+static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
						    struct btree_iter_level *l,
						    struct bkey *u)
 {
@@ -817,18 +823,24 @@ static inline struct bkey_s_c btree_iter_level_peek_all(struct btree_iter *iter,
			bch2_btree_node_iter_peek_all(&l->iter, l->b));
 }
 
-static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter,
+static inline struct bkey_s_c btree_iter_level_peek(struct btree_iter *iter,
						struct btree_iter_level *l)
 {
-	return __btree_iter_unpack(iter, l, &iter->k,
+	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_peek(&l->iter, l->b));
+
+	iter->real_pos = k.k ? k.k->p : l->b->key.k.p;
+	return k;
 }
 
-static inline struct bkey_s_c __btree_iter_prev(struct btree_iter *iter,
+static inline struct bkey_s_c btree_iter_level_prev(struct btree_iter *iter,
						struct btree_iter_level *l)
 {
-	return __btree_iter_unpack(iter, l, &iter->k,
+	struct bkey_s_c k = __btree_iter_unpack(iter, l, &iter->k,
			bch2_btree_node_iter_prev(&l->iter, l->b));
+
+	iter->real_pos = k.k ? k.k->p : l->b->data->min_key;
+	return k;
 }
 
 static inline bool btree_iter_advance_to_pos(struct btree_iter *iter,
@@ -1140,11 +1152,6 @@ err:
	return ret;
 }
 
-static void btree_iter_up(struct btree_iter *iter)
-{
-	btree_node_unlock(iter, iter->level++);
-}
-
 static int btree_iter_traverse_one(struct btree_iter *, unsigned long);
 
 static int __btree_iter_traverse_all(struct btree_trans *trans, int ret)
@@ -1233,9 +1240,9 @@ static inline bool btree_iter_good_node(struct btree_iter *iter,
	    !bch2_btree_node_relock(iter, l))
		return false;
 
-	if (check_pos <= 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
+	if (check_pos < 0 && btree_iter_pos_before_node(iter, iter->l[l].b))
		return false;
-	if (check_pos >= 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
+	if (check_pos > 0 && btree_iter_pos_after_node(iter, iter->l[l].b))
		return false;
	return true;
 }
@@ -1286,24 +1293,8 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
	if (unlikely(iter->level >= BTREE_MAX_DEPTH))
		return 0;
 
-	/*
-	 * XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
-	 * here unnecessary
-	 */
	iter->level = btree_iter_up_until_good_node(iter, 0);
 
-	/*
-	 * If we've got a btree node locked (i.e. we aren't about to relock the
-	 * root) - advance its node iterator if necessary:
-	 *
-	 * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
-	 */
-	if (is_btree_node(iter, iter->level)) {
-		BUG_ON(!btree_iter_pos_in_node(iter, iter->l[iter->level].b));
-
-		btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
-	}
-
	/*
	 * Note: iter->nodes[iter->level] may be temporarily NULL here - that
	 * would indicate to other code that we got to the end of the btree,
@@ -1338,7 +1329,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
	return 0;
 }
 
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
+static int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 {
	struct btree_trans *trans = iter->trans;
	int ret;
@@ -1351,6 +1342,30 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
	return ret;
 }
 
+/*
+ * Note:
+ * bch2_btree_iter_traverse() is for external users, btree_iter_traverse() is
+ * for internal btree iterator users
+ *
+ * bch2_btree_iter_traverse sets iter->real_pos to iter->pos,
+ * btree_iter_traverse() does not:
+ */
+static inline int __must_check
+btree_iter_traverse(struct btree_iter *iter)
+{
+	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
+		? __bch2_btree_iter_traverse(iter)
+		: 0;
+}
+
+int __must_check
+bch2_btree_iter_traverse(struct btree_iter *iter)
+{
+	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
+
+	return btree_iter_traverse(iter);
+}
+
 /* Iterate across nodes (leaf and interior nodes) */
 
 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
@@ -1361,10 +1376,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
	bch2_btree_iter_verify(iter);
 
-	if (iter->uptodate == BTREE_ITER_UPTODATE)
-		return iter->l[iter->level].b;
-
-	ret = bch2_btree_iter_traverse(iter);
+	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;
 
@@ -1375,7 +1387,6 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
	BUG_ON(bkey_cmp(b->key.k.p, iter->pos) < 0);
 
	iter->pos = iter->real_pos = b->key.k.p;
-	iter->uptodate = BTREE_ITER_UPTODATE;
 
	bch2_btree_iter_verify(iter);
 
@@ -1396,12 +1407,12 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 
	bch2_trans_cond_resched(iter->trans);
 
-	btree_iter_up(iter);
+	btree_node_unlock(iter, iter->level);
+	iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
+	iter->level++;
 
-	if (!bch2_btree_node_relock(iter, iter->level))
-		btree_iter_set_dirty(iter, BTREE_ITER_NEED_RELOCK);
-
-	ret = bch2_btree_iter_traverse(iter);
+	btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
+	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;
 
@@ -1415,21 +1426,16 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
	 * Haven't gotten to the end of the parent node: go back down to
	 * the next child node
	 */
+	btree_iter_set_search_pos(iter, bkey_successor(iter->pos));
 
-	/*
-	 * We don't really want to be unlocking here except we can't
-	 * directly tell btree_iter_traverse() "traverse to this level"
-	 * except by setting iter->level, so we have to unlock so we
-	 * don't screw up our lock invariants:
-	 */
-	if (btree_node_read_locked(iter, iter->level))
+	/* Unlock to avoid screwing up our lock invariants: */
	btree_node_unlock(iter, iter->level);
 
-	iter->pos = iter->real_pos = bkey_successor(iter->pos);
	iter->level = iter->min_depth;
-
	btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
-	ret = bch2_btree_iter_traverse(iter);
+	bch2_btree_iter_verify(iter);
+
+	ret = btree_iter_traverse(iter);
	if (ret)
		return NULL;
 
@@ -1437,7 +1443,6 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
	}
 
	iter->pos = iter->real_pos = b->key.k.p;
-	iter->uptodate = BTREE_ITER_UPTODATE;
 
	bch2_btree_iter_verify(iter);
 
@@ -1489,15 +1494,7 @@ out:
	bch2_btree_iter_verify(iter);
 }
 
-void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
-{
-	bkey_init(&iter->k);
-	iter->k.p = iter->pos = new_pos;
-
-	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
-}
-
-inline bool bch2_btree_iter_advance_pos(struct btree_iter *iter)
+inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 {
	struct bpos pos = iter->k.p;
	bool ret = bkey_cmp(pos, POS_MAX) != 0;
@@ -1508,7 +1505,7 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
	return ret;
 }
 
-inline bool bch2_btree_iter_rewind_pos(struct btree_iter *iter)
+inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
	struct bpos pos = bkey_start_pos(&iter->k);
	bool ret = bkey_cmp(pos, POS_MIN) != 0;
@@ -1550,150 +1547,57 @@ static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
	return ret;
 }
 
-/**
- * btree_iter_peek_uptodate - given an iterator that is uptodate, return the key
- * it currently points to
- */
-static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
-{
-	struct btree_iter_level *l = &iter->l[0];
-	struct bkey_s_c ret = { .k = &iter->k };
-
-	if (!bkey_deleted(&iter->k)) {
-		struct bkey_packed *_k =
-			__bch2_btree_node_iter_peek_all(&l->iter, l->b);
-
-		ret.v = bkeyp_val(&l->b->format, _k);
-
-		if (bch2_debug_check_iterators) {
-			struct bkey k = bkey_unpack_key(l->b, _k);
-
-			BUG_ON(memcmp(&k, &iter->k, sizeof(k)));
-		}
-
-		if (bch2_debug_check_bkeys)
-			bch2_bkey_debugcheck(iter->trans->c, l->b, ret);
-	}
-
-	return ret;
-}
-
-/**
- * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
- * current position
- */
-struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
-{
-	struct btree_iter_level *l = &iter->l[0];
-	struct bkey_s_c k;
-	int ret;
-
-	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
-	bch2_btree_iter_verify(iter);
-	bch2_btree_iter_verify_entry_exit(iter);
-
-	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
-
-	if (iter->uptodate == BTREE_ITER_UPTODATE &&
-	    !bkey_deleted(&iter->k))
-		return btree_iter_peek_uptodate(iter);
-
-	while (1) {
-		ret = bch2_btree_iter_traverse(iter);
-		if (unlikely(ret))
-			return bkey_s_c_err(ret);
-
-		k = __btree_iter_peek(iter, l);
-		if (likely(k.k))
-			break;
-
-		if (!btree_iter_set_pos_to_next_leaf(iter))
-			return bkey_s_c_null;
-	}
-
-	/*
-	 * iter->pos should always be equal to the key we just
-	 * returned - except extents can straddle iter->pos:
-	 */
-	if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
-		iter->pos = bkey_start_pos(k.k);
-
-	iter->real_pos = k.k->p;
-
-	iter->uptodate = BTREE_ITER_UPTODATE;
-
-	bch2_btree_iter_verify_entry_exit(iter);
-	bch2_btree_iter_verify(iter);
-	return k;
-}
-
-/**
- * bch2_btree_iter_next: returns first key greater than iterator's current
- * position
- */
-struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
-{
-	if (!bch2_btree_iter_advance_pos(iter))
-		return bkey_s_c_null;
-
-	return bch2_btree_iter_peek(iter);
-}
-
-static struct bkey_s_c __btree_trans_updates_peek(struct btree_iter *iter)
+static struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
+					       enum btree_id btree_id, struct bpos pos)
 {
-	struct bpos pos = btree_iter_search_key(iter);
-	struct btree_trans *trans = iter->trans;
	struct btree_insert_entry *i;
 
	trans_for_each_update2(trans, i)
-		if ((cmp_int(iter->btree_id, i->iter->btree_id) ?:
-		     bkey_cmp(pos, i->k->k.p)) <= 0)
+		if ((cmp_int(btree_id, i->iter->btree_id) ?:
+		     bkey_cmp(pos, i->k->k.p)) <= 0) {
+			if (btree_id == i->iter->btree_id)
+				return i->k;
			break;
+		}
 
-	return i < trans->updates2 + trans->nr_updates2 &&
-		iter->btree_id == i->iter->btree_id
-		? bkey_i_to_s_c(i->k)
-		: bkey_s_c_null;
-}
-
-static struct bkey_s_c __bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
-{
-	struct btree_iter_level *l = &iter->l[0];
-	struct bkey_s_c k = __btree_iter_peek(iter, l);
-	struct bkey_s_c u = __btree_trans_updates_peek(iter);
-
-	if (k.k && (!u.k || bkey_cmp(k.k->p, u.k->p) < 0))
-		return k;
-	if (u.k && bkey_cmp(u.k->p, l->b->key.k.p) <= 0) {
-		iter->k = *u.k;
-		return u;
-	}
-	return bkey_s_c_null;
+	return NULL;
 }
 
-struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
+static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool with_updates)
 {
	struct bpos search_key = btree_iter_search_key(iter);
+	struct bkey_i *next_update = with_updates
+		? btree_trans_peek_updates(iter->trans, iter->btree_id, search_key)
+		: NULL;
	struct bkey_s_c k;
	int ret;
 
	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
	bch2_btree_iter_verify(iter);
	bch2_btree_iter_verify_entry_exit(iter);
 
	btree_iter_set_search_pos(iter, search_key);
 
	while (1) {
-		ret = bch2_btree_iter_traverse(iter);
+		ret = btree_iter_traverse(iter);
		if (unlikely(ret))
			return bkey_s_c_err(ret);
 
-		k = __bch2_btree_iter_peek_with_updates(iter);
+		k = btree_iter_level_peek(iter, &iter->l[0]);
 
-		if (k.k && bkey_deleted(k.k)) {
-			if (!bch2_btree_iter_advance_pos(iter))
-				return bkey_s_c_null;
-			continue;
-		}
+		if (next_update &&
+		    bkey_cmp(next_update->k.p, iter->real_pos) <= 0)
+			k = bkey_i_to_s_c(next_update);
 
-		if (likely(k.k))
+		if (likely(k.k)) {
+			if (bkey_deleted(k.k)) {
+				btree_iter_set_search_pos(iter,
+						bkey_successor(k.k->p));
+				continue;
+			}
+
			break;
+		}
 
		if (!btree_iter_set_pos_to_next_leaf(iter))
			return bkey_s_c_null;
@@ -1706,13 +1610,40 @@ struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
	if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
		iter->pos = bkey_start_pos(k.k);
 
-	iter->uptodate = BTREE_ITER_UPTODATE;
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	return k;
 }
 
+/**
+ * bch2_btree_iter_peek: returns first key greater than or equal to iterator's
+ * current position
+ */
+struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
+{
+	return __btree_iter_peek(iter, false);
+}
+
+/**
+ * bch2_btree_iter_next: returns first key greater than iterator's current
+ * position
+ */
+struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
+{
+	if (!bch2_btree_iter_advance(iter))
+		return bkey_s_c_null;
+
+	return bch2_btree_iter_peek(iter);
+}
+
+struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
+{
+	return __btree_iter_peek(iter, true);
+}
+
 struct bkey_s_c bch2_btree_iter_next_with_updates(struct btree_iter *iter)
 {
-	if (!bch2_btree_iter_advance_pos(iter))
+	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;
 
	return bch2_btree_iter_peek_with_updates(iter);
@@ -1734,23 +1665,19 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 
	btree_iter_set_search_pos(iter, iter->pos);
 
-	if (iter->uptodate == BTREE_ITER_UPTODATE &&
-	    !bkey_deleted(&iter->k))
-		return btree_iter_peek_uptodate(iter);
-
	while (1) {
-		ret = bch2_btree_iter_traverse(iter);
+		ret = btree_iter_traverse(iter);
		if (unlikely(ret)) {
			k = bkey_s_c_err(ret);
			goto no_key;
		}
 
-		k = __btree_iter_peek(iter, l);
+		k = btree_iter_level_peek(iter, l);
		if (!k.k ||
		    ((iter->flags & BTREE_ITER_IS_EXTENTS)
		     ? bkey_cmp(bkey_start_pos(k.k), iter->pos) >= 0
		     : bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0))
-			k = __btree_iter_prev(iter, l);
+			k = btree_iter_level_prev(iter, l);
 
		if (likely(k.k))
			break;
@@ -1766,15 +1693,13 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
	/* Extents can straddle iter->pos: */
	if (bkey_cmp(k.k->p, iter->pos) < 0)
		iter->pos = k.k->p;
-	iter->real_pos = k.k->p;
-	iter->uptodate = BTREE_ITER_UPTODATE;
 out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	return k;
 no_key:
	/*
-	 * __btree_iter_peek() may have set iter->k to a key we didn't want, and
+	 * btree_iter_level_peek() may have set iter->k to a key we didn't want, and
	 * then we errored going to the previous leaf - make sure it's
	 * consistent with iter->pos:
	 */
@@ -1789,7 +1714,7 @@ no_key:
 */
 struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
 {
-	if (!bch2_btree_iter_rewind_pos(iter))
+	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;
 
	return bch2_btree_iter_peek_prev(iter);
@@ -1832,8 +1757,6 @@ __bch2_btree_iter_peek_slot_extents(struct btree_iter *iter)
 
	EBUG_ON(!iter->k.size);
 
-	iter->uptodate = BTREE_ITER_UPTODATE;
-
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
 
@@ -1852,17 +1775,14 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
 
-	if (iter->uptodate == BTREE_ITER_UPTODATE)
-		return btree_iter_peek_uptodate(iter);
-
	if (iter->flags & BTREE_ITER_IS_EXTENTS)
		return __bch2_btree_iter_peek_slot_extents(iter);
 
-	ret = bch2_btree_iter_traverse(iter);
+	ret = btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);
 
-	k = __btree_iter_peek_all(iter, l, &iter->k);
+	k = btree_iter_level_peek_all(iter, l, &iter->k);
 
	EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
 
@@ -1873,7 +1793,6 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
		k = (struct bkey_s_c) { &iter->k, NULL };
	}
 
-	iter->uptodate = BTREE_ITER_UPTODATE;
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);
	return k;
@@ -1881,7 +1800,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 {
-	if (!bch2_btree_iter_advance_pos(iter))
+	if (!bch2_btree_iter_advance(iter))
		return bkey_s_c_null;
 
	return bch2_btree_iter_peek_slot(iter);
@@ -1889,7 +1808,7 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 
 struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
 {
-	if (!bch2_btree_iter_rewind_pos(iter))
+	if (!bch2_btree_iter_rewind(iter))
		return bkey_s_c_null;
 
	return bch2_btree_iter_peek_slot(iter);
@@ -1903,7 +1822,7 @@ struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
	bch2_btree_iter_verify(iter);
 
-	ret = bch2_btree_iter_traverse(iter);
+	ret = btree_iter_traverse(iter);
	if (unlikely(ret))
		return bkey_s_c_err(ret);
 
@@ -2058,8 +1977,8 @@ struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
			continue;
 
		if (best &&
-		    bkey_cmp(bpos_diff(best->pos, pos),
-			     bpos_diff(iter->real_pos, pos)) < 0)
+		    bkey_cmp(bpos_diff(best->real_pos, pos),
+			     bpos_diff(iter->real_pos, pos)) > 0)
			continue;
 
		best = iter;
@@ -2091,6 +2010,7 @@ alloc_iter:
		__bch2_btree_iter_upgrade_nounlock(iter, 1);
 
	bch2_btree_iter_set_pos(iter, pos);
+	btree_iter_set_search_pos(iter, btree_iter_search_key(iter));
 
	return iter;
 }
@@ -145,15 +145,7 @@ void bch2_btree_iter_node_drop(struct btree_iter *, struct btree *);
 
 void bch2_btree_iter_reinit_node(struct btree_iter *, struct btree *);
 
-int __must_check __bch2_btree_iter_traverse(struct btree_iter *);
-
-static inline int __must_check
-bch2_btree_iter_traverse(struct btree_iter *iter)
-{
-	return iter->uptodate >= BTREE_ITER_NEED_RELOCK
-		? __bch2_btree_iter_traverse(iter)
-		: 0;
-}
+int __must_check bch2_btree_iter_traverse(struct btree_iter *);
 
 int bch2_btree_iter_traverse_all(struct btree_trans *);
 
@@ -175,9 +167,14 @@ struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *);
 
 struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *);
 
-bool bch2_btree_iter_advance_pos(struct btree_iter *);
-bool bch2_btree_iter_rewind_pos(struct btree_iter *);
-void bch2_btree_iter_set_pos(struct btree_iter *, struct bpos);
+bool bch2_btree_iter_advance(struct btree_iter *);
+bool bch2_btree_iter_rewind(struct btree_iter *);
+
+static inline void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
+{
+	bkey_init(&iter->k);
+	iter->k.p = iter->pos = new_pos;
+}
 
 /* Sort order for locking btree iterators: */
 static inline int btree_iter_lock_cmp(const struct btree_iter *l,
@@ -690,7 +690,7 @@ bch2_trans_commit_get_rw_cold(struct btree_trans *trans)
	return 0;
 }
 
-static int __bch2_trans_update2(struct btree_trans *trans,
+static void __bch2_trans_update2(struct btree_trans *trans,
				struct btree_insert_entry n)
 {
	struct btree_insert_entry *i;
@@ -711,15 +711,13 @@ static void __bch2_trans_update2(struct btree_trans *trans,
	else
		array_insert_item(trans->updates2, trans->nr_updates2,
				  i - trans->updates2, n);
-
-	return 0;
 }
 
-static int bch2_trans_update2(struct btree_trans *trans,
+static void bch2_trans_update2(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_i *insert)
 {
-	return __bch2_trans_update2(trans, (struct btree_insert_entry) {
+	__bch2_trans_update2(trans, (struct btree_insert_entry) {
		.bkey_type	= __btree_node_type(iter->level, iter->btree_id),
		.btree_id	= iter->btree_id,
		.level		= iter->level,
@@ -745,82 +743,81 @@ static int extent_update_to_keys(struct btree_trans *trans,
			       BTREE_ITER_NOT_EXTENTS);
	n.is_extent = false;
 
-	ret = __bch2_trans_update2(trans, n);
+	__bch2_trans_update2(trans, n);
	bch2_trans_iter_put(trans, n.iter);
-	return ret;
+	return 0;
 }
 
 static int extent_handle_overwrites(struct btree_trans *trans,
				    enum btree_id btree_id,
-				    struct bpos start, struct bpos end)
+				    struct bkey_i *insert)
 {
	struct btree_iter *iter, *update_iter;
+	struct bpos start = bkey_start_pos(&insert->k);
	struct bkey_i *update;
	struct bkey_s_c k;
	int ret = 0;
 
-	iter = bch2_trans_get_iter(trans, btree_id, start, BTREE_ITER_INTENT);
+	iter = bch2_trans_get_iter(trans, btree_id, start,
+				   BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_with_updates(iter);
 
	while (k.k && !(ret = bkey_err(k))) {
-		if (bkey_cmp(end, bkey_start_pos(k.k)) <= 0)
+		if (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) <= 0)
			break;
 
		if (bkey_cmp(bkey_start_pos(k.k), start) < 0) {
			update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
			if ((ret = PTR_ERR_OR_ZERO(update)))
-				goto err;
+				break;
 
			bkey_reassemble(update, k);
 
			bch2_cut_back(start, update);
 
-			update_iter = bch2_trans_copy_iter(trans, iter);
-			update_iter->flags &= ~BTREE_ITER_IS_EXTENTS;
-			bch2_btree_iter_set_pos(update_iter, update->k.p);
-			ret = bch2_trans_update2(trans, update_iter, update);
+			update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p,
							  BTREE_ITER_NOT_EXTENTS|
							  BTREE_ITER_INTENT);
+			bch2_trans_update2(trans, update_iter, update);
			bch2_trans_iter_put(trans, update_iter);
-			if (ret)
-				goto err;
		}
 
-		if (bkey_cmp(k.k->p, end) > 0) {
-			update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
-			if ((ret = PTR_ERR_OR_ZERO(update)))
-				goto err;
-
-			bkey_reassemble(update, k);
-			bch2_cut_front(end, update);
-
-			update_iter = bch2_trans_copy_iter(trans, iter);
-			update_iter->flags &= ~BTREE_ITER_IS_EXTENTS;
-			bch2_btree_iter_set_pos(update_iter, update->k.p);
-			ret = bch2_trans_update2(trans, update_iter, update);
-			bch2_trans_iter_put(trans, update_iter);
-			if (ret)
-				goto err;
-		} else {
+		if (bkey_cmp(k.k->p, insert->k.p) < 0 ||
+		    (!bkey_cmp(k.k->p, insert->k.p) && bkey_deleted(&insert->k))) {
			update = bch2_trans_kmalloc(trans, sizeof(struct bkey));
			if ((ret = PTR_ERR_OR_ZERO(update)))
-				goto err;
+				break;
 
-			update->k = *k.k;
-			set_bkey_val_u64s(&update->k, 0);
-			update->k.type = KEY_TYPE_deleted;
-			update->k.size = 0;
+			bkey_init(&update->k);
+			update->k.p = k.k->p;
 
-			update_iter = bch2_trans_copy_iter(trans, iter);
-			update_iter->flags &= ~BTREE_ITER_IS_EXTENTS;
-			bch2_btree_iter_set_pos(update_iter, update->k.p);
-			ret = bch2_trans_update2(trans, update_iter, update);
+			update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p,
							  BTREE_ITER_NOT_EXTENTS|
							  BTREE_ITER_INTENT);
+			bch2_trans_update2(trans, update_iter, update);
			bch2_trans_iter_put(trans, update_iter);
-			if (ret)
-				goto err;
		}
 
+		if (bkey_cmp(k.k->p, insert->k.p) > 0) {
+			update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+			if ((ret = PTR_ERR_OR_ZERO(update)))
+				break;
+
+			bkey_reassemble(update, k);
+			bch2_cut_front(insert->k.p, update);
+
+			update_iter = bch2_trans_get_iter(trans, btree_id, update->k.p,
+							  BTREE_ITER_NOT_EXTENTS|
+							  BTREE_ITER_INTENT);
+			bch2_trans_update2(trans, update_iter, update);
+			bch2_trans_iter_put(trans, update_iter);
+			break;
+		}
+
		k = bch2_btree_iter_next_with_updates(iter);
	}
-err:
	bch2_trans_iter_put(trans, iter);
 
	return ret;
 }
@@ -885,24 +882,16 @@ int __bch2_trans_commit(struct btree_trans *trans)
	/* Turn extents updates into keys: */
	trans_for_each_update(trans, i)
		if (i->is_extent) {
-			struct bpos start = bkey_start_pos(&i->k->k);
-
-			while (i + 1 < trans->updates + trans->nr_updates &&
-			       i[0].btree_id == i[1].btree_id &&
-			       !bkey_cmp(i[0].k->k.p, bkey_start_pos(&i[1].k->k)))
-				i++;
-
-			ret = extent_handle_overwrites(trans, i->btree_id,
-						       start, i->k->k.p);
-			if (ret)
+			ret = extent_handle_overwrites(trans, i->btree_id, i->k);
+			if (unlikely(ret))
				goto out;
		}
 
	trans_for_each_update(trans, i) {
		ret   = i->is_extent
			? extent_update_to_keys(trans, *i)
-			: __bch2_trans_update2(trans, *i);
-		if (ret)
+			: (__bch2_trans_update2(trans, *i), 0);
+		if (unlikely(ret))
			goto out;
	}
 
@@ -356,7 +356,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
		if (err)
			break;
 
-		bch2_btree_iter_next(iter);
+		bch2_btree_iter_advance(iter);
		i->from = iter->pos;
 
		err = flush_buf(i);
@@ -842,13 +842,13 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
		struct bch_extent_ptr *ptr, *ec_ptr = NULL;
 
		if (extent_has_stripe_ptr(k, s->key.k.p.offset)) {
-			bch2_btree_iter_next(iter);
+			bch2_btree_iter_advance(iter);
			continue;
		}
 
		block = bkey_matches_stripe(&s->key.v, k);
		if (block < 0) {
-			bch2_btree_iter_next(iter);
+			bch2_btree_iter_advance(iter);
			continue;
		}
 
@@ -158,7 +158,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
 
 const char *bch2_btree_ptr_invalid(const struct bch_fs *c, struct bkey_s_c k)
 {
-	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
+	if (bkey_val_u64s(k.k) > BCH_REPLICAS_MAX)
		return "value too big";
 
	return bch2_bkey_ptrs_invalid(c, k);
@@ -170,6 +170,22 @@ void bch2_btree_ptr_to_text(struct printbuf *out, struct bch_fs *c,
	bch2_bkey_ptrs_to_text(out, c, k);
 }
 
+const char *bch2_btree_ptr_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);
+
+	if (bkey_val_bytes(k.k) <= sizeof(*bp.v))
+		return "value too small";
+
+	if (bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
+		return "value too big";
+
+	if (bp.v->min_key.snapshot)
+		return "invalid min_key.snapshot";
+
+	return bch2_bkey_ptrs_invalid(c, k);
+}
+
 void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
			       struct bkey_s_c k)
 {
@@ -371,6 +371,7 @@ const char *bch2_btree_ptr_invalid(const struct bch_fs *, struct bkey_s_c);
 void bch2_btree_ptr_to_text(struct printbuf *, struct bch_fs *,
			    struct bkey_s_c);
 
+const char *bch2_btree_ptr_v2_invalid(const struct bch_fs *, struct bkey_s_c);
 void bch2_btree_ptr_v2_to_text(struct printbuf *, struct bch_fs *,
			       struct bkey_s_c);
 void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
@@ -383,7 +384,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id, unsigned, unsigned,
 }
 
 #define bch2_bkey_ops_btree_ptr_v2 (struct bkey_ops) {	\
-	.key_invalid	= bch2_btree_ptr_invalid,	\
+	.key_invalid	= bch2_btree_ptr_v2_invalid,	\
	.val_to_text	= bch2_btree_ptr_v2_to_text,	\
	.swab		= bch2_ptr_swab,		\
	.compat		= bch2_btree_ptr_v2_compat,	\
@@ -901,7 +901,7 @@ retry:
 
		if (!bkey_extent_is_data(k.k) &&
		    k.k->type != KEY_TYPE_reservation) {
-			bch2_btree_iter_next(iter);
+			bch2_btree_iter_advance(iter);
			continue;
		}
 
@@ -547,7 +547,7 @@ retry:
		i_sectors += k.k->size;
		bch2_bkey_buf_reassemble(&prev, c, k);
 
-		bch2_btree_iter_advance_pos(iter);
+		bch2_btree_iter_advance(iter);
	}
 fsck_err:
	if (ret == -EINTR)
@@ -703,7 +703,7 @@ retry:
 
		}
 
-		bch2_btree_iter_advance_pos(iter);
+		bch2_btree_iter_advance(iter);
	}
 
	hash_stop_chain(&trans, &h);
@@ -762,7 +762,7 @@ retry:
		if (ret)
			break;
 
-		bch2_btree_iter_advance_pos(iter);
+		bch2_btree_iter_advance(iter);
	}
 fsck_err:
	if (ret == -EINTR)
@@ -1389,7 +1389,7 @@ peek_nlinks:	link = genradix_iter_peek(&nlinks_iter, links);
		if (nlinks_pos == iter->pos.offset)
			genradix_iter_advance(&nlinks_iter, links);
 
-		bch2_btree_iter_advance_pos(iter);
+		bch2_btree_iter_advance(iter);
		bch2_trans_cond_resched(&trans);
	}
 fsck_err:
|
@ -542,12 +542,12 @@ found_slot:
|
||||
int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
|
||||
{
|
||||
struct btree_trans trans;
|
||||
struct btree_iter *iter;
|
||||
struct btree_iter *iter = NULL;
|
||||
struct bkey_i_inode_generation delete;
|
||||
struct bpos start = POS(inode_nr, 0);
|
||||
struct bpos end = POS(inode_nr + 1, 0);
|
||||
struct bch_inode_unpacked inode_u;
|
||||
struct bkey_s_c k;
|
||||
u64 bi_generation;
|
||||
int ret;
|
||||
|
||||
bch2_trans_init(&trans, c, 0, 0);
|
||||
@@ -571,8 +571,6 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
 retry:
	bch2_trans_begin(&trans);
 
-	bi_generation = 0;
-
	if (cached) {
		iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
					   BTREE_ITER_CACHED|BTREE_ITER_INTENT);
@@ -587,41 +585,26 @@ retry:
	if (ret)
		goto err;
 
-	bch2_fs_inconsistent_on(k.k->type != KEY_TYPE_inode, trans.c,
-				"inode %llu not found when deleting",
-				inode_nr);
-
-	switch (k.k->type) {
-	case KEY_TYPE_inode: {
-		struct bch_inode_unpacked inode_u;
-
-		if (!bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u))
-			bi_generation = inode_u.bi_generation + 1;
-		break;
-	}
-	case KEY_TYPE_inode_generation: {
-		struct bkey_s_c_inode_generation g =
-			bkey_s_c_to_inode_generation(k);
-		bi_generation = le32_to_cpu(g.v->bi_generation);
-		break;
-	}
-	}
-
-	if (!bi_generation) {
-		bkey_init(&delete.k);
-		delete.k.p.offset = inode_nr;
-	} else {
-		bkey_inode_generation_init(&delete.k_i);
-		delete.k.p.offset = inode_nr;
-		delete.v.bi_generation = cpu_to_le32(bi_generation);
+	if (k.k->type != KEY_TYPE_inode) {
+		bch2_fs_inconsistent(trans.c,
+				     "inode %llu not found when deleting",
+				     inode_nr);
+		ret = -EIO;
+		goto err;
	}
 
+	bch2_inode_unpack(bkey_s_c_to_inode(k), &inode_u);
+
+	bkey_inode_generation_init(&delete.k_i);
+	delete.k.p = iter->pos;
+	delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
+
	bch2_trans_update(&trans, iter, &delete.k_i, 0);
 
	ret = bch2_trans_commit(&trans, NULL, NULL,
				BTREE_INSERT_NOFAIL);
-	bch2_trans_iter_put(&trans, iter);
 err:
+	bch2_trans_iter_put(&trans, iter);
	if (ret == -EINTR)
		goto retry;
 
|
@ -214,9 +214,10 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
|
||||
(bkey_extent_is_allocation(&new->k) -
|
||||
bkey_extent_is_allocation(old.k));
|
||||
|
||||
*disk_sectors_delta += sectors *
|
||||
(int) (bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new)) -
|
||||
bch2_bkey_nr_ptrs_fully_allocated(old));
|
||||
*disk_sectors_delta += sectors * bch2_bkey_nr_ptrs_allocated(bkey_i_to_s_c(new));
|
||||
*disk_sectors_delta -= new->k.p.snapshot == old.k->p.snapshot
|
||||
? sectors * bch2_bkey_nr_ptrs_fully_allocated(old)
|
||||
: 0;
|
||||
|
||||
if (!*should_check_enospc &&
|
||||
(new_replicas > bch2_bkey_replicas(c, old) ||
|
||||
|
@ -53,7 +53,7 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
|
||||
while ((k = bch2_btree_iter_peek(iter)).k &&
|
||||
!(ret = bkey_err(k))) {
|
||||
if (!bch2_bkey_has_device(k, dev_idx)) {
|
||||
bch2_btree_iter_next(iter);
|
||||
bch2_btree_iter_advance(iter);
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -639,7 +639,7 @@ next:
|
||||
atomic64_add(k.k->size * bch2_bkey_nr_ptrs_allocated(k),
|
||||
&stats->sectors_seen);
|
||||
next_nondata:
|
||||
bch2_btree_iter_next(iter);
|
||||
bch2_btree_iter_advance(iter);
|
||||
bch2_trans_cond_resched(&trans);
|
||||
}
|
||||
out:
|
||||
@@ -835,13 +835,38 @@ static enum data_cmd migrate_btree_pred(struct bch_fs *c, void *arg,
	return migrate_pred(c, arg, bkey_i_to_s_c(&b->key), io_opts, data_opts);
 }
 
+static bool bformat_needs_redo(struct bkey_format *f)
+{
+	unsigned i;
+
+	for (i = 0; i < f->nr_fields; i++) {
+		unsigned unpacked_bits = bch2_bkey_format_current.bits_per_field[i];
+		u64 unpacked_mask = ~((~0ULL << 1) << (unpacked_bits - 1));
+		u64 field_offset = le64_to_cpu(f->field_offset[i]);
+
+		if (f->bits_per_field[i] > unpacked_bits)
+			return true;
+
+		if ((f->bits_per_field[i] == unpacked_bits) && field_offset)
+			return true;
+
+		if (((field_offset + ((1ULL << f->bits_per_field[i]) - 1)) &
+		     unpacked_mask) <
+		    field_offset)
+			return true;
+	}
+
+	return false;
+}
+
 static enum data_cmd rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
					    struct btree *b,
					    struct bch_io_opts *io_opts,
					    struct data_opts *data_opts)
 {
	if (b->version_ondisk != c->sb.version ||
-	    btree_node_need_rewrite(b)) {
+	    btree_node_need_rewrite(b) ||
+	    bformat_needs_redo(&b->format)) {
		data_opts->target		= 0;
		data_opts->nr_replicas		= 1;
		data_opts->btree_insert_flags	= 0;
@@ -851,6 +876,26 @@ static enum data_cmd rewrite_old_nodes_pred(struct bch_fs *c, void *arg,
	return DATA_SKIP;
 }
 
+int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
+{
+	int ret;
+
+	ret = bch2_move_btree(c,
+			      0,		POS_MIN,
+			      BTREE_ID_NR,	POS_MAX,
+			      rewrite_old_nodes_pred, c, stats) ?: ret;
+	if (!ret) {
+		mutex_lock(&c->sb_lock);
+		c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_EXTENTS_ABOVE_BTREE_UPDATES_DONE;
+		c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_BFORMAT_OVERFLOW_DONE;
+		c->disk_sb.sb->version_min = c->disk_sb.sb->version;
+		bch2_write_super(c);
+		mutex_unlock(&c->sb_lock);
+	}
+
+	return ret;
+}
+
 int bch2_data_job(struct bch_fs *c,
		  struct bch_move_stats *stats,
		  struct bch_ioctl_data op)
@@ -900,17 +945,7 @@ int bch2_data_job(struct bch_fs *c,
		ret = bch2_replicas_gc2(c) ?: ret;
		break;
	case BCH_DATA_OP_REWRITE_OLD_NODES:
-		ret = bch2_move_btree(c,
-				      op.start_btree,	op.start_pos,
-				      op.end_btree,	op.end_pos,
-				      rewrite_old_nodes_pred, &op, stats) ?: ret;
-
-		if (!ret) {
-			mutex_lock(&c->sb_lock);
-			c->disk_sb.sb->version_min = c->disk_sb.sb->version;
-			bch2_write_super(c);
-			mutex_unlock(&c->sb_lock);
-		}
+		ret = bch2_scan_old_btree_nodes(c, stats);
		break;
	default:
		ret = -EINVAL;
@@ -52,6 +52,8 @@ typedef enum data_cmd (*move_pred_fn)(struct bch_fs *, void *,
				      struct bkey_s_c,
				      struct bch_io_opts *, struct data_opts *);
 
+int bch2_scan_old_btree_nodes(struct bch_fs *, struct bch_move_stats *);
+
 int bch2_move_data(struct bch_fs *,
		   enum btree_id, struct bpos,
		   enum btree_id, struct bpos,
@@ -312,6 +312,9 @@ int bch2_rebalance_start(struct bch_fs *c)
 {
	struct task_struct *p;
 
+	if (c->rebalance.thread)
+		return 0;
+
	if (c->opts.nochanges)
		return 0;
 
@@ -16,6 +16,7 @@
 #include "journal_io.h"
 #include "journal_reclaim.h"
 #include "journal_seq_blacklist.h"
+#include "move.h"
 #include "quota.h"
 #include "recovery.h"
 #include "replicas.h"
@@ -1207,7 +1208,28 @@ use_clean:
		bch_verbose(c, "quotas done");
	}
 
+	if (!(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_EXTENTS_ABOVE_BTREE_UPDATES_DONE)) ||
+	    !(c->sb.compat & (1ULL << BCH_COMPAT_FEAT_BFORMAT_OVERFLOW_DONE))) {
+		struct bch_move_stats stats = { 0 };
+
+		bch_verbose(c, "scanning for old btree nodes");
+		ret = bch2_fs_read_write(c);
+		if (ret)
+			goto err;
+
+		ret = bch2_scan_old_btree_nodes(c, &stats);
+		if (ret)
+			goto err;
+		bch_verbose(c, "scanning for old btree nodes done");
+	}
+
	mutex_lock(&c->sb_lock);
+	if (c->opts.version_upgrade) {
+		c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
+		c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
+		write_sb = true;
+	}
+
	if (!test_bit(BCH_FS_ERROR, &c->flags)) {
		c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_ALLOC_INFO;
		write_sb = true;
@@ -1260,6 +1282,15 @@ int bch2_fs_initialize(struct bch_fs *c)
	bch_notice(c, "initializing new filesystem");
 
	mutex_lock(&c->sb_lock);
+	c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_EXTENTS_ABOVE_BTREE_UPDATES_DONE;
+	c->disk_sb.sb->compat[0] |= 1ULL << BCH_COMPAT_FEAT_BFORMAT_OVERFLOW_DONE;
+
+	if (c->opts.version_upgrade) {
+		c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
+		c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
+		bch2_write_super(c);
+	}
+
	for_each_online_member(ca, c, i)
		bch2_mark_dev_superblock(c, ca, 0);
	mutex_unlock(&c->sb_lock);
|
@ -965,11 +965,6 @@ int bch2_fs_mark_dirty(struct bch_fs *c)
|
||||
*/
|
||||
|
||||
mutex_lock(&c->sb_lock);
|
||||
if (c->opts.version_upgrade) {
|
||||
c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
|
||||
c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
|
||||
}
|
||||
|
||||
SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
|
||||
c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALWAYS;
|
||||
ret = bch2_write_super(c);
|
||||
|