Update bcachefs sources to 4a4139a563 bcachefs: Fix extent_sort_fix_overlapping()

Kent Overstreet 2020-03-02 17:10:54 -05:00
parent 14e736e627
commit 2d238045d3
11 changed files with 179 additions and 177 deletions

View File

@@ -1 +1 @@
1210f6c925974abcbd07b6cb7209a24482170d8c
4a4139a563c4ee59f22ef23219fc2a3bb990b7b8

View File

@@ -311,6 +311,25 @@ static inline int extent_sort_fix_overlapping_cmp(struct btree *b,
cmp_int((unsigned long) r, (unsigned long) l);
}
/*
* The algorithm in extent_sort_fix_overlapping() relies on keys in the same
* bset being ordered by start offset - but 0 size whiteouts (which are always
* KEY_TYPE_deleted) break this ordering, so we need to skip over them:
*/
static void extent_iter_advance(struct sort_iter *iter, unsigned idx)
{
struct sort_iter_set *i = iter->data + idx;
do {
i->k = bkey_next_skip_noops(i->k, i->end);
} while (i->k != i->end && bkey_deleted(i->k));
if (i->k == i->end)
array_remove_item(iter->data, iter->used, idx);
else
__sort_iter_sift(iter, idx, extent_sort_fix_overlapping_cmp);
}
struct btree_nr_keys
bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
struct sort_iter *iter)
@@ -323,19 +342,26 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
struct bkey_s l, r;
struct btree_nr_keys nr;
struct bkey_on_stack split;
unsigned i;
memset(&nr, 0, sizeof(nr));
bkey_on_stack_init(&split);
sort_iter_sort(iter, extent_sort_fix_overlapping_cmp);
for (i = 0; i < iter->used;) {
if (bkey_deleted(iter->data[i].k))
__sort_iter_advance(iter, i,
extent_sort_fix_overlapping_cmp);
else
i++;
}
while (!sort_iter_end(iter)) {
l = __bkey_disassemble(b, _l->k, &l_unpacked);
if (iter->used == 1) {
extent_sort_append(c, f, &nr, dst->start, &prev, l);
sort_iter_advance(iter,
extent_sort_fix_overlapping_cmp);
extent_iter_advance(iter, 0);
continue;
}
@@ -344,15 +370,13 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
/* If current key and next key don't overlap, just append */
if (bkey_cmp(l.k->p, bkey_start_pos(r.k)) <= 0) {
extent_sort_append(c, f, &nr, dst->start, &prev, l);
sort_iter_advance(iter,
extent_sort_fix_overlapping_cmp);
extent_iter_advance(iter, 0);
continue;
}
/* Skip 0 size keys */
if (!r.k->size) {
__sort_iter_advance(iter, 1,
extent_sort_fix_overlapping_cmp);
extent_iter_advance(iter, 1);
continue;
}
@@ -369,8 +393,7 @@ bch2_extent_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
if (_l->k > _r->k) {
/* l wins, trim r */
if (bkey_cmp(l.k->p, r.k->p) >= 0) {
__sort_iter_advance(iter, 1,
extent_sort_fix_overlapping_cmp);
extent_iter_advance(iter, 1);
} else {
bch2_cut_front_s(l.k->p, r);
extent_save(b, _r->k, r.k);

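As an aside on the ordering described by the new comment above: a rough, self-contained toy (not bcachefs types, and an assumed example layout) of how a zero-size whiteout can leave start offsets unordered even though the bset is ordered by end position, which is the case extent_iter_advance() now skips past.

#include <stdio.h>

/* toy model, not bcachefs types: a key covers [start, start + size) */
struct toy_key { unsigned start, size; };

int main(void)
{
	/*
	 * bset order is by end position: a zero-size whiteout at 7 sorts
	 * ahead of the extent [5, 10), so the start offsets run 7, 5 -
	 * no longer monotonic.
	 */
	struct toy_key bset[] = { { 7, 0 }, { 5, 5 } };
	unsigned i;

	for (i = 0; i < 2; i++)
		printf("end=%u start=%u\n",
		       bset[i].start + bset[i].size, bset[i].start);
	return 0;
}
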
View File

@@ -709,15 +709,15 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
unsigned *whiteout_u64s, int write,
bool have_retry)
{
struct bkey_packed *k, *prev = NULL;
struct bpos prev_pos = POS_MIN;
struct bkey_packed *k;
struct bkey prev = KEY(0, 0, 0);
struct bpos prev_data = POS_MIN;
bool seen_non_whiteout = false;
unsigned version;
const char *err;
int ret = 0;
if (i == &b->data->keys) {
if (!b->written) {
/* These indicate that we read the wrong btree node: */
btree_err_on(BTREE_NODE_ID(b->data) != b->btree_id,
BTREE_ERR_MUST_RETRY, c, b, i,
@@ -853,25 +853,28 @@ static int validate_bset(struct bch_fs *c, struct btree *b,
if (!seen_non_whiteout &&
(!bkey_whiteout(k) ||
(bkey_cmp(prev_pos, bkey_start_pos(u.k)) > 0))) {
(bkey_cmp(prev.p, bkey_start_pos(u.k)) > 0))) {
*whiteout_u64s = k->_data - i->_data;
seen_non_whiteout = true;
} else if (bkey_cmp(prev_data, bkey_start_pos(u.k)) > 0 ||
bkey_cmp(prev_pos, u.k->p) > 0) {
bkey_cmp(prev.p, u.k->p) > 0) {
char buf1[80];
char buf2[80];
bch2_bkey_to_text(&PBUF(buf1), &prev);
bch2_bkey_to_text(&PBUF(buf2), u.k);
bch2_dump_bset(b, i, 0);
btree_err(BTREE_ERR_FATAL, c, b, i,
"keys out of order: %llu:%llu > %llu:%llu",
prev_pos.inode,
prev_pos.offset,
u.k->p.inode,
bkey_start_offset(u.k));
"keys out of order: %s > %s",
buf1, buf2);
/* XXX: repair this */
}
if (!bkey_deleted(u.k))
prev_data = u.k->p;
prev_pos = u.k->p;
prev = *u.k;
prev = k;
k = bkey_next_skip_noops(k, vstruct_last(i));
}

View File

@@ -35,6 +35,26 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
return pos;
}
static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
struct btree *b)
{
return bkey_cmp(iter->pos, b->data->min_key) < 0;
}
static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
struct btree *b)
{
return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
}
static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
struct btree *b)
{
return iter->btree_id == b->btree_id &&
!btree_iter_pos_before_node(iter, b) &&
!btree_iter_pos_after_node(iter, b);
}
/* Btree node locking: */
void bch2_btree_node_unlock_write(struct btree *b, struct btree_iter *iter)
@@ -399,6 +419,8 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
if (iter->uptodate > BTREE_ITER_NEED_PEEK)
return;
BUG_ON(!btree_iter_pos_in_node(iter, b));
bch2_btree_node_iter_verify(&l->iter, b);
/*
@@ -736,26 +758,6 @@ static void btree_iter_verify_new_node(struct btree_iter *iter, struct btree *b)
btree_node_unlock(iter, b->level + 1);
}
static inline bool btree_iter_pos_before_node(struct btree_iter *iter,
struct btree *b)
{
return bkey_cmp(iter->pos, b->data->min_key) < 0;
}
static inline bool btree_iter_pos_after_node(struct btree_iter *iter,
struct btree *b)
{
return bkey_cmp(b->key.k.p, btree_iter_search_key(iter)) < 0;
}
static inline bool btree_iter_pos_in_node(struct btree_iter *iter,
struct btree *b)
{
return iter->btree_id == b->btree_id &&
!btree_iter_pos_before_node(iter, b) &&
!btree_iter_pos_after_node(iter, b);
}
static inline void __btree_iter_init(struct btree_iter *iter,
unsigned level)
{
@@ -1373,6 +1375,10 @@ static inline bool btree_iter_set_pos_to_prev_leaf(struct btree_iter *iter)
return true;
}
/**
* btree_iter_peek_uptodate - given an iterator that is uptodate, return the key
* it currently points to
*/
static inline struct bkey_s_c btree_iter_peek_uptodate(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
@@ -1409,7 +1415,8 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
if (iter->uptodate == BTREE_ITER_UPTODATE)
if (iter->uptodate == BTREE_ITER_UPTODATE &&
!bkey_deleted(&iter->k))
return btree_iter_peek_uptodate(iter);
while (1) {
@@ -1503,7 +1510,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
if (iter->uptodate == BTREE_ITER_UPTODATE)
if (iter->uptodate == BTREE_ITER_UPTODATE &&
!bkey_deleted(&iter->k))
return btree_iter_peek_uptodate(iter);
while (1) {
@@ -1655,33 +1663,15 @@ __bch2_btree_iter_peek_slot(struct btree_iter *iter)
{
struct btree_iter_level *l = &iter->l[0];
struct bkey_s_c k;
int ret;
if (iter->flags & BTREE_ITER_IS_EXTENTS)
return __bch2_btree_iter_peek_slot_extents(iter);
recheck:
while ((k = __btree_iter_peek_all(iter, l, &iter->k)).k &&
bkey_deleted(k.k) &&
bkey_cmp(k.k->p, iter->pos) == 0)
bch2_btree_node_iter_advance(&l->iter, l->b);
k = __btree_iter_peek_all(iter, l, &iter->k);
/*
* If we got to the end of the node, check if we need to traverse to the
* next node:
*/
if (unlikely(!k.k && btree_iter_pos_after_node(iter, l->b))) {
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
ret = bch2_btree_iter_traverse(iter);
if (unlikely(ret))
return bkey_s_c_err(ret);
EBUG_ON(k.k && bkey_deleted(k.k) && bkey_cmp(k.k->p, iter->pos) == 0);
goto recheck;
}
if (!k.k ||
bkey_deleted(k.k) ||
bkey_cmp(iter->pos, k.k->p)) {
if (!k.k || bkey_cmp(iter->pos, k.k->p)) {
/* hole */
bkey_init(&iter->k);
iter->k.p = iter->pos;
@@ -1713,8 +1703,12 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
{
bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
/* XXX directly setting iter->pos is wrong */
iter->pos = btree_type_successor(iter->btree_id, iter->k.p);
if (unlikely(btree_iter_pos_after_node(iter, iter->l[0].b)))
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
if (unlikely(iter->uptodate != BTREE_ITER_UPTODATE)) {
/*
* XXX: when we just need to relock we should be able to avoid
@@ -1726,8 +1720,7 @@ struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
return bch2_btree_iter_peek_slot(iter);
}
if (!bkey_deleted(&iter->k))
bch2_btree_node_iter_advance(&iter->l[0].iter, iter->l[0].b);
btree_iter_advance_to_pos(iter, &iter->l[0], -1);
btree_iter_set_dirty(iter, BTREE_ITER_NEED_PEEK);

View File

@@ -867,8 +867,8 @@ static void btree_update_reparent(struct btree_update *as,
* just transfer the journal pin to the new interior update so
* btree_update_nodes_written() can drop it.
*/
bch2_journal_pin_add_if_older(&c->journal, &child->journal,
&as->journal, interior_update_flush);
bch2_journal_pin_copy(&c->journal, &as->journal,
&child->journal, interior_update_flush);
bch2_journal_pin_drop(&c->journal, &child->journal);
as->journal_seq = max(as->journal_seq, child->journal_seq);
@@ -1049,13 +1049,13 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
* oldest pin of any of the nodes we're freeing. We'll release the pin
* when the new nodes are persistent and reachable on disk:
*/
bch2_journal_pin_add_if_older(&c->journal, &w->journal,
&as->journal, interior_update_flush);
bch2_journal_pin_copy(&c->journal, &as->journal,
&w->journal, interior_update_flush);
bch2_journal_pin_drop(&c->journal, &w->journal);
w = btree_prev_write(b);
bch2_journal_pin_add_if_older(&c->journal, &w->journal,
&as->journal, interior_update_flush);
bch2_journal_pin_copy(&c->journal, &as->journal,
&w->journal, interior_update_flush);
bch2_journal_pin_drop(&c->journal, &w->journal);
mutex_unlock(&c->btree_interior_update_lock);

View File

@@ -172,6 +172,9 @@ void bch2_btree_journal_key(struct btree_trans *trans,
struct journal *j = &c->journal;
struct btree *b = iter->l[0].b;
struct btree_write *w = btree_current_write(b);
u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
? trans->journal_res.seq
: j->replay_journal_seq;
EBUG_ON(iter->level || b->level);
EBUG_ON(trans->journal_res.ref !=
@@ -183,16 +186,10 @@ void bch2_btree_journal_key(struct btree_trans *trans,
cpu_to_le64(trans->journal_res.seq);
}
if (unlikely(!journal_pin_active(&w->journal))) {
u64 seq = likely(!(trans->flags & BTREE_INSERT_JOURNAL_REPLAY))
? trans->journal_res.seq
: j->replay_journal_seq;
bch2_journal_pin_add(j, seq, &w->journal,
btree_node_write_idx(b) == 0
? btree_node_flush0
: btree_node_flush1);
}
bch2_journal_pin_add(j, seq, &w->journal,
btree_node_write_idx(b) == 0
? btree_node_flush0
: btree_node_flush1);
if (unlikely(!btree_node_dirty(b)))
set_btree_node_dirty(b);

View File

@@ -1444,8 +1444,7 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct bkey_s_c k;
struct bkey_alloc_unpacked u;
struct bkey_i_alloc *a;
u16 *dst_sectors;
bool overflow;
u16 *dst_sectors, orig_sectors;
int ret;
ret = trans_get_key(trans, BTREE_ID_ALLOC,
@@ -1502,13 +1501,12 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
dst_sectors = !p.ptr.cached
? &u.dirty_sectors
: &u.cached_sectors;
orig_sectors = *dst_sectors;
overflow = checked_add(*dst_sectors, sectors);
if (overflow) {
if (checked_add(*dst_sectors, sectors)) {
bch2_fs_inconsistent(c,
"bucket sector count overflow: %u + %lli > U16_MAX",
*dst_sectors, sectors);
orig_sectors, sectors);
/* return an error indicating that we need full fsck */
ret = -EIO;
goto out;

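For context on why the pre-add value is now captured in orig_sectors: a minimal, self-contained sketch with a hypothetical checked_add_u16() helper (not the bcachefs macro) - the helper updates the destination in place, so only a snapshot taken beforehand gives a meaningful number to print in the overflow message.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical helper: add into a u16, report overflow */
static bool checked_add_u16(uint16_t *dst, int64_t n)
{
	int64_t v = (int64_t) *dst + n;

	*dst = (uint16_t) v;
	return v < 0 || v > UINT16_MAX;
}

int main(void)
{
	uint16_t sectors = 65000, orig_sectors = sectors;

	if (checked_add_u16(&sectors, 1000))
		/* printing 'sectors' here would show the wrapped value */
		printf("bucket sector count overflow: %u + %d > U16_MAX\n",
		       orig_sectors, 1000);
	return 0;
}
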
View File

@@ -290,38 +290,6 @@ void bch2_journal_pin_put(struct journal *j, u64 seq)
}
}
static inline void __journal_pin_add(struct journal *j,
u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
BUG_ON(journal_pin_active(pin));
BUG_ON(!atomic_read(&pin_list->count));
atomic_inc(&pin_list->count);
pin->seq = seq;
pin->flush = flush_fn;
list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
/*
* If the journal is currently full, we might want to call flush_fn
* immediately:
*/
journal_wake(j);
}
void bch2_journal_pin_add(struct journal *j, u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
spin_lock(&j->lock);
__journal_pin_add(j, seq, pin, flush_fn);
spin_unlock(&j->lock);
}
static inline void __journal_pin_drop(struct journal *j,
struct journal_entry_pin *pin)
{
@@ -354,42 +322,46 @@ void bch2_journal_pin_drop(struct journal *j,
spin_unlock(&j->lock);
}
void bch2_journal_pin_update(struct journal *j, u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
void __bch2_journal_pin_add(struct journal *j, u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
spin_lock(&j->lock);
if (pin->seq != seq) {
__journal_pin_drop(j, pin);
__journal_pin_add(j, seq, pin, flush_fn);
} else {
struct journal_entry_pin_list *pin_list =
journal_seq_pin(j, seq);
__journal_pin_drop(j, pin);
list_move(&pin->list, &pin_list->list);
}
BUG_ON(!atomic_read(&pin_list->count));
atomic_inc(&pin_list->count);
pin->seq = seq;
pin->flush = flush_fn;
list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
spin_unlock(&j->lock);
/*
* If the journal is currently full, we might want to call flush_fn
* immediately:
*/
journal_wake(j);
}
void bch2_journal_pin_add_if_older(struct journal *j,
struct journal_entry_pin *src_pin,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
void bch2_journal_pin_copy(struct journal *j,
struct journal_entry_pin *dst,
struct journal_entry_pin *src,
journal_pin_flush_fn flush_fn)
{
spin_lock(&j->lock);
if (journal_pin_active(src_pin) &&
(!journal_pin_active(pin) ||
src_pin->seq < pin->seq)) {
__journal_pin_drop(j, pin);
__journal_pin_add(j, src_pin->seq, pin, flush_fn);
}
spin_unlock(&j->lock);
if (journal_pin_active(src) &&
(!journal_pin_active(dst) || src->seq < dst->seq))
__bch2_journal_pin_add(j, src->seq, dst, flush_fn);
}
/**
* bch2_journal_pin_flush: ensure journal pin callback is no longer running
*/
void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
{
BUG_ON(journal_pin_active(pin));

View File

@@ -29,16 +29,24 @@ journal_seq_pin(struct journal *j, u64 seq)
}
void bch2_journal_pin_put(struct journal *, u64);
void bch2_journal_pin_add(struct journal *, u64, struct journal_entry_pin *,
journal_pin_flush_fn);
void bch2_journal_pin_update(struct journal *, u64, struct journal_entry_pin *,
journal_pin_flush_fn);
void bch2_journal_pin_drop(struct journal *, struct journal_entry_pin *);
void bch2_journal_pin_add_if_older(struct journal *,
struct journal_entry_pin *,
struct journal_entry_pin *,
journal_pin_flush_fn);
void __bch2_journal_pin_add(struct journal *, u64, struct journal_entry_pin *,
journal_pin_flush_fn);
static inline void bch2_journal_pin_add(struct journal *j, u64 seq,
struct journal_entry_pin *pin,
journal_pin_flush_fn flush_fn)
{
if (unlikely(!journal_pin_active(pin)))
__bch2_journal_pin_add(j, seq, pin, flush_fn);
}
void bch2_journal_pin_copy(struct journal *,
struct journal_entry_pin *,
struct journal_entry_pin *,
journal_pin_flush_fn);
void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
void bch2_journal_do_discards(struct journal *);

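Taken together, the checks visible in this diff mean the reworked pin API behaves as: bch2_journal_pin_add() only installs a pin that isn't already active, and bch2_journal_pin_copy() only takes the source's sequence when it is active and older. A rough standalone sketch of that behaviour, using simplified hypothetical stand-in types rather than the real journal structures:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for struct journal_entry_pin, illustration only */
struct toy_pin { uint64_t seq; bool active; };

/* mirrors the check in the new inline bch2_journal_pin_add(): no-op if already pinned */
static void toy_pin_add(struct toy_pin *pin, uint64_t seq)
{
	if (!pin->active) {
		pin->seq = seq;
		pin->active = true;
	}
}

/* mirrors the check in bch2_journal_pin_copy(): take src only if active and older */
static void toy_pin_copy(struct toy_pin *dst, const struct toy_pin *src)
{
	if (src->active && (!dst->active || src->seq < dst->seq)) {
		dst->seq = src->seq;
		dst->active = true;
	}
}

int main(void)
{
	struct toy_pin child = { .seq = 10, .active = true };
	struct toy_pin as = { 0 };

	toy_pin_copy(&as, &child);	/* as now pins seq 10 */
	toy_pin_add(&as, 12);		/* no-op: already active */
	printf("as pins seq %llu\n", (unsigned long long) as.seq);
	return 0;
}
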
View File

@@ -549,6 +549,10 @@ void bch2_fs_stop(struct bch_fs *c)
cancel_work_sync(&c->journal_seq_blacklist_gc_work);
mutex_lock(&c->state_lock);
bch2_fs_read_only(c);
mutex_unlock(&c->state_lock);
for_each_member_device(ca, c, i)
if (ca->kobj.state_in_sysfs &&
ca->disk_sb.bdev)
@@ -572,10 +576,6 @@ void bch2_fs_stop(struct bch_fs *c)
closure_sync(&c->cl);
closure_debug_destroy(&c->cl);
mutex_lock(&c->state_lock);
bch2_fs_read_only(c);
mutex_unlock(&c->state_lock);
/* btree prefetch might have kicked off reads in the background: */
bch2_btree_flush_all_reads(c);

View File

@@ -18,7 +18,7 @@ static void delete_test_keys(struct bch_fs *c)
NULL);
BUG_ON(ret);
ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
POS(0, 0), POS(0, U64_MAX),
NULL);
BUG_ON(ret);
@@ -37,7 +37,7 @@ static void test_delete(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
@@ -69,7 +69,7 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, k.k.p,
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
@@ -107,7 +107,7 @@ static void test_iterate(struct bch_fs *c, u64 nr)
bkey_cookie_init(&k.k_i);
k.k.p.offset = i;
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
NULL, NULL, 0);
BUG_ON(ret);
}
@@ -116,9 +116,13 @@ static void test_iterate(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
POS_MIN, 0, k, ret)
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
POS_MIN, 0, k, ret) {
if (k.k->p.inode)
break;
BUG_ON(k.k->p.offset != i++);
}
BUG_ON(i != nr);
@@ -202,7 +206,7 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
bkey_cookie_init(&k.k_i);
k.k.p.offset = i * 2;
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
NULL, NULL, 0);
BUG_ON(ret);
}
@@ -211,8 +215,11 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
0, k, ret) {
if (k.k->p.inode)
break;
BUG_ON(k.k->p.offset != i);
i += 2;
}
@@ -224,11 +231,12 @@ static void test_iterate_slots(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
BTREE_ITER_SLOTS, k, ret) {
BUG_ON(k.k->p.offset != i);
BUG_ON(bkey_deleted(k.k) != (i & 1));
BUG_ON(k.k->p.offset != i++);
i++;
if (i == nr * 2)
break;
}
@@ -307,7 +315,7 @@ static void test_peek_end(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS, POS_MIN, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
k = bch2_btree_iter_peek(iter);
BUG_ON(k.k);
@@ -421,7 +429,7 @@ static void rand_insert(struct bch_fs *c, u64 nr)
k.k.p.offset = test_rand();
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_DIRENTS, &k.k_i));
__bch2_btree_insert(&trans, BTREE_ID_XATTRS, &k.k_i));
BUG_ON(ret);
}
@@ -439,7 +447,7 @@ static void rand_lookup(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < nr; i++) {
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
POS(0, test_rand()), 0);
k = bch2_btree_iter_peek(iter);
@@ -460,7 +468,7 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for (i = 0; i < nr; i++) {
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
POS(0, test_rand()), 0);
k = bch2_btree_iter_peek(iter);
@@ -490,7 +498,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
struct bkey_s_c k;
int ret = 0;
iter = bch2_trans_get_iter(trans, BTREE_ID_DIRENTS, pos,
iter = bch2_trans_get_iter(trans, BTREE_ID_XATTRS, pos,
BTREE_ITER_INTENT);
ret = PTR_ERR_OR_ZERO(iter);
if (ret)
@@ -542,7 +550,7 @@ static void seq_insert(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
insert.k.p = iter->pos;
@@ -566,7 +574,7 @@ static void seq_lookup(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret)
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN, 0, k, ret)
;
bch2_trans_exit(&trans);
}
@@ -580,7 +588,7 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
BTREE_ITER_INTENT, k, ret) {
struct bkey_i_cookie u;
@@ -598,7 +606,7 @@ static void seq_delete(struct bch_fs *c, u64 nr)
{
int ret;
ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
POS(0, 0), POS(0, U64_MAX),
NULL);
BUG_ON(ret);