Update bcachefs sources to ff3a76e1af bcachefs: Change need_whiteout_for_snapshot() to clone iterator

Kent Overstreet 2021-12-19 19:37:29 -05:00
parent b19d9f92e1
commit d06f5690fa
13 changed files with 1254 additions and 164 deletions
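The headline change, shown in full in the need_whiteout_for_snapshot() hunk further down: instead of building a fresh iterator from a btree ID and position, the helper now clones the caller's iterator and widens it to all snapshots before probing for keys it would overwrite in an ancestor snapshot. The following is a condensed sketch of the new shape, assembled from that hunk; the lines after the bch2_snapshot_is_ancestor() check (the bkey_whiteout() test and the iterator teardown) are not visible on this page and are reconstructed, so treat them as illustrative rather than authoritative.

	/* Illustrative sketch only -- condensed from the diff below */
	static int need_whiteout_for_snapshot(struct btree_trans *trans,
					      struct btree_iter *orig)
	{
		struct btree_iter iter;
		struct bkey_s_c k;
		u32 snapshot = orig->pos.snapshot;
		int ret = 0;

		if (!bch2_snapshot_parent(trans->c, snapshot))
			return 0;

		/* Clone the caller's iterator, then widen it to see every snapshot: */
		bch2_trans_copy_iter(&iter, orig);
		iter.flags &= ~BTREE_ITER_FILTER_SNAPSHOTS;
		iter.flags |= BTREE_ITER_ALL_SNAPSHOTS;
		bch2_btree_iter_advance(&iter);

		for_each_btree_key_continue_norestart(iter, 0, k, ret) {
			if (bkey_cmp(k.k->p, orig->pos))
				break;

			/* Reconstructed tail: a visible key in an ancestor snapshot
			 * means the deletion must leave a whiteout behind. */
			if (bch2_snapshot_is_ancestor(trans->c, snapshot,
						      k.k->p.snapshot)) {
				ret = !bkey_whiteout(k.k);
				break;
			}
		}
		bch2_trans_iter_exit(trans, &iter);

		return ret;
	}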

@@ -1 +1 @@
-635ca475f4f40ddcb2976f8f20a89df4c574aa22
+ff3a76e1af04f51506f45e0f71d53f7e6dd51a75

.gitignore
@@ -18,4 +18,4 @@ tests/__pycache__/
 mount/target
 mount.bcachefs
-doc/bcachefs.5.rst
+bcachefs-principles-of-operation.*

@@ -28,15 +28,6 @@ PYTEST_CMD?=$(shell \
 	)
 PYTEST:=$(PYTEST_CMD) $(PYTEST_ARGS)
 
-RST2MAN_ARGS?=
-RST2MAN_CMD?=$(shell \
-	command -v rst2man \
-	|| which rst2man \
-	|| command -v rst2man.py \
-	|| which rst2man.py \
-	)
-RST2MAN:=$(RST2MAN_CMD) $(RST2MAN_ARGS)
-
 CARGO_ARGS=
 CARGO=cargo $(CARGO_ARGS)
 CARGO_PROFILE=release
@@ -108,18 +99,6 @@ TAGS:
 tags:
 	ctags -R .
 
-DOCSRC := opts_macro.h bcachefs.5.rst.tmpl
-DOCGENERATED := bcachefs.5 doc/bcachefs.5.rst
-DOCDEPS := $(addprefix ./doc/,$(DOCSRC))
-bcachefs.5: $(DOCDEPS) libbcachefs/opts.h
-ifneq (,$(RST2MAN_CMD))
-	$(CC) doc/opts_macro.h -I libbcachefs -I include -E 2>/dev/null \
-	    | doc/macro2rst.py
-	$(RST2MAN) doc/bcachefs.5.rst bcachefs.5
-else
-	@echo "WARNING: no rst2man found! Man page not generated."
-endif
-
 SRCS=$(shell find . -type f -iname '*.c')
 DEPS=$(SRCS:.c=.d)
 -include $(DEPS)
@@ -184,6 +163,11 @@ clean:
 deb: all
 	debuild -us -uc -nc -b -i -I
 
+bcachefs-principles-of-operation.pdf: bcachefs-principles-of-operation.tex
+	pdflatex bcachefs-principles-of-operation.tex && pdflatex bcachefs-principles-of-operation.tex
+
+doc: bcachefs-principles-of-operation.pdf
+
 .PHONY: update-bcachefs-sources
 update-bcachefs-sources:
 	git rm -rf --ignore-unmatch libbcachefs

(File diff suppressed because it is too large.)

@@ -117,23 +117,6 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
 	return nr;
 }
 
-static void extent_sort_append(struct bch_fs *c,
-			       struct bkey_format *f,
-			       struct btree_nr_keys *nr,
-			       struct bkey_packed **out,
-			       struct bkey_s k)
-{
-	if (!bkey_deleted(k.k)) {
-		if (!bch2_bkey_pack_key(*out, k.k, f))
-			memcpy_u64s_small(*out, k.k, BKEY_U64s);
-
-		memcpy_u64s_small(bkeyp_val(f, *out), k.v, bkey_val_u64s(k.k));
-
-		btree_keys_account_key_add(nr, 0, *out);
-		*out = bkey_next(*out);
-	}
-}
-
 /* Sort + repack in a new format: */
 struct btree_nr_keys
 bch2_sort_repack(struct bset *dst, struct btree *src,
@@ -144,6 +127,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
 	struct bkey_format *in_f = &src->format;
 	struct bkey_packed *in, *out = vstruct_last(dst);
 	struct btree_nr_keys nr;
+	bool transform = memcmp(out_f, &src->format, sizeof(*out_f));
 
 	memset(&nr, 0, sizeof(nr));
@@ -151,8 +135,10 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
 		if (filter_whiteouts && bkey_deleted(in))
 			continue;
 
-		if (bch2_bkey_transform(out_f, out, bkey_packed(in)
-				       ? in_f : &bch2_bkey_format_current, in))
+		if (!transform)
+			bkey_copy(out, in);
+		else if (bch2_bkey_transform(out_f, out, bkey_packed(in)
+					     ? in_f : &bch2_bkey_format_current, in))
 			out->format = KEY_FORMAT_LOCAL_BTREE;
 		else
 			bch2_bkey_unpack(src, (void *) out, in);
@@ -165,47 +151,6 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
 	return nr;
 }
 
-/* Sort, repack, and call bch2_bkey_normalize() to drop stale pointers: */
-struct btree_nr_keys
-bch2_sort_repack_merge(struct bch_fs *c,
-		       struct bset *dst, struct btree *src,
-		       struct btree_node_iter *iter,
-		       struct bkey_format *out_f,
-		       bool filter_whiteouts)
-{
-	struct bkey_packed *out = vstruct_last(dst), *k_packed;
-	struct bkey_buf k;
-	struct btree_nr_keys nr;
-
-	memset(&nr, 0, sizeof(nr));
-	bch2_bkey_buf_init(&k);
-
-	while ((k_packed = bch2_btree_node_iter_next_all(iter, src))) {
-		if (filter_whiteouts && bkey_deleted(k_packed))
-			continue;
-
-		/*
-		 * NOTE:
-		 * bch2_bkey_normalize may modify the key we pass it (dropping
-		 * stale pointers) and we don't have a write lock on the src
-		 * node; we have to make a copy of the entire key before calling
-		 * normalize
-		 */
-		bch2_bkey_buf_realloc(&k, c, k_packed->u64s + BKEY_U64s);
-		bch2_bkey_unpack(src, k.k, k_packed);
-
-		if (filter_whiteouts &&
-		    bch2_bkey_normalize(c, bkey_i_to_s(k.k)))
-			continue;
-
-		extent_sort_append(c, out_f, &nr, &out, bkey_i_to_s(k.k));
-	}
-
-	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
-
-	bch2_bkey_buf_exit(&k, c);
-	return nr;
-}
-
 static inline int sort_keys_cmp(struct btree *b,
 				struct bkey_packed *l,
 				struct bkey_packed *r)

@@ -37,11 +37,6 @@ struct btree_nr_keys
 bch2_sort_repack(struct bset *, struct btree *,
		 struct btree_node_iter *,
		 struct bkey_format *, bool);
-struct btree_nr_keys
-bch2_sort_repack_merge(struct bch_fs *,
-		       struct bset *, struct btree *,
-		       struct btree_node_iter *,
-		       struct bkey_format *, bool);
 
 unsigned bch2_sort_keys(struct bkey_packed *,
			struct sort_iter *, bool);

@@ -391,16 +391,10 @@ void bch2_btree_sort_into(struct bch_fs *c,
 
 	bch2_btree_node_iter_init_from_start(&src_iter, src);
 
-	if (btree_node_is_extents(src))
-		nr = bch2_sort_repack_merge(c, btree_bset_first(dst),
-				src, &src_iter,
-				&dst->format,
-				true);
-	else
-		nr = bch2_sort_repack(btree_bset_first(dst),
-				src, &src_iter,
-				&dst->format,
-				true);
+	nr = bch2_sort_repack(btree_bset_first(dst),
+			src, &src_iter,
+			&dst->format,
+			true);
 
 	bch2_time_stats_update(&c->times[BCH_TIME_btree_node_sort],
			       start_time);

@@ -155,11 +155,19 @@ void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
	 * goes to 0, and it's safe because we have the node intent
	 * locked:
	 */
-	atomic64_sub(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+	if (!b->c.lock.readers)
+		atomic64_sub(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_sub(*b->c.lock.readers, readers);
+
 	btree_node_lock_type(trans->c, b, SIX_LOCK_write);
-	atomic64_add(__SIX_VAL(read_lock, readers),
-		     &b->c.lock.state.counter);
+
+	if (!b->c.lock.readers)
+		atomic64_add(__SIX_VAL(read_lock, readers),
+			     &b->c.lock.state.counter);
+	else
+		this_cpu_add(*b->c.lock.readers, readers);
 }
 
 bool __bch2_btree_node_relock(struct btree_trans *trans,
@@ -369,19 +377,16 @@ bool __bch2_btree_node_lock(struct btree_trans *trans,
	if (six_trylock_type(&b->c.lock, type))
		return true;
 
-#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking_path_idx = path->idx;
	trans->locking_pos	= pos;
	trans->locking_btree_id	= path->btree_id;
	trans->locking_level	= level;
	trans->locking		= b;
-#endif
 
	ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p) == 0;
 
-#ifdef CONFIG_BCACHEFS_DEBUG
	trans->locking = NULL;
-#endif
 
	if (ret)
		bch2_time_stats_update(&trans->c->times[lock_to_time_stat(type)],
				       start_time);
@@ -2796,12 +2801,10 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
 
	trans->srcu_idx = srcu_read_lock(&c->btree_trans_barrier);
 
-#ifdef CONFIG_BCACHEFS_DEBUG
	trans->pid = current->pid;
	mutex_lock(&c->btree_trans_lock);
	list_add(&trans->list, &c->btree_trans_list);
	mutex_unlock(&c->btree_trans_lock);
-#endif
 }
 
 static void check_btree_paths_leaked(struct btree_trans *trans)
@@ -2840,11 +2843,9 @@ void bch2_trans_exit(struct btree_trans *trans)
 
	check_btree_paths_leaked(trans);
 
-#ifdef CONFIG_BCACHEFS_DEBUG
	mutex_lock(&c->btree_trans_lock);
	list_del(&trans->list);
	mutex_unlock(&c->btree_trans_lock);
-#endif
 
	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
 
@@ -2888,7 +2889,6 @@ bch2_btree_path_node_to_text(struct printbuf *out,
	bch2_bpos_to_text(out, btree_node_pos(_b, cached));
 }
 
-#ifdef CONFIG_BCACHEFS_DEBUG
 static bool trans_has_locks(struct btree_trans *trans)
 {
	struct btree_path *path;
@@ -2898,11 +2898,9 @@ static bool trans_has_locks(struct btree_trans *trans)
			return true;
	return false;
 }
-#endif
 
 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 {
-#ifdef CONFIG_BCACHEFS_DEBUG
	struct btree_trans *trans;
	struct btree_path *path;
	struct btree *b;
@@ -2956,7 +2954,6 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
		}
	}
	mutex_unlock(&c->btree_trans_lock);
-#endif
 }
 
 void bch2_fs_btree_iter_exit(struct bch_fs *c)

@@ -366,7 +366,6 @@ struct btree_trans_commit_hook {
 
 struct btree_trans {
	struct bch_fs		*c;
-#ifdef CONFIG_BCACHEFS_DEBUG
	struct list_head	list;
	struct btree		*locking;
	unsigned		locking_path_idx;
@@ -374,7 +373,6 @@ struct btree_trans {
	u8			locking_btree_id;
	u8			locking_level;
	pid_t			pid;
-#endif
	unsigned long		ip;
	int			srcu_idx;

@@ -1271,22 +1271,24 @@ err:
  * When deleting, check if we need to emit a whiteout (because we're overwriting
  * something in an ancestor snapshot)
  */
-static int need_whiteout_for_snapshot(struct btree_trans *trans,
-				      enum btree_id btree_id, struct bpos pos)
+static int need_whiteout_for_snapshot(struct btree_trans *trans, struct btree_iter *orig)
 {
	struct btree_iter iter;
	struct bkey_s_c k;
-	u32 snapshot = pos.snapshot;
+	u32 snapshot = orig->pos.snapshot;
	int ret;
 
-	if (!bch2_snapshot_parent(trans->c, pos.snapshot))
+	if (!bch2_snapshot_parent(trans->c, snapshot))
		return 0;
 
-	pos.snapshot++;
+	bch2_trans_copy_iter(&iter, orig);
+	iter.flags &= ~BTREE_ITER_FILTER_SNAPSHOTS;
+	iter.flags |= BTREE_ITER_ALL_SNAPSHOTS;
 
-	for_each_btree_key_norestart(trans, iter, btree_id, pos,
-			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
-		if (bkey_cmp(k.k->p, pos))
+	bch2_btree_iter_advance(&iter);
+
+	for_each_btree_key_continue_norestart(iter, 0, k, ret) {
+		if (bkey_cmp(k.k->p, orig->pos))
			break;
 
		if (bch2_snapshot_is_ancestor(trans->c, snapshot,
@@ -1312,6 +1314,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 
	BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
	BUG_ON(bpos_cmp(k->k.p, iter->path->pos));
+	BUG_ON(bpos_cmp(k->k.p, iter->pos));
 
	n = (struct btree_insert_entry) {
		.flags		= flags,
@@ -1332,7 +1335,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 
	if (bkey_deleted(&n.k->k) &&
	    (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)) {
-		int ret = need_whiteout_for_snapshot(trans, n.btree_id, n.k->k.p);
+		int ret = need_whiteout_for_snapshot(trans, iter);
 
		if (unlikely(ret < 0))
			return ret;

@@ -2705,7 +2705,8 @@ int bch2_truncate(struct user_namespace *mnt_userns,
				U64_MAX, &i_sectors_delta);
	i_sectors_acct(c, inode, NULL, i_sectors_delta);
 
-	BUG_ON(!inode->v.i_size && inode->v.i_blocks);
+	WARN_ON(!inode->v.i_size && inode->v.i_blocks &&
+		!bch2_journal_error(&c->journal));
 
	if (unlikely(ret))
		goto err;

@@ -262,21 +262,6 @@ static long data_progress_to_text(struct printbuf *out, struct bch_fs *c)
	return ret;
 }
 
-static int fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
-{
-	struct bch_fs_usage_online *fs_usage = bch2_fs_usage_read(c);
-
-	if (!fs_usage)
-		return -ENOMEM;
-
-	bch2_fs_usage_to_text(out, c, fs_usage);
-	percpu_up_read(&c->mark_lock);
-	kfree(fs_usage);
-	return 0;
-}
-
 static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c)
 {
	struct btree_trans trans;
@@ -386,9 +371,6 @@ SHOW(bch2_fs)
 
	/* Debugging: */
 
-	if (attr == &sysfs_alloc_debug)
-		return fs_alloc_debug_to_text(&out, c) ?: out.pos - buf;
-
	if (attr == &sysfs_journal_debug) {
		bch2_journal_debug_to_text(&out, &c->journal);
		return out.pos - buf;
@@ -580,7 +562,6 @@ STORE(bch2_fs_internal)
 SYSFS_OPS(bch2_fs_internal);
 
 struct attribute *bch2_fs_internal_files[] = {
-	&sysfs_alloc_debug,
	&sysfs_journal_debug,
	&sysfs_journal_pins,
	&sysfs_btree_updates,
@@ -588,17 +569,21 @@ struct attribute *bch2_fs_internal_files[] = {
	&sysfs_btree_cache,
	&sysfs_btree_key_cache,
	&sysfs_btree_transactions,
+	&sysfs_new_stripes,
	&sysfs_stripes_heap,
	&sysfs_open_buckets,
+	&sysfs_io_timers_read,
+	&sysfs_io_timers_write,
+
+	&sysfs_trigger_journal_flush,
+	&sysfs_trigger_gc,
+	&sysfs_prune_cache,
 
	&sysfs_read_realloc_races,
	&sysfs_extent_migrate_done,
	&sysfs_extent_migrate_raced,
 
-	&sysfs_trigger_journal_flush,
-	&sysfs_trigger_gc,
	&sysfs_gc_gens_pos,
-	&sysfs_prune_cache,
 
	&sysfs_copy_gc_enabled,
	&sysfs_copy_gc_wait,
@@ -607,11 +592,6 @@ struct attribute *bch2_fs_internal_files[] = {
	&sysfs_rebalance_work,
	sysfs_pd_controller_files(rebalance),
 
-	&sysfs_new_stripes,
-
-	&sysfs_io_timers_read,
-	&sysfs_io_timers_write,
-
	&sysfs_data_op_data_progress,
 
	&sysfs_internal_uuid,

@@ -14,12 +14,14 @@ static void delete_test_keys(struct bch_fs *c)
	int ret;
 
	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
-				      POS(0, 0), POS(0, U64_MAX),
+				      SPOS(0, 0, U32_MAX),
+				      SPOS(0, U64_MAX, U32_MAX),
				      NULL);
	BUG_ON(ret);
 
	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      POS(0, 0), POS(0, U64_MAX),
+				      SPOS(0, 0, U32_MAX),
+				      SPOS(0, U64_MAX, U32_MAX),
				      NULL);
	BUG_ON(ret);
 }
@@ -541,10 +543,11 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
	u64 i;
 
	bch2_trans_init(&trans, c, 0, 0);
-	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
+			     SPOS(0, 0, U32_MAX), 0);
 
	for (i = 0; i < nr; i++) {
-		bch2_btree_iter_set_pos(&iter, POS(0, test_rand()));
+		bch2_btree_iter_set_pos(&iter, SPOS(0, test_rand(), U32_MAX));
 
		k = bch2_btree_iter_peek(&iter);
		ret = bkey_err(k);
@@ -567,7 +570,7 @@ static int rand_mixed_trans(struct btree_trans *trans,
	struct bkey_s_c k;
	int ret;
 
-	bch2_btree_iter_set_pos(iter, POS(0, pos));
+	bch2_btree_iter_set_pos(iter, SPOS(0, pos, U32_MAX));
 
	k = bch2_btree_iter_peek(iter);
	ret = bkey_err(k);
@@ -594,7 +597,8 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
	u64 i, rand;
 
	bch2_trans_init(&trans, c, 0, 0);
-	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
+			     SPOS(0, 0, U32_MAX), 0);
 
	for (i = 0; i < nr; i++) {
		rand = test_rand();
@@ -673,7 +677,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
 
	bch2_trans_init(&trans, c, 0, 0);
 
-	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, SPOS(0, 0, U32_MAX),
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		insert.k.p = iter.pos;
@@ -703,7 +707,8 @@ static int seq_lookup(struct bch_fs *c, u64 nr)
 
	bch2_trans_init(&trans, c, 0, 0);
 
-	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
+	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
+			   SPOS(0, 0, U32_MAX), 0, k, ret)
		;
	bch2_trans_iter_exit(&trans, &iter);
 
@@ -720,7 +725,8 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
 
	bch2_trans_init(&trans, c, 0, 0);
 
-	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
+			   SPOS(0, 0, U32_MAX),
			   BTREE_ITER_INTENT, k, ret) {
		struct bkey_i_cookie u;
@@ -745,8 +751,7 @@ static int seq_delete(struct bch_fs *c, u64 nr)
	int ret;
 
	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      POS(0, 0), POS(0, U64_MAX),
-				      NULL);
+				      SPOS(0, 0, U32_MAX), POS_MAX, NULL);
	if (ret)
		bch_err(c, "error in seq_delete: %i", ret);
	return ret;