Update bcachefs sources to 8a65cc4951 bcachefs: Improve bch2_dev_freespace_init()

Kent Overstreet 2022-11-29 01:15:08 -05:00
parent 934a84dfaf
commit 5a5a6c25a9
27 changed files with 498 additions and 414 deletions

View File

@@ -1 +1 @@
-5963d1b1a4a31af4282e6710c7948eb215160386
+8a65cc495143fa43fb3c100de3c2b14519d3135f

View File

@@ -385,15 +385,10 @@ void bch2_alloc_v4_swab(struct bkey_s k)
 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
 	struct bch_alloc_v4 _a;
-	const struct bch_alloc_v4 *a = &_a;
+	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
 	const struct bch_backpointer *bps;
 	unsigned i;
 
-	if (k.k->type == KEY_TYPE_alloc_v4)
-		a = bkey_s_c_to_alloc_v4(k).v;
-	else
-		bch2_alloc_to_v4(k, &_a);
-
 	prt_newline(out);
 	printbuf_indent_add(out, 2);
@@ -430,7 +425,7 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 	printbuf_indent_sub(out, 4);
 }
 
-void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
+void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
 {
 	if (k.k->type == KEY_TYPE_alloc_v4) {
 		int d;
@@ -512,9 +507,11 @@ static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_
 	 * Not sketchy at doing it this way, nope...
 	 */
 	struct bkey_i_alloc_v4 *ret =
-		bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
-	if (!IS_ERR(ret))
+		bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
+	if (!IS_ERR(ret)) {
 		bkey_reassemble(&ret->k_i, k);
+		memset((void *) ret + bkey_bytes(k.k), 0, sizeof(struct bch_backpointer));
+	}
 	return ret;
 }
@@ -574,9 +571,8 @@ int bch2_alloc_read(struct bch_fs *c)
 			continue;
 
 		ca = bch_dev_bkey_exists(c, k.k->p.inode);
-		bch2_alloc_to_v4(k, &a);
 
-		*bucket_gen(ca, k.k->p.offset) = a.gen;
+		*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
 	}
 	bch2_trans_iter_exit(&trans, &iter);
 
@@ -610,7 +606,7 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
 	    a->data_type != BCH_DATA_need_discard)
 		return 0;
 
-	k = bch2_trans_kmalloc(trans, sizeof(*k));
+	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
 	if (IS_ERR(k))
 		return PTR_ERR(k);
@@ -665,7 +661,8 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 			  unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_alloc_v4 old_a, *new_a;
+	struct bch_alloc_v4 old_a_convert, *new_a;
+	const struct bch_alloc_v4 *old_a;
 	u64 old_lru, new_lru;
 	int ret = 0;
 
@@ -675,13 +672,13 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 	 */
 	BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
 
-	bch2_alloc_to_v4(old, &old_a);
+	old_a = bch2_alloc_to_v4(old, &old_a_convert);
 	new_a = &bkey_i_to_alloc_v4(new)->v;
 
 	new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
 
-	if (new_a->dirty_sectors > old_a.dirty_sectors ||
-	    new_a->cached_sectors > old_a.cached_sectors) {
+	if (new_a->dirty_sectors > old_a->dirty_sectors ||
+	    new_a->cached_sectors > old_a->cached_sectors) {
 		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
 		new_a->io_time[WRITE]= max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
 		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
@@ -695,10 +692,10 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 		SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
 	}
 
-	if (old_a.data_type != new_a->data_type ||
+	if (old_a->data_type != new_a->data_type ||
 	    (new_a->data_type == BCH_DATA_free &&
-	     alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
-		ret =   bch2_bucket_do_index(trans, old, &old_a, false) ?:
+	     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
+		ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
 			bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_a, true);
 		if (ret)
 			return ret;
@@ -708,7 +705,7 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
 	    !new_a->io_time[READ])
 		new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
 
-	old_lru = alloc_lru_idx(old_a);
+	old_lru = alloc_lru_idx(*old_a);
 	new_lru = alloc_lru_idx(*new_a);
 
 	if (old_lru != new_lru) {
@@ -731,7 +728,8 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct bch_dev *ca;
-	struct bch_alloc_v4 a;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a;
 	unsigned discard_key_type, freespace_key_type;
 	struct bkey_s_c alloc_k, k;
 	struct printbuf buf = PRINTBUF;
@@ -756,15 +754,15 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
 	if (!ca->mi.freespace_initialized)
 		return 0;
 
-	bch2_alloc_to_v4(alloc_k, &a);
+	a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
-	discard_key_type = a.data_type == BCH_DATA_need_discard
+	discard_key_type = a->data_type == BCH_DATA_need_discard
 		? KEY_TYPE_set : 0;
-	freespace_key_type = a.data_type == BCH_DATA_free
+	freespace_key_type = a->data_type == BCH_DATA_free
 		? KEY_TYPE_set : 0;
 
 	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
-	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, a));
+	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
 
 	k = bch2_btree_iter_peek_slot(discard_iter);
 	ret = bkey_err(k);
@@ -835,7 +833,8 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct btree_iter alloc_iter;
 	struct bkey_s_c alloc_k;
-	struct bch_alloc_v4 a;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a;
 	u64 genbits;
 	struct bpos pos;
 	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
@@ -860,16 +859,16 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	bch2_alloc_to_v4(alloc_k, &a);
+	a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
-	if (fsck_err_on(a.data_type != state ||
+	if (fsck_err_on(a->data_type != state ||
 			(state == BCH_DATA_free &&
-			 genbits != alloc_freespace_genbits(a)), c,
+			 genbits != alloc_freespace_genbits(*a)), c,
 			"%s\n  incorrectly set in %s index (free %u, genbits %llu should be %llu)",
 			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
 			bch2_btree_ids[iter->btree_id],
-			a.data_type == state,
-			genbits >> 56, alloc_freespace_genbits(a) >> 56))
+			a->data_type == state,
+			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
 		goto delete;
 out:
 err:
@@ -937,7 +936,8 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter lru_iter;
-	struct bch_alloc_v4 a;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a;
 	struct bkey_s_c alloc_k, k;
 	struct printbuf buf = PRINTBUF;
 	struct printbuf buf2 = PRINTBUF;
@@ -951,20 +951,20 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 	if (ret)
 		return ret;
 
-	bch2_alloc_to_v4(alloc_k, &a);
+	a = bch2_alloc_to_v4(alloc_k, &a_convert);
 
-	if (a.data_type != BCH_DATA_cached)
+	if (a->data_type != BCH_DATA_cached)
 		return 0;
 
 	bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
-			     POS(alloc_k.k->p.inode, a.io_time[READ]), 0);
+			     POS(alloc_k.k->p.inode, a->io_time[READ]), 0);
 
 	k = bch2_btree_iter_peek_slot(&lru_iter);
 	ret = bkey_err(k);
 	if (ret)
 		goto err;
 
-	if (fsck_err_on(!a.io_time[READ], c,
+	if (fsck_err_on(!a->io_time[READ], c,
 			"cached bucket with read_time 0\n"
 			"  %s",
 		(printbuf_reset(&buf),
@@ -977,26 +977,24 @@ static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
 		(printbuf_reset(&buf),
 		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
 		(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
-		u64 read_time = a.io_time[READ];
-
-		if (!a.io_time[READ])
-			a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
+		u64 read_time = a->io_time[READ] ?:
+			atomic64_read(&c->io_clock[READ].now);
 
 		ret = bch2_lru_set(trans,
 				   alloc_k.k->p.inode,
 				   alloc_k.k->p.offset,
-				   &a.io_time[READ]);
+				   &read_time);
 		if (ret)
 			goto err;
 
-		if (a.io_time[READ] != read_time) {
+		if (a->io_time[READ] != read_time) {
 			struct bkey_i_alloc_v4 *a_mut =
 				bch2_alloc_to_v4_mut(trans, alloc_k);
 			ret = PTR_ERR_OR_ZERO(a_mut);
 			if (ret)
 				goto err;
 
-			a_mut->v.io_time[READ] = a.io_time[READ];
+			a_mut->v.io_time[READ] = read_time;
 			ret = bch2_trans_update(trans, alloc_iter,
 				&a_mut->k_i, BTREE_TRIGGER_NORUN);
 			if (ret)
@@ -1302,34 +1300,110 @@ void bch2_do_invalidates(struct bch_fs *c)
 	percpu_ref_put(&c->writes);
 }
 
-static int bucket_freespace_init(struct btree_trans *trans, struct btree_iter *iter,
-				 struct bkey_s_c k, struct bch_dev *ca)
-{
-	struct bch_alloc_v4 a;
-
-	if (iter->pos.offset >= ca->mi.nbuckets)
-		return 1;
-
-	bch2_alloc_to_v4(k, &a);
-	return bch2_bucket_do_index(trans, k, &a, true);
-}
-
 static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
 {
 	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
+	struct bpos end = POS(ca->dev_idx, ca->mi.nbuckets);
 	struct bch_member *m;
 	int ret;
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
-			POS(ca->dev_idx, ca->mi.first_bucket),
-			BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
-			NULL, NULL, BTREE_INSERT_LAZY_RW,
-			bucket_freespace_init(&trans, &iter, k, ca));
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_alloc,
+			     POS(ca->dev_idx, ca->mi.first_bucket),
+			     BTREE_ITER_PREFETCH);
+	/*
+	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
+	 * freespace/need_discard/need_gc_gens btrees as needed:
+	 */
+	while (1) {
+		bch2_trans_begin(&trans);
+		ret = 0;
+
+		if (bkey_ge(iter.pos, end))
+			break;
+
+		k = bch2_btree_iter_peek_slot(&iter);
+		ret = bkey_err(k);
+		if (ret)
+			goto bkey_err;
+
+		if (k.k->type) {
+			/*
+			 * We process live keys in the alloc btree one at a
+			 * time:
+			 */
+			struct bch_alloc_v4 a_convert;
+			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
+
+			ret =   bch2_bucket_do_index(&trans, k, a, true) ?:
+				bch2_trans_commit(&trans, NULL, NULL,
+						  BTREE_INSERT_LAZY_RW|
+						  BTREE_INSERT_NOFAIL);
+			if (ret)
+				goto bkey_err;
+
+			bch2_btree_iter_advance(&iter);
+		} else {
+			/*
+			 * When there's a hole, process a whole range of keys
+			 * all at once:
+			 *
+			 * This is similar to how extent btree iterators in
+			 * slots mode will synthesize a whole range - a
+			 * KEY_TYPE_deleted extent.
+			 *
+			 * But alloc keys aren't extents (they have zero size),
+			 * so we're open coding it here:
+			 */
+			struct btree_iter iter2;
+			struct bkey_i *freespace;
+			struct bpos next;
+
+			bch2_trans_copy_iter(&iter2, &iter);
+			k = bch2_btree_iter_peek_upto(&iter2,
+					bkey_min(bkey_min(end,
+							  iter.path->l[0].b->key.k.p),
+						 POS(iter.pos.inode, iter.pos.offset + U32_MAX - 1)));
+			next = iter2.pos;
+			ret = bkey_err(k);
+			bch2_trans_iter_exit(&trans, &iter2);
+
+			BUG_ON(next.offset >= iter.pos.offset + U32_MAX);
+
+			if (ret)
+				goto bkey_err;
+
+			freespace = bch2_trans_kmalloc(&trans, sizeof(*freespace));
+			ret = PTR_ERR_OR_ZERO(freespace);
+			if (ret)
+				goto bkey_err;
+
+			bkey_init(&freespace->k);
+			freespace->k.type	= KEY_TYPE_set;
+			freespace->k.p		= iter.pos;
+
+			bch2_key_resize(&freespace->k, next.offset - iter.pos.offset);
+
+			ret =   __bch2_btree_insert(&trans, BTREE_ID_freespace, freespace) ?:
+				bch2_trans_commit(&trans, NULL, NULL,
+						  BTREE_INSERT_LAZY_RW|
+						  BTREE_INSERT_NOFAIL);
+			if (ret)
+				goto bkey_err;
+
+			bch2_btree_iter_set_pos(&iter, next);
+		}
+bkey_err:
+		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+			continue;
+		if (ret)
+			break;
+	}
+	bch2_trans_iter_exit(&trans, &iter);
+
 	bch2_trans_exit(&trans);
 
 	if (ret < 0) {
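
The new bch2_dev_freespace_init() is the headline change: where the alloc btree has a hole (no live key), the old code still ran one commit per bucket, while the new loop covers the whole hole with a single KEY_TYPE_set extent in the freespace btree. A minimal sketch of that pattern, using only helpers that appear in this diff (the start/next positions are illustrative, not names from the source):

	/*
	 * Sketch: cover buckets [start, next) with one freespace extent
	 * rather than committing once per bucket. The range is capped at
	 * U32_MAX buckets because a bkey's size field is 32 bits:
	 */
	struct bkey_i *freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));

	if (!IS_ERR(freespace)) {
		bkey_init(&freespace->k);
		freespace->k.type = KEY_TYPE_set;
		freespace->k.p    = start;	/* first bucket of the hole */
		bch2_key_resize(&freespace->k, next.offset - start.offset);
	}

That 32-bit size field is also why the loop above clamps its peek to iter.pos.offset + U32_MAX - 1 and asserts on overflow before resizing.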

View File

@@ -89,7 +89,25 @@ static inline void set_alloc_v4_u64s(struct bkey_i_alloc_v4 *a)
 struct bkey_i_alloc_v4 *
 bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
 
-void bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+void __bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
+
+static inline const struct bch_alloc_v4 *bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *convert)
+{
+	const struct bch_alloc_v4 *ret;
+
+	if (unlikely(k.k->type != KEY_TYPE_alloc_v4))
+		goto slowpath;
+
+	ret = bkey_s_c_to_alloc_v4(k).v;
+	if (BCH_ALLOC_V4_BACKPOINTERS_START(ret) != BCH_ALLOC_V4_U64s)
+		goto slowpath;
+
+	return ret;
+slowpath:
+	__bch2_alloc_to_v4(k, convert);
+	return convert;
+}
+
 struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
 
 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
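
Callers now declare a stack bch_alloc_v4 purely as a conversion slot and work through the returned const pointer; on the fast path (a current-format KEY_TYPE_alloc_v4 key) the pointer aliases the key's value in place and the stack copy is never written. The converted call sites throughout this commit all take roughly the same shape:

	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

	if (a->data_type != BCH_DATA_free)	/* read-only access via a-> */
		return 0;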

View File

@@ -194,7 +194,7 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
 					      u64 bucket,
 					      enum alloc_reserve reserve,
-					      struct bch_alloc_v4 *a,
+					      const struct bch_alloc_v4 *a,
 					      struct bucket_alloc_state *s,
 					      struct closure *cl)
 {
@@ -285,7 +285,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 	struct btree_iter iter = { NULL };
 	struct bkey_s_c k;
 	struct open_bucket *ob;
-	struct bch_alloc_v4 a;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a;
 	u64 b = free_entry & ~(~0ULL << 56);
 	unsigned genbits = free_entry >> 56;
 	struct printbuf buf = PRINTBUF;
@@ -309,12 +310,12 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 		goto err;
 	}
 
-	bch2_alloc_to_v4(k, &a);
+	a = bch2_alloc_to_v4(k, &a_convert);
 
-	if (genbits != (alloc_freespace_genbits(a) >> 56)) {
+	if (genbits != (alloc_freespace_genbits(*a) >> 56)) {
 		prt_printf(&buf, "bucket in freespace btree with wrong genbits (got %u should be %llu)\n"
 			   "  freespace key ",
-			   genbits, alloc_freespace_genbits(a) >> 56);
+			   genbits, alloc_freespace_genbits(*a) >> 56);
 		bch2_bkey_val_to_text(&buf, c, freespace_k);
 		prt_printf(&buf, "\n  ");
 		bch2_bkey_val_to_text(&buf, c, k);
@@ -324,7 +325,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 	}
 
-	if (a.data_type != BCH_DATA_free) {
+	if (a->data_type != BCH_DATA_free) {
 		prt_printf(&buf, "non free bucket in freespace btree\n"
 			   "  freespace key ");
 		bch2_bkey_val_to_text(&buf, c, freespace_k);
@@ -358,7 +359,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 		}
 	}
 
-	ob = __try_alloc_bucket(c, ca, b, reserve, &a, s, cl);
+	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
 	if (!ob)
 		iter.path->preserve = false;
 err:
@@ -417,7 +418,8 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 	for_each_btree_key_norestart(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, s->cur_bucket),
 			   BTREE_ITER_SLOTS, k, ret) {
-		struct bch_alloc_v4 a;
+		struct bch_alloc_v4 a_convert;
+		const struct bch_alloc_v4 *a;
 
 		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
 			break;
@@ -426,14 +428,14 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
 		    is_superblock_bucket(ca, k.k->p.offset))
 			continue;
 
-		bch2_alloc_to_v4(k, &a);
+		a = bch2_alloc_to_v4(k, &a_convert);
 
-		if (a.data_type != BCH_DATA_free)
+		if (a->data_type != BCH_DATA_free)
 			continue;
 
 		s->buckets_seen++;
 
-		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a, s, cl);
+		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
 		if (ob)
 			break;
 	}

View File

@@ -323,15 +323,6 @@ bool bch2_bkey_pack_key(struct bkey_packed *out, const struct bkey *in,
 #define x(id, field)	if (!set_inc_field(&state, id, in->field)) return false;
 	bkey_fields()
 #undef x
-
-	/*
-	 * Extents - we have to guarantee that if an extent is packed, a trimmed
-	 * version will also pack:
-	 */
-	if (bkey_start_offset(in) <
-	    le64_to_cpu(format->field_offset[BKEY_FIELD_OFFSET]))
-		return false;
-
 	pack_state_finish(&state, out);
 	out->u64s	= format->key_u64s + in->u64s - BKEY_U64s;
 	out->format	= KEY_FORMAT_LOCAL_BTREE;

View File

@@ -752,7 +752,6 @@ static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const s
 #define x(id, field) __bkey_format_add(s, id, k->field);
 	bkey_fields()
 #undef x
-	__bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
 }
 
 void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);

View File

@@ -1340,15 +1340,16 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
 	struct bucket gc, *b;
 	struct bkey_i_alloc_v4 *a;
-	struct bch_alloc_v4 old, new;
+	struct bch_alloc_v4 old_convert, new;
+	const struct bch_alloc_v4 *old;
 	enum bch_data_type type;
 	int ret;
 
 	if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
 		return 1;
 
-	bch2_alloc_to_v4(k, &old);
-	new = old;
+	old = bch2_alloc_to_v4(k, &old_convert);
+	new = *old;
 
 	percpu_down_read(&c->mark_lock);
 	b = gc_bucket(ca, iter->pos.offset);
@@ -1360,7 +1361,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	type = __alloc_data_type(b->dirty_sectors,
 				 b->cached_sectors,
 				 b->stripe,
-				 old,
+				 *old,
 				 b->data_type);
 	if (b->data_type != type) {
 		struct bch_dev_usage *u;
@@ -1382,7 +1383,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	    gc.data_type != BCH_DATA_btree)
 		return 0;
 
-	if (gen_after(old.gen, gc.gen))
+	if (gen_after(old->gen, gc.gen))
 		return 0;
 
 #define copy_bucket_field(_f)						\
@@ -1404,7 +1405,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
 	copy_bucket_field(stripe);
 #undef copy_bucket_field
 
-	if (!bch2_alloc_v4_cmp(old, new))
+	if (!bch2_alloc_v4_cmp(*old, new))
 		return 0;
 
 	a = bch2_alloc_to_v4_mut(trans, k);
@@ -1462,7 +1463,8 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct bucket *g;
-	struct bch_alloc_v4 a;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a;
 	unsigned i;
 	int ret;
 
@@ -1488,20 +1490,20 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 		ca = bch_dev_bkey_exists(c, k.k->p.inode);
 		g = gc_bucket(ca, k.k->p.offset);
 
-		bch2_alloc_to_v4(k, &a);
+		a = bch2_alloc_to_v4(k, &a_convert);
 
 		g->gen_valid	= 1;
-		g->gen		= a.gen;
+		g->gen		= a->gen;
 
 		if (metadata_only &&
-		    (a.data_type == BCH_DATA_user ||
-		     a.data_type == BCH_DATA_cached ||
-		     a.data_type == BCH_DATA_parity)) {
-			g->data_type		= a.data_type;
-			g->dirty_sectors	= a.dirty_sectors;
-			g->cached_sectors	= a.cached_sectors;
-			g->stripe		= a.stripe;
-			g->stripe_redundancy	= a.stripe_redundancy;
+		    (a->data_type == BCH_DATA_user ||
+		     a->data_type == BCH_DATA_cached ||
+		     a->data_type == BCH_DATA_parity)) {
+			g->data_type		= a->data_type;
+			g->dirty_sectors	= a->dirty_sectors;
+			g->cached_sectors	= a->cached_sectors;
+			g->stripe		= a->stripe;
+			g->stripe_redundancy	= a->stripe_redundancy;
 		}
 	}
 	bch2_trans_iter_exit(&trans, &iter);
@@ -1567,15 +1569,12 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
 			"  should be %u",
 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
 			r->refcount)) {
-		struct bkey_i *new;
-
-		new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+		struct bkey_i *new = bch2_bkey_make_mut(trans, k);
+
 		ret = PTR_ERR_OR_ZERO(new);
 		if (ret)
 			return ret;
 
-		bkey_reassemble(new, k);
-
 		if (!r->refcount)
 			new->k.type = KEY_TYPE_deleted;
 		else
@@ -1889,13 +1888,11 @@ static int gc_btree_gens_key(struct btree_trans *trans,
 	percpu_up_read(&c->mark_lock);
 	return 0;
 update:
-	u = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	u = bch2_bkey_make_mut(trans, k);
 	ret = PTR_ERR_OR_ZERO(u);
 	if (ret)
 		return ret;
 
-	bkey_reassemble(u, k);
-
 	bch2_extent_normalize(c, bkey_i_to_s(u));
 	return bch2_trans_update(trans, iter, u, 0);
 }
@@ -1904,13 +1901,12 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
 					struct bkey_s_c k)
 {
 	struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
-	struct bch_alloc_v4 a;
+	struct bch_alloc_v4 a_convert;
+	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
 	struct bkey_i_alloc_v4 *a_mut;
 	int ret;
 
-	bch2_alloc_to_v4(k, &a);
-
-	if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
+	if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
 		return 0;
 
 	a_mut = bch2_alloc_to_v4_mut(trans, k);

View File

@@ -1975,6 +1975,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 	int ret;
 
 	EBUG_ON(iter->flags & BTREE_ITER_ALL_LEVELS);
+	EBUG_ON((iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) && bkey_eq(end, POS_MAX));
 
 	if (iter->update_path) {
 		bch2_path_put_nokeep(trans, iter->update_path,
@@ -1986,7 +1987,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 	while (1) {
 		k = __bch2_btree_iter_peek(iter, search_key);
-		if (!k.k || bkey_err(k))
+		if (unlikely(!k.k))
+			goto end;
+		if (unlikely(bkey_err(k)))
 			goto out_no_locked;
 
 		/*
@@ -1999,11 +2002,10 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 		else
 			iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
 
-		if (bkey_gt(iter_pos, end)) {
-			bch2_btree_iter_set_pos(iter, end);
-			k = bkey_s_c_null;
-			goto out_no_locked;
-		}
+		if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+			     ? bkey_gt(iter_pos, end)
+			     : bkey_ge(iter_pos, end)))
+			goto end;
 
 		if (iter->update_path &&
 		    !bkey_eq(iter->update_path->pos, k.k->p)) {
@@ -2087,6 +2089,10 @@ out_no_locked:
 	bch2_btree_iter_verify_entry_exit(iter);
 
 	return k;
+end:
+	bch2_btree_iter_set_pos(iter, end);
+	k = bkey_s_c_null;
+	goto out_no_locked;
 }
 
 /**
@@ -2399,15 +2405,15 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		goto out_no_locked;
 	} else {
 		struct bpos next;
+		struct bpos end = iter->pos;
+
+		if (iter->flags & BTREE_ITER_IS_EXTENTS)
+			end.offset = U64_MAX;
 
 		EBUG_ON(iter->path->level);
 
 		if (iter->flags & BTREE_ITER_INTENT) {
 			struct btree_iter iter2;
-			struct bpos end = iter->pos;
-
-			if (iter->flags & BTREE_ITER_IS_EXTENTS)
-				end.offset = U64_MAX;
 
 			bch2_trans_copy_iter(&iter2, iter);
 			k = bch2_btree_iter_peek_upto(&iter2, end);
@@ -2420,7 +2426,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		} else {
 			struct bpos pos = iter->pos;
 
-			k = bch2_btree_iter_peek(iter);
+			k = bch2_btree_iter_peek_upto(iter, end);
 			if (unlikely(bkey_err(k)))
 				bch2_btree_iter_set_pos(iter, pos);
 			else
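
Note the asymmetry in the new bounds check: for non-extent iterators, peek_upto() bails only once the position is strictly past end (bkey_gt), so end is inclusive, while for extent iterators it bails at bkey_ge, leaving end exclusive. The new end: label gives both "past the bound" and "no key" exits one common path that parks the iterator at end and returns bkey_s_c_null; the callers converted later in this commit (delete_range, fpunch, the fs-io loops) previously open-coded that as a bkey_ge() check plus break.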

View File

@@ -397,19 +397,76 @@ void *__bch2_trans_kmalloc(struct btree_trans *, size_t);
 static inline void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 {
-	unsigned new_top = trans->mem_top + size;
-	void *p = trans->mem + trans->mem_top;
+	size = roundup(size, 8);
+
+	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
+		void *p = trans->mem + trans->mem_top;
 
-	if (likely(new_top <= trans->mem_bytes)) {
 		trans->mem_top += size;
 		memset(p, 0, size);
 		return p;
 	} else {
 		return __bch2_trans_kmalloc(trans, size);
 	}
 }
+
+static inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *trans, size_t size)
+{
+	size = roundup(size, 8);
+
+	if (likely(trans->mem_top + size <= trans->mem_bytes)) {
+		void *p = trans->mem + trans->mem_top;
+
+		trans->mem_top += size;
+		return p;
+	} else {
+		return __bch2_trans_kmalloc(trans, size);
+	}
+}
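
bch2_trans_kmalloc_nomemzero() differs from bch2_trans_kmalloc() only in the skipped memset(); it is safe when the caller immediately initializes the whole buffer, which is why bch2_alloc_to_v4_mut_inlined() earlier now pairs it with bkey_reassemble() plus an explicit memset() of just the appended backpointer tail, and why bch2_bucket_do_index() switched over. Both variants now round the size up to 8 bytes, keeping the transaction's bump allocator aligned. The helper added next, bch2_bkey_make_mut(), is the canonical user.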
+
+static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct bkey_s_c k)
+{
+	struct bkey_i *mut = bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k));
+
+	if (!IS_ERR(mut))
+		bkey_reassemble(mut, k);
+	return mut;
+}
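
bch2_bkey_make_mut() packages the kmalloc-plus-bkey_reassemble() pattern this commit deletes from a dozen call sites. A converted caller, following the gc_btree_gens_key() hunk earlier:

	struct bkey_i *u = bch2_bkey_make_mut(trans, k);

	ret = PTR_ERR_OR_ZERO(u);
	if (ret)
		return ret;

	/* u is a mutable copy of k in transaction memory: */
	bch2_extent_normalize(c, bkey_i_to_s(u));
	return bch2_trans_update(trans, iter, u, 0);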
+
+static inline struct bkey_i *bch2_bkey_get_mut(struct btree_trans *trans,
+					       struct btree_iter *iter)
+{
+	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
+
+	return unlikely(IS_ERR(k.k))
+		? ERR_CAST(k.k)
+		: bch2_bkey_make_mut(trans, k);
+}
+
+#define bch2_bkey_get_mut_typed(_trans, _iter, _type)			\
+({									\
+	struct bkey_i *_k = bch2_bkey_get_mut(_trans, _iter);		\
+	struct bkey_i_##_type *_ret;					\
+									\
+	if (IS_ERR(_k))							\
+		_ret = ERR_CAST(_k);					\
+	else if (unlikely(_k->k.type != KEY_TYPE_##_type))		\
+		_ret = ERR_PTR(-ENOENT);				\
+	else								\
+		_ret = bkey_i_to_##_type(_k);				\
+	_ret;								\
+})
+
+#define bch2_bkey_alloc(_trans, _iter, _type)				\
+({									\
+	struct bkey_i_##_type *_k = bch2_trans_kmalloc(_trans, sizeof(*_k));\
+	if (!IS_ERR(_k)) {						\
+		bkey_##_type##_init(&_k->k_i);				\
+		_k->k.p = (_iter)->pos;					\
+	}								\
+	_k;								\
+})
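
bch2_bkey_alloc() allocates a zeroed key (it uses the zeroing bch2_trans_kmalloc()), runs the type's init function and positions it at the iterator, leaving only the value to fill in; the bch2_lru_set() hunk later reduces to:

	lru = bch2_bkey_alloc(trans, &iter, lru);
	ret = PTR_ERR_OR_ZERO(lru);
	if (ret)
		goto err;

	lru->v.idx = cpu_to_le64(idx);
	ret = bch2_trans_update(trans, &iter, &lru->k_i, 0);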
u32 bch2_trans_begin(struct btree_trans *); u32 bch2_trans_begin(struct btree_trans *);
static inline struct btree * static inline struct btree *
@@ -575,6 +632,36 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 	_ret;								\
 })
 
+#define for_each_btree_key2_upto(_trans, _iter, _btree_id,		\
+				 _start, _end, _flags, _k, _do)		\
+({									\
+	int _ret = 0;							\
+									\
+	bch2_trans_iter_init((_trans), &(_iter), (_btree_id),		\
+			     (_start), (_flags));			\
+									\
+	while (1) {							\
+		u32 _restart_count = bch2_trans_begin(_trans);		\
+									\
+		_ret = 0;						\
+		(_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, (_flags));\
+		if (!(_k).k)						\
+			break;						\
+									\
+		_ret = bkey_err(_k) ?: (_do);				\
+		if (bch2_err_matches(_ret, BCH_ERR_transaction_restart))\
+			continue;					\
+		if (_ret)						\
+			break;						\
+		bch2_trans_verify_not_restarted(_trans, _restart_count);\
+		if (!bch2_btree_iter_advance(&(_iter)))			\
+			break;						\
+	}								\
+									\
+	bch2_trans_iter_exit((_trans), &(_iter));			\
+	_ret;								\
+})
+
 #define for_each_btree_key_reverse(_trans, _iter, _btree_id,		\
 				   _start, _flags, _k, _do)		\
 ({									\
@@ -613,6 +700,14 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
 					(_journal_seq), (_commit_flags)))
 
+#define for_each_btree_key_upto_commit(_trans, _iter, _btree_id,	\
+				  _start, _end, _iter_flags, _k,	\
+				  _disk_res, _journal_seq, _commit_flags,\
+				  _do)					\
+	for_each_btree_key2_upto(_trans, _iter, _btree_id, _start, _end, _iter_flags, _k,\
+			    (_do) ?: bch2_trans_commit(_trans, (_disk_res),\
+					(_journal_seq), (_commit_flags)))
+
 #define for_each_btree_key(_trans, _iter, _btree_id,			\
 			   _start, _flags, _k, _ret)			\
 	for (bch2_trans_iter_init((_trans), &(_iter), (_btree_id),	\
@@ -649,6 +744,12 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 	     !((_ret) = bkey_err(_k)) && (_k).k;			\
 	     bch2_btree_iter_advance(&(_iter)))
 
+#define for_each_btree_key_upto_continue_norestart(_iter, _end, _flags, _k, _ret)\
+	for (;								\
+	     (_k) = bch2_btree_iter_peek_upto_type(&(_iter), _end, _flags),\
+	     !((_ret) = bkey_err(_k)) && (_k).k;			\
+	     bch2_btree_iter_advance(&(_iter)))
+
 /* new multiple iterator interface: */
 
 void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
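
These _upto macros are the bounded counterparts of the existing iteration macros: the end bound is enforced by the macro instead of by a manual bkey_ge() check and break inside the loop body. The bch2_nocow_write_convert_unwritten() hunk later in this commit is the motivating conversion:

	ret = for_each_btree_key_upto_commit(&trans, iter, BTREE_ID_extents,
			bkey_start_pos(&orig->k), orig->k.p,
			BTREE_ITER_INTENT, k,
			NULL, NULL, BTREE_INSERT_NOFAIL, ({
		/* the bound is now the macro's job: */
		BUG_ON(bkey_ge(bkey_start_pos(k.k), orig->k.p));
		bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
	}));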

View File

@@ -1199,12 +1199,10 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 			if (ret)
 				goto nomerge1;
 
-			update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+			update = bch2_bkey_make_mut(trans, k);
 			if ((ret = PTR_ERR_OR_ZERO(update)))
 				goto err;
 
-			bkey_reassemble(update, k);
-
 			if (bch2_bkey_merge(c, bkey_i_to_s(update), bkey_i_to_s_c(insert))) {
 				ret = bch2_btree_delete_at(trans, &iter, flags);
 				if (ret)
@@ -1234,12 +1232,10 @@ nomerge1:
 			trans->extra_journal_res += compressed_sectors;
 
 		if (front_split) {
-			update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+			update = bch2_bkey_make_mut(trans, k);
 			if ((ret = PTR_ERR_OR_ZERO(update)))
 				goto err;
 
-			bkey_reassemble(update, k);
-
 			bch2_cut_back(start, update);
 
 			bch2_trans_iter_init(trans, &update_iter, btree_id, update->k.p,
@@ -1258,12 +1254,10 @@ nomerge1:
 		if (k.k->p.snapshot != insert->k.p.snapshot &&
 		    (front_split || back_split)) {
-			update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+			update = bch2_bkey_make_mut(trans, k);
 			if ((ret = PTR_ERR_OR_ZERO(update)))
 				goto err;
 
-			bkey_reassemble(update, k);
-
 			bch2_cut_front(start, update);
 			bch2_cut_back(insert->k.p, update);
 
@@ -1307,11 +1301,10 @@ nomerge1:
 		}
 
 		if (back_split) {
-			update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+			update = bch2_bkey_make_mut(trans, k);
 			if ((ret = PTR_ERR_OR_ZERO(update)))
 				goto err;
 
-			bkey_reassemble(update, k);
-
 			bch2_cut_front(insert->k.p, update);
 
 			ret = bch2_trans_update_by_path(trans, iter.path, update,
@@ -1652,7 +1645,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
 	int ret = 0;
 
 	bch2_trans_iter_init(trans, &iter, id, start, BTREE_ITER_INTENT);
-	while ((k = bch2_btree_iter_peek(&iter)).k) {
+	while ((k = bch2_btree_iter_peek_upto(&iter, end)).k) {
 		struct disk_reservation disk_res =
 			bch2_disk_reservation_init(trans->c, 0);
 		struct bkey_i delete;
@@ -1661,9 +1654,6 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
 		if (ret)
 			goto err;
 
-		if (bkey_ge(iter.pos, end))
-			break;
-
 		bkey_init(&delete.k);
 
 		/*

View File

@@ -486,8 +486,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
 {
 	bool gc = flags & BTREE_TRIGGER_GC;
 	u64 journal_seq = trans->journal_res.seq;
+	u64 bucket_journal_seq;
 	struct bch_fs *c = trans->c;
-	struct bch_alloc_v4 old_a, new_a;
+	struct bch_alloc_v4 old_a_convert, new_a_convert;
+	const struct bch_alloc_v4 *old_a, *new_a;
 	struct bch_dev *ca;
 	int ret = 0;
 
@@ -504,36 +506,38 @@ int bch2_mark_alloc(struct btree_trans *trans,
 
 	ca = bch_dev_bkey_exists(c, new.k->p.inode);
 
-	bch2_alloc_to_v4(old, &old_a);
-	bch2_alloc_to_v4(new, &new_a);
+	old_a = bch2_alloc_to_v4(old, &old_a_convert);
+	new_a = bch2_alloc_to_v4(new, &new_a_convert);
+
+	bucket_journal_seq = new_a->journal_seq;
 
 	if ((flags & BTREE_TRIGGER_INSERT) &&
-	    data_type_is_empty(old_a.data_type) !=
-	    data_type_is_empty(new_a.data_type) &&
+	    data_type_is_empty(old_a->data_type) !=
+	    data_type_is_empty(new_a->data_type) &&
 	    new.k->type == KEY_TYPE_alloc_v4) {
 		struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
 
-		BUG_ON(!journal_seq);
+		EBUG_ON(!journal_seq);
 
 		/*
 		 * If the btree updates referring to a bucket weren't flushed
 		 * before the bucket became empty again, then the we don't have
 		 * to wait on a journal flush before we can reuse the bucket:
 		 */
-		new_a.journal_seq = data_type_is_empty(new_a.data_type) &&
+		v->journal_seq = bucket_journal_seq =
+			data_type_is_empty(new_a->data_type) &&
 			(journal_seq == v->journal_seq ||
 			 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
 			? 0 : journal_seq;
-		v->journal_seq = new_a.journal_seq;
 	}
 
-	if (!data_type_is_empty(old_a.data_type) &&
-	    data_type_is_empty(new_a.data_type) &&
-	    new_a.journal_seq) {
+	if (!data_type_is_empty(old_a->data_type) &&
+	    data_type_is_empty(new_a->data_type) &&
+	    bucket_journal_seq) {
 		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
 				c->journal.flushed_seq_ondisk,
 				new.k->p.inode, new.k->p.offset,
-				new_a.journal_seq);
+				bucket_journal_seq);
 		if (ret) {
 			bch2_fs_fatal_error(c,
 				"error setting bucket_needs_journal_commit: %i", ret);
@@ -542,10 +546,10 @@ int bch2_mark_alloc(struct btree_trans *trans,
 	}
 
 	percpu_down_read(&c->mark_lock);
-	if (!gc && new_a.gen != old_a.gen)
-		*bucket_gen(ca, new.k->p.offset) = new_a.gen;
+	if (!gc && new_a->gen != old_a->gen)
+		*bucket_gen(ca, new.k->p.offset) = new_a->gen;
 
-	bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
+	bch2_dev_usage_update(c, ca, *old_a, *new_a, journal_seq, gc);
 
 	if (gc) {
 		struct bucket *g = gc_bucket(ca, new.k->p.offset);
@@ -553,12 +557,12 @@ int bch2_mark_alloc(struct btree_trans *trans,
 		bucket_lock(g);
 
 		g->gen_valid		= 1;
-		g->gen			= new_a.gen;
-		g->data_type		= new_a.data_type;
-		g->stripe		= new_a.stripe;
-		g->stripe_redundancy	= new_a.stripe_redundancy;
-		g->dirty_sectors	= new_a.dirty_sectors;
-		g->cached_sectors	= new_a.cached_sectors;
+		g->gen			= new_a->gen;
+		g->data_type		= new_a->data_type;
+		g->stripe		= new_a->stripe;
+		g->stripe_redundancy	= new_a->stripe_redundancy;
+		g->dirty_sectors	= new_a->dirty_sectors;
+		g->cached_sectors	= new_a->cached_sectors;
 
 		bucket_unlock(g);
 	}
@@ -570,9 +574,9 @@ int bch2_mark_alloc(struct btree_trans *trans,
 	 */
 
 	if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
-	    old_a.cached_sectors) {
+	    old_a->cached_sectors) {
 		ret = update_cached_sectors(c, new, ca->dev_idx,
-					    -((s64) old_a.cached_sectors),
+					    -((s64) old_a->cached_sectors),
 					    journal_seq, gc);
 		if (ret) {
 			bch2_fs_fatal_error(c, "%s(): no replicas entry while updating cached sectors",
@@ -581,20 +585,20 @@ int bch2_mark_alloc(struct btree_trans *trans,
 		}
 	}
 
-	if (new_a.data_type == BCH_DATA_free &&
-	    (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+	if (new_a->data_type == BCH_DATA_free &&
+	    (!new_a->journal_seq || new_a->journal_seq < c->journal.flushed_seq_ondisk))
 		closure_wake_up(&c->freelist_wait);
 
-	if (new_a.data_type == BCH_DATA_need_discard &&
-	    (!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
+	if (new_a->data_type == BCH_DATA_need_discard &&
+	    (!bucket_journal_seq || bucket_journal_seq < c->journal.flushed_seq_ondisk))
 		bch2_do_discards(c);
 
-	if (old_a.data_type != BCH_DATA_cached &&
-	    new_a.data_type == BCH_DATA_cached &&
+	if (old_a->data_type != BCH_DATA_cached &&
+	    new_a->data_type == BCH_DATA_cached &&
 	    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
 		bch2_do_invalidates(c);
 
-	if (new_a.data_type == BCH_DATA_need_gc_gens)
+	if (new_a->data_type == BCH_DATA_need_gc_gens)
 		bch2_do_gc_gens(c);
 
 	return 0;
@@ -1409,7 +1413,6 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
 			s64 sectors, enum bch_data_type data_type)
 {
 	struct btree_iter iter;
-	struct bkey_s_c k;
 	struct bkey_i_stripe *s;
 	struct bch_replicas_padded r;
 	int ret = 0;
@@ -1417,20 +1420,16 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, p.ec.idx),
 			     BTREE_ITER_INTENT|
 			     BTREE_ITER_WITH_UPDATES);
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
-
-	if (k.k->type != KEY_TYPE_stripe) {
-		bch2_trans_inconsistent(trans,
+	s = bch2_bkey_get_mut_typed(trans, &iter, stripe);
+	ret = PTR_ERR_OR_ZERO(s);
+	if (unlikely(ret)) {
+		bch2_trans_inconsistent_on(ret == -ENOENT, trans,
 			"pointer to nonexistent stripe %llu",
 			(u64) p.ec.idx);
-		ret = -EIO;
 		goto err;
 	}
 
-	if (!bch2_ptr_matches_stripe(bkey_s_c_to_stripe(k).v, p)) {
+	if (!bch2_ptr_matches_stripe(&s->v, p)) {
 		bch2_trans_inconsistent(trans,
 			"stripe pointer doesn't match stripe %llu",
 			(u64) p.ec.idx);
@@ -1438,12 +1437,6 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
 		goto err;
 	}
 
-	s = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
-	ret = PTR_ERR_OR_ZERO(s);
-	if (ret)
-		goto err;
-
-	bkey_reassemble(&s->k_i, k);
-
 	stripe_blockcount_set(&s->v, p.ec.block,
 			      stripe_blockcount_get(&s->v, p.ec.block) +
 			      sectors);
@@ -1718,8 +1711,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bkey_i *n;
+	struct bkey_i *k;
 	__le64 *refcount;
 	int add = !(flags & BTREE_TRIGGER_OVERWRITE) ? 1 : -1;
 	struct printbuf buf = PRINTBUF;
@@ -1728,19 +1720,12 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_reflink, POS(0, *idx),
 			     BTREE_ITER_INTENT|
 			     BTREE_ITER_WITH_UPDATES);
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
+	k = bch2_bkey_get_mut(trans, &iter);
+	ret = PTR_ERR_OR_ZERO(k);
 	if (ret)
 		goto err;
 
-	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
-	ret = PTR_ERR_OR_ZERO(n);
-	if (ret)
-		goto err;
-
-	bkey_reassemble(n, k);
-
-	refcount = bkey_refcount(n);
+	refcount = bkey_refcount(k);
 	if (!refcount) {
 		bch2_bkey_val_to_text(&buf, c, p.s_c);
 		bch2_trans_inconsistent(trans,
@@ -1764,12 +1749,12 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
 		u64 pad;
 
 		pad = max_t(s64, le32_to_cpu(v->front_pad),
-			    le64_to_cpu(v->idx) - bkey_start_offset(k.k));
+			    le64_to_cpu(v->idx) - bkey_start_offset(&k->k));
 		BUG_ON(pad > U32_MAX);
 		v->front_pad = cpu_to_le32(pad);
 
 		pad = max_t(s64, le32_to_cpu(v->back_pad),
-			    k.k->p.offset - p.k->size - le64_to_cpu(v->idx));
+			    k->k.p.offset - p.k->size - le64_to_cpu(v->idx));
 		BUG_ON(pad > U32_MAX);
 		v->back_pad = cpu_to_le32(pad);
 	}
@@ -1777,11 +1762,11 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
 	le64_add_cpu(refcount, add);
 
 	bch2_btree_iter_set_pos_to_extent_start(&iter);
-	ret = bch2_trans_update(trans, &iter, n, 0);
+	ret = bch2_trans_update(trans, &iter, k, 0);
 	if (ret)
 		goto err;
 
-	*idx = k.k->p.offset;
+	*idx = k->k.p.offset;
 err:
 	bch2_trans_iter_exit(trans, &iter);
 	printbuf_exit(&buf);

View File

@@ -684,7 +684,7 @@ static int ec_stripe_delete(struct bch_fs *c, size_t idx)
 {
 	return bch2_btree_delete_range(c, BTREE_ID_stripes,
 				       POS(0, idx),
-				       POS(0, idx + 1),
+				       POS(0, idx),
 				       0, NULL);
 }
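
Since bch2_btree_delete_range_trans() now iterates with bch2_btree_iter_peek_upto() (see the delete_range hunk earlier), the end position of a non-extents delete range has effectively become inclusive, so deleting the single stripe key at idx no longer needs the idx + 1 bound.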
@@ -841,13 +841,11 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
 	dev = s->key.v.ptrs[block].dev;
 
-	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	n = bch2_bkey_make_mut(trans, k);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
 		return ret;
 
-	bkey_reassemble(n, k);
-
 	bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
 	ec_ptr = (void *) bch2_bkey_has_device(bkey_i_to_s_c(n), dev);
 	BUG_ON(!ec_ptr);

View File

@@ -128,12 +128,9 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
 	bch2_trans_copy_iter(&copy, iter);
 
-	for_each_btree_key_continue_norestart(copy, 0, k, ret) {
+	for_each_btree_key_upto_continue_norestart(copy, insert->k.p, 0, k, ret) {
 		unsigned offset = 0;
 
-		if (bkey_ge(bkey_start_pos(k.k), *end))
-			break;
-
 		if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
 			offset = bkey_start_offset(&insert->k) -
 				bkey_start_offset(k.k);

View File

@@ -2607,15 +2607,11 @@ retry:
 	if (ret)
 		goto err;
 
-	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
-		if (bkey_ge(bkey_start_pos(k.k), end))
-			break;
-
+	for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents, start, end, 0, k, ret)
 		if (bkey_extent_is_data(k.k)) {
 			ret = 1;
 			break;
 		}
-	}
 	start = iter.pos;
 	bch2_trans_iter_exit(&trans, &iter);
 err:
@@ -2655,8 +2651,8 @@ static int __bch2_truncate_page(struct bch_inode_info *inode,
 	 * page
 	 */
 	ret = range_has_data(c, inode->ei_subvol,
-			POS(inode->v.i_ino, index << PAGE_SECTORS_SHIFT),
-			POS(inode->v.i_ino, (index + 1) << PAGE_SECTORS_SHIFT));
+			POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT)),
+			POS(inode->v.i_ino, (index << PAGE_SECTORS_SHIFT) + PAGE_SECTORS));
 	if (ret <= 0)
 		return ret;
 
@@ -3038,7 +3034,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
 		k = insert
 			? bch2_btree_iter_peek_prev(&src)
-			: bch2_btree_iter_peek(&src);
+			: bch2_btree_iter_peek_upto(&src, POS(inode->v.i_ino, U64_MAX));
 		if ((ret = bkey_err(k)))
 			continue;
 
@@ -3313,6 +3309,10 @@ err:
 	return bch2_err_class(ret);
 }
 
+/*
+ * Take a quota reservation for unallocated blocks in a given file range
+ * Does not check pagecache
+ */
 static int quota_reserve_range(struct bch_inode_info *inode,
 			       struct quota_res *res,
 			       u64 start, u64 end)
@@ -3528,11 +3528,11 @@ retry:
 	if (ret)
 		goto err;
 
-	for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
-			   SPOS(inode->v.i_ino, offset >> 9, snapshot), 0, k, ret) {
-		if (k.k->p.inode != inode->v.i_ino) {
-			break;
-		} else if (bkey_extent_is_data(k.k)) {
+	for_each_btree_key_upto_norestart(&trans, iter, BTREE_ID_extents,
+			   SPOS(inode->v.i_ino, offset >> 9, snapshot),
+			   POS(inode->v.i_ino, U64_MAX),
+			   0, k, ret) {
+		if (bkey_extent_is_data(k.k)) {
 			next_data = max(offset, bkey_start_offset(k.k) << 9);
 			break;
 		} else if (k.k->p.offset >> 9 > isize)

View File

@@ -779,12 +779,10 @@ static int hash_redo_key(struct btree_trans *trans,
 	if (IS_ERR(delete))
 		return PTR_ERR(delete);
 
-	tmp = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	tmp = bch2_bkey_make_mut(trans, k);
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
 
-	bkey_reassemble(tmp, k);
-
 	bkey_init(&delete->k);
 	delete->k.p = k_iter->pos;
 	return  bch2_btree_iter_traverse(k_iter) ?:

View File

@@ -364,22 +364,25 @@ int bch2_inode_write(struct btree_trans *trans,
 	return bch2_trans_update(trans, iter, &inode_p->inode.k_i, 0);
 }
 
-struct bkey_s_c bch2_inode_to_v3(struct btree_trans *trans, struct bkey_s_c k)
+struct bkey_i *bch2_inode_to_v3(struct btree_trans *trans, struct bkey_i *k)
 {
 	struct bch_inode_unpacked u;
 	struct bkey_inode_buf *inode_p;
 	int ret;
 
+	if (!bkey_is_inode(&k->k))
+		return ERR_PTR(-ENOENT);
+
 	inode_p = bch2_trans_kmalloc(trans, sizeof(*inode_p));
 	if (IS_ERR(inode_p))
-		return bkey_s_c_err(PTR_ERR(inode_p));
+		return ERR_CAST(inode_p);
 
-	ret = bch2_inode_unpack(k, &u);
+	ret = bch2_inode_unpack(bkey_i_to_s_c(k), &u);
 	if (ret)
-		return bkey_s_c_err(ret);
+		return ERR_PTR(ret);
 
 	bch2_inode_pack(inode_p, &u);
-	return bkey_i_to_s_c(&inode_p->inode.k_i);
+	return &inode_p->inode.k_i;
 }
 
 static int __bch2_inode_invalid(struct bkey_s_c k, struct printbuf *err)
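
bch2_inode_to_v3() now takes and returns struct bkey_i rather than bkey_s_c, and performs its own bkey_is_inode() check (returning -ENOENT), so a caller that already holds a mutable key from bch2_bkey_get_mut() can upgrade it in place; the bch2_extent_update_i_size_sectors() hunk two files below does exactly that.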

View File

@@ -82,7 +82,7 @@ struct bkey_inode_buf {
 void bch2_inode_pack(struct bkey_inode_buf *, const struct bch_inode_unpacked *);
 int bch2_inode_unpack(struct bkey_s_c, struct bch_inode_unpacked *);
 
-struct bkey_s_c bch2_inode_to_v3(struct btree_trans *, struct bkey_s_c);
+struct bkey_i *bch2_inode_to_v3(struct btree_trans *, struct bkey_i *);
 
 void bch2_inode_unpacked_to_text(struct printbuf *, struct bch_inode_unpacked *);

View File

@@ -239,9 +239,8 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
 					    s64 i_sectors_delta)
 {
 	struct btree_iter iter;
-	struct bkey_s_c inode_k;
-	struct bkey_s_c_inode_v3 inode;
-	struct bkey_i_inode_v3 *new_inode;
+	struct bkey_i *k;
+	struct bkey_i_inode_v3 *inode;
 	int ret;
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
@@ -249,40 +248,29 @@ static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
 			      extent_iter->pos.inode,
 			      extent_iter->snapshot),
 			 BTREE_ITER_INTENT|BTREE_ITER_CACHED);
-	inode_k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(inode_k);
+	k = bch2_bkey_get_mut(trans, &iter);
+	ret = PTR_ERR_OR_ZERO(k);
 	if (unlikely(ret))
 		goto err;
 
-	ret = bkey_is_inode(inode_k.k) ? 0 : -ENOENT;
-	if (unlikely(ret))
-		goto err;
-
-	if (unlikely(inode_k.k->type != KEY_TYPE_inode_v3)) {
-		inode_k = bch2_inode_to_v3(trans, inode_k);
-		ret = bkey_err(inode_k);
+	if (unlikely(k->k.type != KEY_TYPE_inode_v3)) {
+		k = bch2_inode_to_v3(trans, k);
+		ret = PTR_ERR_OR_ZERO(k);
 		if (unlikely(ret))
 			goto err;
 	}
 
-	inode = bkey_s_c_to_inode_v3(inode_k);
+	inode = bkey_i_to_inode_v3(k);
 
-	new_inode = bch2_trans_kmalloc(trans, bkey_bytes(inode_k.k));
-	ret = PTR_ERR_OR_ZERO(new_inode);
-	if (unlikely(ret))
-		goto err;
-
-	bkey_reassemble(&new_inode->k_i, inode.s_c);
-
-	if (!(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
-	    new_i_size > le64_to_cpu(inode.v->bi_size))
-		new_inode->v.bi_size = cpu_to_le64(new_i_size);
+	if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) &&
+	    new_i_size > le64_to_cpu(inode->v.bi_size))
+		inode->v.bi_size = cpu_to_le64(new_i_size);
 
-	le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
-	new_inode->k.p.snapshot = iter.snapshot;
+	le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta);
+	inode->k.p.snapshot = iter.snapshot;
 
-	ret = bch2_trans_update(trans, &iter, &new_inode->k_i,
+	ret = bch2_trans_update(trans, &iter, &inode->k_i,
 				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
 err:
 	bch2_trans_iter_exit(trans, &iter);
@@ -513,16 +501,19 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 
 		bch2_btree_iter_set_snapshot(iter, snapshot);
 
-		k = bch2_btree_iter_peek(iter);
-		if (bkey_ge(iter->pos, end_pos)) {
-			bch2_btree_iter_set_pos(iter, end_pos);
+		/*
+		 * peek_upto() doesn't have ideal semantics for extents:
+		 */
+		k = bch2_btree_iter_peek_upto(iter, end_pos);
+		if (!k.k)
 			break;
-		}
 
 		ret = bkey_err(k);
 		if (ret)
 			continue;
 
+		BUG_ON(bkey_ge(iter->pos, end_pos));
+
 		bkey_init(&delete.k);
 		delete.k.p = iter->pos;
 
@@ -535,6 +526,8 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 		bch2_disk_reservation_put(c, &disk_res);
 	}
 
+	BUG_ON(bkey_gt(iter->pos, end_pos));
+
 	return ret ?: ret2;
 }
@@ -1323,13 +1316,11 @@ static int bch2_nocow_write_convert_one_unwritten(struct btree_trans *trans,
 		return 0;
 	}
 
-	new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	new = bch2_bkey_make_mut(trans, k);
 	ret = PTR_ERR_OR_ZERO(new);
 	if (ret)
 		return ret;
 
-	bkey_reassemble(new, k);
-
 	bch2_cut_front(bkey_start_pos(&orig->k), new);
 	bch2_cut_back(orig->k.p, new);
 
@@ -1362,12 +1353,11 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
 	bch2_trans_init(&trans, c, 0, 0);
 
 	for_each_keylist_key(&op->insert_keys, orig) {
-		ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
-				     bkey_start_pos(&orig->k),
+		ret = for_each_btree_key_upto_commit(&trans, iter, BTREE_ID_extents,
+				     bkey_start_pos(&orig->k), orig->k.p,
 				     BTREE_ITER_INTENT, k,
 				     NULL, NULL, BTREE_INSERT_NOFAIL, ({
-			if (bkey_ge(bkey_start_pos(k.k), orig->k.p))
-				break;
+			BUG_ON(bkey_ge(bkey_start_pos(k.k), orig->k.p));
 
 			bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
 		}));

View File

@@ -101,14 +101,12 @@ int bch2_lru_set(struct btree_trans *trans, u64 lru_id, u64 idx, u64 *time)
 	BUG_ON(iter.pos.inode != lru_id);
 	*time = iter.pos.offset;
 
-	lru = bch2_trans_kmalloc(trans, sizeof(*lru));
+	lru = bch2_bkey_alloc(trans, &iter, lru);
 	ret = PTR_ERR_OR_ZERO(lru);
 	if (ret)
 		goto err;
 
-	bkey_lru_init(&lru->k_i);
-	lru->k.p	= iter.pos;
-	lru->v.idx	= cpu_to_le64(idx);
+	lru->v.idx = cpu_to_le64(idx);
 
 	ret = bch2_trans_update(trans, &iter, &lru->k_i, 0);
 	if (ret)
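
bch2_bkey_alloc() is the creation-side counterpart: it hands back a typed key already initialized to the right key type and positioned at the iterator, so only the value fields remain to fill in. Sketch of the conversion from this hunk:

	/* Before: raw allocation plus manual type init and positioning */
	lru = bch2_trans_kmalloc(trans, sizeof(*lru));
	ret = PTR_ERR_OR_ZERO(lru);
	if (ret)
		goto err;
	bkey_lru_init(&lru->k_i);
	lru->k.p = iter.pos;

	/* After: typed, initialized, and positioned at iter.pos in one call */
	lru = bch2_bkey_alloc(trans, &iter, lru);
	ret = PTR_ERR_OR_ZERO(lru);
	if (ret)
		goto err;
	lru->v.idx = cpu_to_le64(idx);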
@@ -164,17 +162,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
 			" for %s",
 			(bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),
 			(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
-		struct bkey_i *update =
-			bch2_trans_kmalloc(trans, sizeof(*update));
-
-		ret = PTR_ERR_OR_ZERO(update);
-		if (ret)
-			goto err;
-
-		bkey_init(&update->k);
-		update->k.p = lru_iter->pos;
-
-		ret = bch2_trans_update(trans, lru_iter, update, 0);
+		ret = bch2_btree_delete_at(trans, lru_iter, 0);
 		if (ret)
 			goto err;
 	}
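
The deleted block was hand-rolling a deletion: allocate a bare bkey, bkey_init() it (which makes it a zero-size whiteout), point it at the iterator position, and queue it as an update. bch2_btree_delete_at() is that sequence as a single helper; roughly:

	/* Equivalent to queueing a bkey_init()'d key at lru_iter->pos */
	ret = bch2_btree_delete_at(trans, lru_iter, 0);
	if (ret)
		goto err;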
@@ -49,13 +49,11 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
 	if (!bch2_bkey_has_device(k, dev_idx))
 		return 0;
 
-	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	n = bch2_bkey_make_mut(trans, k);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
 		return ret;
 
-	bkey_reassemble(n, k);
-
 	ret = drop_dev_ptrs(c, bkey_i_to_s(n), dev_idx, flags, false);
 	if (ret)
 		return ret;
@@ -198,13 +198,11 @@ static int bch2_extent_drop_ptrs(struct btree_trans *trans,
 	struct bkey_i *n;
 	int ret;
 
-	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
+	n = bch2_bkey_make_mut(trans, k);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
 		return ret;
 
-	bkey_reassemble(n, k);
-
 	while (data_opts.kill_ptrs) {
 		unsigned i = 0, drop = __fls(data_opts.kill_ptrs);
 		struct bch_extent_ptr *ptr;
@@ -44,7 +44,6 @@ static int find_buckets_to_copygc(struct bch_fs *c)
 	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	struct bch_alloc_v4 a;
 	int ret;
 
 	bch2_trans_init(&trans, c, 0, 0);
@@ -61,22 +60,24 @@ static int find_buckets_to_copygc(struct bch_fs *c)
 			   BTREE_ITER_PREFETCH, k, ret) {
 		struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
 		struct copygc_heap_entry e;
+		struct bch_alloc_v4 a_convert;
+		const struct bch_alloc_v4 *a;
 
-		bch2_alloc_to_v4(k, &a);
+		a = bch2_alloc_to_v4(k, &a_convert);
 
-		if ((a.data_type != BCH_DATA_btree &&
-		     a.data_type != BCH_DATA_user) ||
-		    a.dirty_sectors >= ca->mi.bucket_size ||
+		if ((a->data_type != BCH_DATA_btree &&
+		     a->data_type != BCH_DATA_user) ||
+		    a->dirty_sectors >= ca->mi.bucket_size ||
 		    bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
 			continue;
 
 		e = (struct copygc_heap_entry) {
 			.dev		= iter.pos.inode,
-			.gen		= a.gen,
-			.replicas	= 1 + a.stripe_redundancy,
-			.fragmentation	= div_u64((u64) a.dirty_sectors * (1ULL << 31),
+			.gen		= a->gen,
+			.replicas	= 1 + a->stripe_redundancy,
+			.fragmentation	= div_u64((u64) a->dirty_sectors * (1ULL << 31),
 						  ca->mi.bucket_size),
-			.sectors	= a.dirty_sectors,
+			.sectors	= a->dirty_sectors,
 			.bucket		= iter.pos.offset,
 		};
 		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
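
This mirrors the bch2_alloc_to_v4() signature change elsewhere in this commit: the helper now returns a const pointer, using the caller-supplied struct only as backing storage, presumably so a key that is already in v4 format can be read in place without a copy. Usage sketch (use_bucket() is illustrative, not a real helper):

	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;

	/* Returns either &a_convert (converted copy) or a pointer into k */
	a = bch2_alloc_to_v4(k, &a_convert);

	if (a->data_type == BCH_DATA_user)
		use_bucket(a->gen, a->dirty_sectors);	/* illustrative */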
@@ -271,8 +271,9 @@ int bch2_opt_parse(struct bch_fs *c,
 	case BCH_OPT_BOOL:
 		ret = kstrtou64(val, 10, res);
 		if (ret < 0 || (*res != 0 && *res != 1)) {
-			prt_printf(err, "%s: must be bool",
-				   opt->attr.name);
+			if (err)
+				prt_printf(err, "%s: must be bool",
+					   opt->attr.name);
 			return ret;
 		}
 		break;
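
The new NULL check lets callers ask bch2_opt_parse() to fail silently. A hedged caller sketch — the parameter order is assumed from the function's existing signature, and "not-a-bool" is an arbitrary invalid value:

	u64 res;

	/* NULL err: caller only wants the return code, no message */
	ret = bch2_opt_parse(c, opt, "not-a-bool", &res, NULL);
	if (ret < 0)
		return ret;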
@@ -709,7 +709,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_USR, 0),
-					      POS(QTYP_USR + 1, 0),
+					      POS(QTYP_USR, U64_MAX),
 					      0, NULL);
 		if (ret)
 			return ret;
@@ -721,7 +721,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_GRP, 0),
-					      POS(QTYP_GRP + 1, 0),
+					      POS(QTYP_GRP, U64_MAX),
 					      0, NULL);
 		if (ret)
 			return ret;
@@ -733,7 +733,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_PRJ, 0),
-					      POS(QTYP_PRJ + 1, 0),
+					      POS(QTYP_PRJ, U64_MAX),
 					      0, NULL);
 		if (ret)
 			return ret;
@@ -251,10 +251,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
 	struct bkey_s_c k;
 	int ret;
 
-	for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
-		if (bkey_ge(iter->pos, end))
-			break;
-
+	for_each_btree_key_upto_continue_norestart(*iter, end, 0, k, ret) {
 		if (bkey_extent_is_unwritten(k))
 			continue;
@@ -377,33 +377,22 @@ int bch2_fs_snapshots_start(struct bch_fs *c)
 static int bch2_snapshot_node_set_deleted(struct btree_trans *trans, u32 id)
 {
 	struct btree_iter iter;
-	struct bkey_s_c k;
 	struct bkey_i_snapshot *s;
 	int ret = 0;
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_snapshots, POS(0, id),
 			     BTREE_ITER_INTENT);
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
-
-	if (k.k->type != KEY_TYPE_snapshot) {
-		bch2_fs_inconsistent(trans->c, "missing snapshot %u", id);
-		ret = -ENOENT;
+	s = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
+	ret = PTR_ERR_OR_ZERO(s);
+	if (unlikely(ret)) {
+		bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", id);
 		goto err;
 	}
 
 	/* already deleted? */
-	if (BCH_SNAPSHOT_DELETED(bkey_s_c_to_snapshot(k).v))
+	if (BCH_SNAPSHOT_DELETED(&s->v))
 		goto err;
 
-	s = bch2_trans_kmalloc(trans, sizeof(*s));
-	ret = PTR_ERR_OR_ZERO(s);
-	if (ret)
-		goto err;
-
-	bkey_reassemble(&s->k_i, k);
-
 	SET_BCH_SNAPSHOT_DELETED(&s->v, true);
 	SET_BCH_SNAPSHOT_SUBVOL(&s->v, false);
 	s->v.subvol = 0;
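
bch2_bkey_get_mut_typed() is the pattern for the rest of the snapshot/subvolume conversions below: one typed lookup replaces peek_slot() + key-type check + bch2_trans_kmalloc() + bkey_reassemble(). Judging by the converted checks, a missing key (or one of the wrong type) comes back as ERR_PTR(-ENOENT), which is why the inconsistency report collapses to bch2_fs_inconsistent_on(ret == -ENOENT, ...). Note the subvolume case further down previously returned -EIO for a missing key; it now propagates -ENOENT. The shape, as used in this hunk:

	s = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
	ret = PTR_ERR_OR_ZERO(s);
	if (unlikely(ret)) {
		bch2_fs_inconsistent_on(ret == -ENOENT, trans->c,
					"missing snapshot %u", id);
		goto err;
	}

	/* s is a mutable bkey_i_snapshot; edit the value fields directly */
	SET_BCH_SNAPSHOT_DELETED(&s->v, true);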
@@ -421,7 +410,6 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
 	struct btree_iter iter, p_iter = (struct btree_iter) { NULL };
 	struct bkey_s_c k;
 	struct bkey_s_c_snapshot s;
-	struct bkey_i_snapshot *parent;
 	u32 parent_id;
 	unsigned i;
 	int ret = 0;
@@ -445,27 +433,18 @@ static int bch2_snapshot_node_delete(struct btree_trans *trans, u32 id)
 	parent_id = le32_to_cpu(s.v->parent);
 
 	if (parent_id) {
+		struct bkey_i_snapshot *parent;
+
 		bch2_trans_iter_init(trans, &p_iter, BTREE_ID_snapshots,
 				     POS(0, parent_id),
 				     BTREE_ITER_INTENT);
-		k = bch2_btree_iter_peek_slot(&p_iter);
-		ret = bkey_err(k);
-		if (ret)
-			goto err;
-
-		if (k.k->type != KEY_TYPE_snapshot) {
-			bch2_fs_inconsistent(trans->c, "missing snapshot %u", parent_id);
-			ret = -ENOENT;
+		parent = bch2_bkey_get_mut_typed(trans, &p_iter, snapshot);
+		ret = PTR_ERR_OR_ZERO(parent);
+		if (unlikely(ret)) {
+			bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing snapshot %u", parent_id);
 			goto err;
 		}
 
-		parent = bch2_trans_kmalloc(trans, sizeof(*parent));
-		ret = PTR_ERR_OR_ZERO(parent);
-		if (ret)
-			goto err;
-
-		bkey_reassemble(&parent->k_i, k);
-
 		for (i = 0; i < 2; i++)
 			if (le32_to_cpu(parent->v.children[i]) == id)
 				break;
@@ -522,13 +501,11 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
 		goto err;
 	}
 
-	n = bch2_trans_kmalloc(trans, sizeof(*n));
+	n = bch2_bkey_alloc(trans, &iter, snapshot);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
 		goto err;
 
-	bkey_snapshot_init(&n->k_i);
-	n->k.p		= iter.pos;
 	n->v.flags	= 0;
 	n->v.parent	= cpu_to_le32(parent);
 	n->v.subvol	= cpu_to_le32(snapshot_subvols[i]);
@@ -545,24 +522,14 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
 	if (parent) {
 		bch2_btree_iter_set_pos(&iter, POS(0, parent));
-		k = bch2_btree_iter_peek(&iter);
-		ret = bkey_err(k);
-		if (ret)
-			goto err;
-
-		if (k.k->type != KEY_TYPE_snapshot) {
-			bch_err(trans->c, "snapshot %u not found", parent);
-			ret = -ENOENT;
+		n = bch2_bkey_get_mut_typed(trans, &iter, snapshot);
+		ret = PTR_ERR_OR_ZERO(n);
+		if (unlikely(ret)) {
+			if (ret == -ENOENT)
+				bch_err(trans->c, "snapshot %u not found", parent);
 			goto err;
 		}
 
-		n = bch2_trans_kmalloc(trans, sizeof(*n));
-		ret = PTR_ERR_OR_ZERO(n);
-		if (ret)
-			goto err;
-
-		bkey_reassemble(&n->k_i, k);
-
 		if (n->v.children[0] || n->v.children[1]) {
 			bch_err(trans->c, "Trying to add child snapshot nodes to parent that already has children");
 			ret = -EINVAL;
@@ -967,7 +934,6 @@ int bch2_subvolume_wait_for_pagecache_and_delete_hook(struct btree_trans *trans,
 int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
 {
 	struct btree_iter iter;
-	struct bkey_s_c k;
 	struct bkey_i_subvolume *n;
 	struct subvolume_unlink_hook *h;
 	int ret = 0;
@@ -976,23 +942,13 @@ int bch2_subvolume_unlink(struct btree_trans *trans, u32 subvolid)
 			     POS(0, subvolid),
 			     BTREE_ITER_CACHED|
 			     BTREE_ITER_INTENT);
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
-
-	if (k.k->type != KEY_TYPE_subvolume) {
-		bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvolid);
-		ret = -EIO;
+	n = bch2_bkey_get_mut_typed(trans, &iter, subvolume);
+	ret = PTR_ERR_OR_ZERO(n);
+	if (unlikely(ret)) {
+		bch2_fs_inconsistent_on(ret == -ENOENT, trans->c, "missing subvolume %u", subvolid);
 		goto err;
 	}
 
-	n = bch2_trans_kmalloc(trans, sizeof(*n));
-	ret = PTR_ERR_OR_ZERO(n);
-	if (ret)
-		goto err;
-
-	bkey_reassemble(&n->k_i, k);
-
 	SET_BCH_SUBVOLUME_UNLINKED(&n->v, true);
 
 	ret = bch2_trans_update(trans, &iter, &n->k_i, 0);
@@ -1049,27 +1005,19 @@ found_slot:
 	if (src_subvolid) {
 		/* Creating a snapshot: */
 
-		src_subvol = bch2_trans_kmalloc(trans, sizeof(*src_subvol));
-		ret = PTR_ERR_OR_ZERO(src_subvol);
-		if (ret)
-			goto err;
-
 		bch2_trans_iter_init(trans, &src_iter, BTREE_ID_subvolumes,
 				     POS(0, src_subvolid),
 				     BTREE_ITER_CACHED|
 				     BTREE_ITER_INTENT);
-		k = bch2_btree_iter_peek_slot(&src_iter);
-		ret = bkey_err(k);
-		if (ret)
-			goto err;
-
-		if (k.k->type != KEY_TYPE_subvolume) {
-			bch_err(c, "subvolume %u not found", src_subvolid);
-			ret = -ENOENT;
+		src_subvol = bch2_bkey_get_mut_typed(trans, &src_iter, subvolume);
+		ret = PTR_ERR_OR_ZERO(src_subvol);
+		if (unlikely(ret)) {
+			bch2_fs_inconsistent_on(ret == -ENOENT, trans->c,
						"subvolume %u not found", src_subvolid);
 			goto err;
 		}
 
-		bkey_reassemble(&src_subvol->k_i, k);
-
 		parent = le32_to_cpu(src_subvol->v.snapshot);
 	}
@@ -1086,18 +1034,16 @@ found_slot:
 		goto err;
 	}
 
-	new_subvol = bch2_trans_kmalloc(trans, sizeof(*new_subvol));
+	new_subvol = bch2_bkey_alloc(trans, &dst_iter, subvolume);
 	ret = PTR_ERR_OR_ZERO(new_subvol);
 	if (ret)
 		goto err;
 
-	bkey_subvolume_init(&new_subvol->k_i);
 	new_subvol->v.flags	= 0;
 	new_subvol->v.snapshot	= cpu_to_le32(new_nodes[0]);
 	new_subvol->v.inode	= cpu_to_le64(inode);
 	SET_BCH_SUBVOLUME_RO(&new_subvol->v, ro);
 	SET_BCH_SUBVOLUME_SNAP(&new_subvol->v, src_subvolid != 0);
-	new_subvol->k.p		= dst_iter.pos;
 	ret = bch2_trans_update(trans, &dst_iter, &new_subvol->k_i, 0);
 	if (ret)
 		goto err;
@@ -15,13 +15,14 @@ static void delete_test_keys(struct bch_fs *c)
 	int ret;
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
-				      SPOS(0, 0, U32_MAX), SPOS_MAX,
-				      0,
-				      NULL);
+				      SPOS(0, 0, U32_MAX),
+				      POS(0, U64_MAX),
+				      0, NULL);
 	BUG_ON(ret);
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      SPOS(0, 0, U32_MAX), SPOS_MAX,
+				      SPOS(0, 0, U32_MAX),
+				      POS(0, U64_MAX),
 				      0, NULL);
 	BUG_ON(ret);
 }
@@ -145,8 +146,9 @@ static int test_iterate(struct bch_fs *c, u64 nr)
 	i = 0;
 
-	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
-				  SPOS(0, 0, U32_MAX), 0, k, ({
+	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+				       0, k, ({
 		BUG_ON(k.k->p.offset != i++);
 		0;
 	}));
@@ -211,8 +213,9 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
-	ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
-				  SPOS(0, 0, U32_MAX), 0, k, ({
+	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
+				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+				       0, k, ({
 		BUG_ON(bkey_start_offset(k.k) != i);
 		i = k.k->p.offset;
 		0;
@@ -278,8 +281,9 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 	i = 0;
 
-	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
-				  SPOS(0, 0, U32_MAX), 0, k, ({
+	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+				       0, k, ({
 		BUG_ON(k.k->p.offset != i);
 		i += 2;
 		0;
@@ -295,8 +299,8 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 	i = 0;
 
-	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
-				  SPOS(0, 0, U32_MAX),
+	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 				  BTREE_ITER_SLOTS, k, ({
 		if (i >= nr * 2)
 			break;
@@ -351,8 +355,9 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
-	ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
-				  SPOS(0, 0, U32_MAX), 0, k, ({
+	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
+				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+				       0, k, ({
 		BUG_ON(bkey_start_offset(k.k) != i + 8);
 		BUG_ON(k.k->size != 8);
 		i += 16;
@@ -369,8 +374,8 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
-	ret = for_each_btree_key2(&trans, iter, BTREE_ID_extents,
-				  SPOS(0, 0, U32_MAX),
+	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_extents,
+				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
 				  BTREE_ITER_SLOTS, k, ({
 		if (i == nr)
 			break;
@@ -405,10 +410,10 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
 			     SPOS(0, 0, U32_MAX), 0);
 
-	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
 	bch2_trans_iter_exit(&trans, &iter);
@@ -426,10 +431,10 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
 			     SPOS(0, 0, U32_MAX), 0);
 
-	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
-	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
 	BUG_ON(k.k);
 
 	bch2_trans_iter_exit(&trans, &iter);
@@ -519,7 +524,7 @@ static int test_snapshot_filter(struct bch_fs *c, u32 snapid_lo, u32 snapid_hi)
 	bch2_trans_init(&trans, c, 0, 0);
 	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
 			     SPOS(0, 0, snapid_lo), 0);
-	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+	lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek_upto(&iter, POS(0, U64_MAX))));
 
 	BUG_ON(k.k->p.snapshot != U32_MAX);
@@ -798,8 +803,9 @@ static int seq_lookup(struct bch_fs *c, u64 nr)
 	bch2_trans_init(&trans, c, 0, 0);
 
-	ret = for_each_btree_key2(&trans, iter, BTREE_ID_xattrs,
-				  SPOS(0, 0, U32_MAX), 0, k,
+	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+				       SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
+				       0, k,
 		0);
 	if (ret)
 		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
@@ -839,7 +845,8 @@ static int seq_delete(struct bch_fs *c, u64 nr)
 	int ret;
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      SPOS(0, 0, U32_MAX), SPOS_MAX,
+				      SPOS(0, 0, U32_MAX),
+				      POS(0, U64_MAX),
 				      0, NULL);
 	if (ret)
 		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));