Update bcachefs sources to 5e392aed7a bcachefs: Kill bch2_alloc_write()

Kent Overstreet 2022-03-31 16:57:08 -04:00
parent cc1b64e992
commit 70f2681838
20 changed files with 577 additions and 465 deletions

View File

@@ -1 +1 @@
f638850417c9042dab40511e3c3ed0b1be355301
5e392aed7aa06fd3476d1e5da1248f4d47867fc4

View File

@@ -756,14 +756,6 @@ DEFINE_EVENT(transaction_restart_iter, trans_restart_btree_node_split,
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
DEFINE_EVENT(transaction_restart_iter, trans_restart_mark,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,
enum btree_id btree_id,
struct bpos *pos),
TP_ARGS(trans_fn, caller_ip, btree_id, pos)
);
DEFINE_EVENT(transaction_restart_iter, trans_restart_upgrade,
TP_PROTO(const char *trans_fn,
unsigned long caller_ip,

View File

@@ -27,6 +27,8 @@
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
/* Persistent alloc info: */
static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
BCH_ALLOC_FIELDS_V1()
@@ -42,7 +44,19 @@ const char * const bch2_bucket_states[] = {
NULL
};
/* Persistent alloc info: */
struct bkey_alloc_unpacked {
u64 journal_seq;
u64 bucket;
u8 dev;
u8 gen;
u8 oldest_gen;
u8 data_type;
bool need_discard:1;
bool need_inc_gen:1;
#define x(_name, _bits) u##_bits _name;
BCH_ALLOC_FIELDS_V2()
#undef x
};
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
const void **p, unsigned field)
@@ -164,8 +178,8 @@ static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
out->gen = a.v->gen;
out->oldest_gen = a.v->oldest_gen;
out->data_type = a.v->data_type;
out->need_discard = BCH_ALLOC_NEED_DISCARD(a.v);
out->need_inc_gen = BCH_ALLOC_NEED_INC_GEN(a.v);
out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
out->journal_seq = le64_to_cpu(a.v->journal_seq);
#define x(_name, _bits) \
@@ -187,49 +201,7 @@ static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
return 0;
}
static void bch2_alloc_pack_v3(struct bkey_alloc_buf *dst,
const struct bkey_alloc_unpacked src)
{
struct bkey_i_alloc_v3 *a = bkey_alloc_v3_init(&dst->k);
unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
u8 *out = a->v.data;
u8 *end = (void *) &dst[1];
u8 *last_nonzero_field = out;
unsigned bytes;
a->k.p = POS(src.dev, src.bucket);
a->v.gen = src.gen;
a->v.oldest_gen = src.oldest_gen;
a->v.data_type = src.data_type;
a->v.journal_seq = cpu_to_le64(src.journal_seq);
SET_BCH_ALLOC_NEED_DISCARD(&a->v, src.need_discard);
SET_BCH_ALLOC_NEED_INC_GEN(&a->v, src.need_inc_gen);
#define x(_name, _bits) \
nr_fields++; \
\
if (src._name) { \
out += bch2_varint_encode_fast(out, src._name); \
\
last_nonzero_field = out; \
last_nonzero_fieldnr = nr_fields; \
} else { \
*out++ = 0; \
}
BCH_ALLOC_FIELDS_V2()
#undef x
BUG_ON(out > end);
out = last_nonzero_field;
a->v.nr_fields = last_nonzero_fieldnr;
bytes = (u8 *) out - (u8 *) &a->v;
set_bkey_val_bytes(&a->k, bytes);
memset_u64s_tail(&a->v, 0, bytes);
}
struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
struct bkey_alloc_unpacked ret = {
.dev = k.k->p.inode,
@@ -252,25 +224,71 @@ struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
return ret;
}
struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *trans,
const struct bkey_alloc_unpacked src)
void bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
struct bkey_alloc_buf *dst;
if (k.k->type == KEY_TYPE_alloc_v4) {
*out = *bkey_s_c_to_alloc_v4(k).v;
} else {
struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
dst = bch2_trans_kmalloc(trans, sizeof(struct bkey_alloc_buf));
if (!IS_ERR(dst))
bch2_alloc_pack_v3(dst, src);
return dst;
*out = (struct bch_alloc_v4) {
.journal_seq = u.journal_seq,
.flags = u.need_discard,
.gen = u.gen,
.oldest_gen = u.oldest_gen,
.data_type = u.data_type,
.stripe_redundancy = u.stripe_redundancy,
.dirty_sectors = u.dirty_sectors,
.cached_sectors = u.cached_sectors,
.io_time[READ] = u.read_time,
.io_time[WRITE] = u.write_time,
.stripe = u.stripe,
};
}
}
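A minimal sketch of the read-side pattern this helper enables, mirroring callers converted later in this commit (bch2_bucket_alloc_trans_early() and others); the surrounding btree iteration is assumed:

        struct bch_alloc_v4 a;

        /* Accepts alloc, alloc_v2, alloc_v3 and alloc_v4 keys alike: */
        bch2_alloc_to_v4(k, &a);

        if (bucket_state(a) != BUCKET_free)
                continue;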
int bch2_alloc_write(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_alloc_unpacked *u, unsigned trigger_flags)
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
struct bkey_alloc_buf *a = bch2_alloc_pack(trans, *u);
struct bkey_i_alloc_v4 *ret;
return PTR_ERR_OR_ZERO(a) ?:
bch2_trans_update(trans, iter, &a->k, trigger_flags);
if (k.k->type == KEY_TYPE_alloc_v4) {
ret = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
if (!IS_ERR(ret))
bkey_reassemble(&ret->k_i, k);
} else {
ret = bch2_trans_kmalloc(trans, sizeof(*ret));
if (!IS_ERR(ret)) {
bkey_alloc_v4_init(&ret->k_i);
ret->k.p = k.k->p;
bch2_alloc_to_v4(k, &ret->v);
}
}
return ret;
}
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
struct bpos pos)
{
struct bkey_s_c k;
struct bkey_i_alloc_v4 *a;
int ret;
bch2_trans_iter_init(trans, iter, BTREE_ID_alloc, pos,
BTREE_ITER_WITH_UPDATES|
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(trans, iter);
return ERR_PTR(ret);
}
a = bch2_alloc_to_v4_mut(trans, k);
if (IS_ERR(a))
bch2_trans_iter_exit(trans, iter);
return a;
}
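Together with bch2_alloc_to_v4_mut(), this is the update pattern that replaces bch2_alloc_write() throughout this commit. A sketch, modeled on bch2_bucket_io_time_reset() below (error handling abbreviated; c, dev and bucket_nr are assumed context):

        struct btree_iter iter;
        struct bkey_i_alloc_v4 *a;
        int ret;

        a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
        ret = PTR_ERR_OR_ZERO(a);
        if (ret)
                return ret;

        /* Mutate the v4 value in place, then queue the update: */
        a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);

        ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
              bch2_trans_commit(trans, NULL, NULL, 0);
        bch2_trans_iter_exit(trans, &iter);
        return ret;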
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
@@ -316,28 +334,70 @@ const char *bch2_alloc_v2_invalid(const struct bch_fs *c, struct bkey_s_c k)
const char *bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_alloc_unpacked u;
struct bch_dev *ca;
if (k.k->p.inode >= c->sb.nr_devices ||
!c->devs[k.k->p.inode])
return "invalid device";
ca = bch_dev_bkey_exists(c, k.k->p.inode);
if (k.k->p.offset < ca->mi.first_bucket ||
k.k->p.offset >= ca->mi.nbuckets)
return "invalid bucket";
if (bch2_alloc_unpack_v3(&u, k))
return "unpack error";
return NULL;
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
const char *bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
struct bch_dev *ca;
pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu need_discard %u",
u.gen, u.oldest_gen, bch2_data_types[u.data_type],
u.journal_seq, u.need_discard);
#define x(_name, ...) pr_buf(out, " " #_name " %llu", (u64) u._name);
BCH_ALLOC_FIELDS_V2()
#undef x
if (k.k->p.inode >= c->sb.nr_devices ||
!c->devs[k.k->p.inode])
return "invalid device";
ca = bch_dev_bkey_exists(c, k.k->p.inode);
if (k.k->p.offset < ca->mi.first_bucket ||
k.k->p.offset >= ca->mi.nbuckets)
return "invalid bucket";
return NULL;
}
void bch2_alloc_v4_swab(struct bkey_s k)
{
struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
a->journal_seq = swab64(a->journal_seq);
a->flags = swab32(a->flags);
a->dirty_sectors = swab32(a->dirty_sectors);
a->cached_sectors = swab32(a->cached_sectors);
a->io_time[0] = swab64(a->io_time[0]);
a->io_time[1] = swab64(a->io_time[1]);
a->stripe = swab32(a->stripe);
a->nr_external_backpointers = swab32(a->nr_external_backpointers);
}
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
struct bch_alloc_v4 a;
bch2_alloc_to_v4(k, &a);
pr_buf(out, "gen %u oldest_gen %u data_type %s journal_seq %llu need_discard %llu",
a.gen, a.oldest_gen, bch2_data_types[a.data_type],
a.journal_seq, BCH_ALLOC_V4_NEED_DISCARD(&a));
pr_buf(out, " dirty_sectors %u", a.dirty_sectors);
pr_buf(out, " cached_sectors %u", a.cached_sectors);
pr_buf(out, " stripe %u", a.stripe);
pr_buf(out, " stripe_redundancy %u", a.stripe_redundancy);
pr_buf(out, " read_time %llu", a.io_time[READ]);
pr_buf(out, " write_time %llu", a.io_time[WRITE]);
}
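With these format strings, a bucket might print along the lines of (values invented for illustration):

        gen 4 oldest_gen 3 data_type user journal_seq 8192 need_discard 0 dirty_sectors 480 cached_sectors 0 stripe 0 stripe_redundancy 0 read_time 1024 write_time 2048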
int bch2_alloc_read(struct bch_fs *c)
@@ -345,6 +405,7 @@ int bch2_alloc_read(struct bch_fs *c)
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bch_alloc_v4 a;
struct bch_dev *ca;
int ret;
@@ -353,8 +414,9 @@ int bch2_alloc_read(struct bch_fs *c)
for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
ca = bch_dev_bkey_exists(c, k.k->p.inode);
bch2_alloc_to_v4(k, &a);
*bucket_gen(ca, k.k->p.offset) = bch2_alloc_unpack(k).gen;
*bucket_gen(ca, k.k->p.offset) = a.gen;
}
bch2_trans_iter_exit(&trans, &iter);
@@ -370,11 +432,11 @@ int bch2_alloc_read(struct bch_fs *c)
static int bch2_bucket_do_index(struct btree_trans *trans,
struct bkey_s_c alloc_k,
struct bkey_alloc_unpacked a,
struct bch_alloc_v4 a,
bool set)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, a.dev);
struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
struct btree_iter iter;
struct bkey_s_c old;
struct bkey_i *k;
@@ -399,12 +461,12 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
switch (state) {
case BUCKET_free:
btree = BTREE_ID_freespace;
k->k.p = alloc_freespace_pos(a);
k->k.p = alloc_freespace_pos(alloc_k.k->p, a);
bch2_key_resize(&k->k, 1);
break;
case BUCKET_need_discard:
btree = BTREE_ID_need_discard;
k->k.p = POS(a.dev, a.bucket);
k->k.p = alloc_k.k->p;
break;
default:
return 0;
@@ -443,40 +505,45 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
unsigned flags)
{
struct bch_fs *c = trans->c;
struct bkey_alloc_unpacked old_u = bch2_alloc_unpack(old);
struct bkey_alloc_unpacked new_u = bch2_alloc_unpack(bkey_i_to_s_c(new));
struct bch_alloc_v4 old_a, *new_a;
u64 old_lru, new_lru;
bool need_repack = false;
int ret = 0;
if (new_u.dirty_sectors > old_u.dirty_sectors ||
new_u.cached_sectors > old_u.cached_sectors) {
new_u.read_time = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
new_u.write_time = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
new_u.need_inc_gen = true;
new_u.need_discard = true;
need_repack = true;
/*
* Deletion only happens in the device removal path, with
* BTREE_TRIGGER_NORUN:
*/
BUG_ON(new->k.type != KEY_TYPE_alloc_v4);
bch2_alloc_to_v4(old, &old_a);
new_a = &bkey_i_to_alloc_v4(new)->v;
if (new_a->dirty_sectors > old_a.dirty_sectors ||
new_a->cached_sectors > old_a.cached_sectors) {
new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
}
if (old_u.data_type && !new_u.data_type &&
old_u.gen == new_u.gen &&
if (old_a.data_type && !new_a->data_type &&
old_a.gen == new_a->gen &&
!bch2_bucket_is_open_safe(c, new->k.p.inode, new->k.p.offset)) {
new_u.gen++;
new_u.need_inc_gen = false;
need_repack = true;
new_a->gen++;
SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
}
if (bucket_state(old_u) != bucket_state(new_u) ||
(bucket_state(new_u) == BUCKET_free &&
alloc_freespace_genbits(old_u) != alloc_freespace_genbits(new_u))) {
ret = bch2_bucket_do_index(trans, old, old_u, false) ?:
bch2_bucket_do_index(trans, bkey_i_to_s_c(new), new_u, true);
if (bucket_state(old_a) != bucket_state(*new_a) ||
(bucket_state(*new_a) == BUCKET_free &&
alloc_freespace_genbits(old_a) != alloc_freespace_genbits(*new_a))) {
ret = bch2_bucket_do_index(trans, old, old_a, false) ?:
bch2_bucket_do_index(trans, bkey_i_to_s_c(new), *new_a, true);
if (ret)
return ret;
}
old_lru = alloc_lru_idx(old_u);
new_lru = alloc_lru_idx(new_u);
old_lru = alloc_lru_idx(old_a);
new_lru = alloc_lru_idx(*new_a);
if (old_lru != new_lru) {
ret = bch2_lru_change(trans, new->k.p.inode, new->k.p.offset,
@@ -484,15 +551,10 @@ int bch2_trans_mark_alloc(struct btree_trans *trans,
if (ret)
return ret;
if (new_lru && new_u.read_time != new_lru) {
new_u.read_time = new_lru;
need_repack = true;
}
if (new_lru && new_a->io_time[READ] != new_lru)
new_a->io_time[READ] = new_lru;
}
if (need_repack && !bkey_deleted(&new->k))
bch2_alloc_pack_v3((void *) new, new_u);
return 0;
}
@@ -501,7 +563,7 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter discard_iter, freespace_iter, lru_iter;
struct bkey_alloc_unpacked a;
struct bch_alloc_v4 a;
unsigned discard_key_type, freespace_key_type;
struct bkey_s_c alloc_k, k;
struct printbuf buf = PRINTBUF;
@@ -516,7 +578,7 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
if (ret)
return ret;
a = bch2_alloc_unpack(alloc_k);
bch2_alloc_to_v4(alloc_k, &a);
discard_key_type = bucket_state(a) == BUCKET_need_discard
? KEY_TYPE_set : 0;
freespace_key_type = bucket_state(a) == BUCKET_free
@@ -525,9 +587,9 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard,
alloc_k.k->p, 0);
bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace,
alloc_freespace_pos(a), 0);
alloc_freespace_pos(alloc_k.k->p, a), 0);
bch2_trans_iter_init(trans, &lru_iter, BTREE_ID_lru,
POS(a.dev, a.read_time), 0);
POS(alloc_k.k->p.inode, a.io_time[READ]), 0);
k = bch2_btree_iter_peek_slot(&discard_iter);
ret = bkey_err(k);
@@ -588,43 +650,51 @@ static int bch2_check_alloc_key(struct btree_trans *trans,
}
if (bucket_state(a) == BUCKET_cached) {
if (fsck_err_on(!a.read_time, c,
"cached bucket with read_time 0\n"
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
a.read_time = atomic64_read(&c->io_clock[READ].now);
ret = bch2_lru_change(trans, a.dev, a.bucket,
0, &a.read_time) ?:
bch2_alloc_write(trans, alloc_iter, &a, BTREE_TRIGGER_NORUN);
bch2_trans_commit(trans, NULL, NULL, 0);
if (ret)
goto err;
}
k = bch2_btree_iter_peek_slot(&lru_iter);
ret = bkey_err(k);
if (ret)
goto err;
if (fsck_err_on(k.k->type != KEY_TYPE_lru ||
le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != a.bucket, c,
"incorrect/missing lru entry\n"
" %s\n"
" %s",
if (fsck_err_on(!a.io_time[READ], c,
"cached bucket with read_time 0\n"
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
u64 read_time = a.read_time;
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf)) ||
fsck_err_on(k.k->type != KEY_TYPE_lru ||
le64_to_cpu(bkey_s_c_to_lru(k).v->idx) != alloc_k.k->p.offset, c,
"incorrect/missing lru entry\n"
" %s\n"
" %s",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
(bch2_bkey_val_to_text(&buf2, c, k), buf2.buf))) {
u64 read_time = a.io_time[READ];
ret = bch2_lru_change(trans, a.dev, a.bucket,
0, &a.read_time) ?:
(a.read_time != read_time
? bch2_alloc_write(trans, alloc_iter, &a, BTREE_TRIGGER_NORUN)
: 0) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
if (!a.io_time[READ])
a.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
ret = bch2_lru_change(trans,
alloc_k.k->p.inode,
alloc_k.k->p.offset,
0, &a.io_time[READ]);
if (ret)
goto err;
if (a.io_time[READ] != read_time) {
struct bkey_i_alloc_v4 *a_mut =
bch2_alloc_to_v4_mut(trans, alloc_k);
ret = PTR_ERR_OR_ZERO(a_mut);
if (ret)
goto err;
a_mut->v.io_time[READ] = a.io_time[READ];
ret = bch2_trans_update(trans, alloc_iter,
&a_mut->k_i, BTREE_TRIGGER_NORUN);
if (ret)
goto err;
}
ret = bch2_trans_commit(trans, NULL, NULL, 0);
if (ret)
goto err;
}
@@ -658,7 +728,7 @@ static int bch2_check_freespace_key(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_iter alloc_iter;
struct bkey_s_c k, freespace_k;
struct bkey_alloc_unpacked a;
struct bch_alloc_v4 a;
u64 genbits;
struct bpos pos;
struct bkey_i *update;
@@ -689,7 +759,7 @@ static int bch2_check_freespace_key(struct btree_trans *trans,
if (ret)
goto err;
a = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
if (fsck_err_on(bucket_state(a) != BUCKET_free ||
genbits != alloc_freespace_genbits(a), c,
@@ -773,7 +843,7 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_alloc_unpacked a;
struct bkey_i_alloc_v4 *a;
struct printbuf buf = PRINTBUF;
int ret;
@@ -784,17 +854,20 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
if (ret)
goto out;
a = bch2_alloc_unpack(k);
a = bch2_alloc_to_v4_mut(trans, k);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
if (a.need_inc_gen) {
a.gen++;
a.need_inc_gen = false;
if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
a->v.gen++;
SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
goto write;
}
BUG_ON(a.journal_seq > c->journal.flushed_seq_ondisk);
BUG_ON(a->v.journal_seq > c->journal.flushed_seq_ondisk);
if (bch2_fs_inconsistent_on(!a.need_discard, c,
if (bch2_fs_inconsistent_on(!BCH_ALLOC_V4_NEED_DISCARD(&a->v), c,
"%s\n incorrectly set in need_discard btree",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = -EIO;
@@ -818,9 +891,9 @@ static int bch2_clear_need_discard(struct btree_trans *trans, struct bpos pos,
goto out;
}
a.need_discard = false;
SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
write:
ret = bch2_alloc_write(trans, &iter, &a, 0);
ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
out:
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
@@ -890,7 +963,7 @@ static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
struct bch_fs *c = trans->c;
struct btree_iter lru_iter, alloc_iter = { NULL };
struct bkey_s_c k;
struct bkey_alloc_unpacked a;
struct bkey_i_alloc_v4 *a;
u64 bucket, idx;
int ret;
@@ -911,32 +984,27 @@ static int invalidate_one_bucket(struct btree_trans *trans, struct bch_dev *ca)
idx = k.k->p.offset;
bucket = le64_to_cpu(bkey_s_c_to_lru(k).v->idx);
bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
POS(ca->dev_idx, bucket),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&alloc_iter);
ret = bkey_err(k);
a = bch2_trans_start_alloc_update(trans, &alloc_iter,
POS(ca->dev_idx, bucket));
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
a = bch2_alloc_unpack(k);
if (bch2_fs_inconsistent_on(idx != alloc_lru_idx(a), c,
if (bch2_fs_inconsistent_on(idx != alloc_lru_idx(a->v), c,
"invalidating bucket with wrong lru idx (got %llu should be %llu",
idx, alloc_lru_idx(a)))
idx, alloc_lru_idx(a->v)))
goto out;
a.gen++;
a.need_inc_gen = false;
a.data_type = 0;
a.dirty_sectors = 0;
a.cached_sectors = 0;
a.read_time = atomic64_read(&c->io_clock[READ].now);
a.write_time = atomic64_read(&c->io_clock[WRITE].now);
SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
a->v.gen++;
a->v.data_type = 0;
a->v.dirty_sectors = 0;
a->v.cached_sectors = 0;
a->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
a->v.io_time[WRITE] = atomic64_read(&c->io_clock[WRITE].now);
ret = bch2_alloc_write(trans, &alloc_iter, &a,
BTREE_TRIGGER_BUCKET_INVALIDATE);
ret = bch2_trans_update(trans, &alloc_iter, &a->k_i,
BTREE_TRIGGER_BUCKET_INVALIDATE);
out:
bch2_trans_iter_exit(trans, &alloc_iter);
bch2_trans_iter_exit(trans, &lru_iter);
@@ -975,7 +1043,7 @@ static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_alloc_unpacked a;
struct bch_alloc_v4 a;
struct bch_member *m;
int ret;
@@ -988,7 +1056,7 @@ static int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca)
if (iter.pos.offset >= ca->mi.nbuckets)
break;
a = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
ret = __bch2_trans_do(&trans, NULL, NULL,
BTREE_INSERT_LAZY_RW,
bch2_bucket_do_index(&trans, k, a, true));
@@ -1058,29 +1126,22 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_alloc_unpacked u;
u64 *time, now;
struct bkey_i_alloc_v4 *a;
u64 now;
int ret = 0;
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS(dev, bucket_nr),
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
ret = PTR_ERR_OR_ZERO(a);
if (ret)
goto out;
return ret;
u = bch2_alloc_unpack(k);
time = rw == READ ? &u.read_time : &u.write_time;
now = atomic64_read(&c->io_clock[rw].now);
if (*time == now)
if (a->v.io_time[rw] == now)
goto out;
*time = now;
a->v.io_time[rw] = now;
ret = bch2_alloc_write(trans, &iter, &u, 0) ?:
ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
out:
bch2_trans_iter_exit(trans, &iter);

View File

@@ -8,24 +8,10 @@
#include "debug.h"
#include "super.h"
struct bkey_alloc_unpacked {
u64 journal_seq;
u64 bucket;
u8 dev;
u8 gen;
u8 oldest_gen;
u8 data_type;
bool need_discard:1;
bool need_inc_gen:1;
#define x(_name, _bits) u##_bits _name;
BCH_ALLOC_FIELDS_V2()
#undef x
};
/* How out of date a pointer gen is allowed to be: */
#define BUCKET_GC_GEN_MAX 96U
static inline u8 alloc_gc_gen(struct bkey_alloc_unpacked a)
static inline u8 alloc_gc_gen(struct bch_alloc_v4 a)
{
return a.gen - a.oldest_gen;
}
@@ -40,62 +26,41 @@ enum bucket_state {
extern const char * const bch2_bucket_states[];
static inline enum bucket_state bucket_state(struct bkey_alloc_unpacked a)
static inline enum bucket_state bucket_state(struct bch_alloc_v4 a)
{
if (a.dirty_sectors || a.stripe)
return BUCKET_dirty;
if (a.cached_sectors)
return BUCKET_cached;
BUG_ON(a.data_type);
if (a.need_discard)
if (BCH_ALLOC_V4_NEED_DISCARD(&a))
return BUCKET_need_discard;
if (alloc_gc_gen(a) >= BUCKET_GC_GEN_MAX)
return BUCKET_need_gc_gens;
return BUCKET_free;
}
static inline u64 alloc_lru_idx(struct bkey_alloc_unpacked a)
static inline u64 alloc_lru_idx(struct bch_alloc_v4 a)
{
return bucket_state(a) == BUCKET_cached ? a.read_time : 0;
return bucket_state(a) == BUCKET_cached ? a.io_time[READ] : 0;
}
static inline u64 alloc_freespace_genbits(struct bkey_alloc_unpacked a)
static inline u64 alloc_freespace_genbits(struct bch_alloc_v4 a)
{
return ((u64) alloc_gc_gen(a) >> 4) << 56;
}
static inline struct bpos alloc_freespace_pos(struct bkey_alloc_unpacked a)
static inline struct bpos alloc_freespace_pos(struct bpos pos, struct bch_alloc_v4 a)
{
return POS(a.dev, a.bucket | alloc_freespace_genbits(a));
pos.offset |= alloc_freespace_genbits(a);
return pos;
}
/* returns true if not equal */
static inline bool bkey_alloc_unpacked_cmp(struct bkey_alloc_unpacked l,
struct bkey_alloc_unpacked r)
{
return l.gen != r.gen ||
l.oldest_gen != r.oldest_gen ||
l.data_type != r.data_type
#define x(_name, ...) || l._name != r._name
BCH_ALLOC_FIELDS_V2()
#undef x
;
}
struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update(struct btree_trans *, struct btree_iter *, struct bpos);
struct bkey_alloc_buf {
struct bkey_i k;
struct bch_alloc_v3 v;
#define x(_name, _bits) + _bits / 8
u8 _pad[0 + BCH_ALLOC_FIELDS_V2()];
#undef x
} __attribute__((packed, aligned(8)));
struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c);
struct bkey_alloc_buf *bch2_alloc_pack(struct btree_trans *,
const struct bkey_alloc_unpacked);
int bch2_alloc_write(struct btree_trans *, struct btree_iter *,
struct bkey_alloc_unpacked *, unsigned);
void bch2_alloc_to_v4(struct bkey_s_c, struct bch_alloc_v4 *);
struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s_c);
int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
@@ -104,6 +69,8 @@ int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
const char *bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c);
const char *bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c);
const char *bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c);
const char *bch2_alloc_v4_invalid(const struct bch_fs *, struct bkey_s_c k);
void bch2_alloc_v4_swab(struct bkey_s);
void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
#define bch2_bkey_ops_alloc (struct bkey_ops) { \
@@ -127,6 +94,14 @@ void bch2_alloc_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
.atomic_trigger = bch2_mark_alloc, \
}
#define bch2_bkey_ops_alloc_v4 (struct bkey_ops) { \
.key_invalid = bch2_alloc_v4_invalid, \
.val_to_text = bch2_alloc_to_text, \
.swab = bch2_alloc_v4_swab, \
.trans_trigger = bch2_trans_mark_alloc, \
.atomic_trigger = bch2_mark_alloc, \
}
static inline bool bkey_is_alloc(const struct bkey *k)
{
return k->type == KEY_TYPE_alloc ||

View File

@@ -190,8 +190,9 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
}
static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
u64 bucket,
enum alloc_reserve reserve,
struct bkey_alloc_unpacked a,
struct bch_alloc_v4 *a,
u64 *skipped_open,
u64 *skipped_need_journal_commit,
u64 *skipped_nouse,
@@ -199,18 +200,18 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
{
struct open_bucket *ob;
if (unlikely(ca->buckets_nouse && test_bit(a.bucket, ca->buckets_nouse))) {
if (unlikely(ca->buckets_nouse && test_bit(bucket, ca->buckets_nouse))) {
(*skipped_nouse)++;
return NULL;
}
if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket)) {
if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
(*skipped_open)++;
return NULL;
}
if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk, ca->dev_idx, a.bucket)) {
c->journal.flushed_seq_ondisk, ca->dev_idx, bucket)) {
(*skipped_need_journal_commit)++;
return NULL;
}
@@ -231,7 +232,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
}
/* Recheck under lock: */
if (bch2_bucket_is_open(c, ca->dev_idx, a.bucket)) {
if (bch2_bucket_is_open(c, ca->dev_idx, bucket)) {
spin_unlock(&c->freelist_lock);
(*skipped_open)++;
return NULL;
@@ -245,8 +246,8 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
ob->sectors_free = ca->mi.bucket_size;
ob->alloc_reserve = reserve;
ob->dev = ca->dev_idx;
ob->gen = a.gen;
ob->bucket = a.bucket;
ob->gen = a->gen;
ob->bucket = bucket;
spin_unlock(&ob->lock);
ca->nr_open_buckets++;
@@ -283,7 +284,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
struct btree_iter iter;
struct bkey_s_c k;
struct open_bucket *ob;
struct bkey_alloc_unpacked a;
struct bch_alloc_v4 a;
u64 b = free_entry & ~(~0ULL << 56);
unsigned genbits = free_entry >> 56;
struct printbuf buf = PRINTBUF;
@@ -297,7 +298,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
goto err;
}
a = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
if (bch2_fs_inconsistent_on(bucket_state(a) != BUCKET_free, c,
"non free bucket in freespace btree (state %s)\n"
@@ -326,7 +327,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
goto err;
}
ob = __try_alloc_bucket(c, ca, reserve, a,
ob = __try_alloc_bucket(c, ca, b, reserve, &a,
skipped_open,
skipped_need_journal_commit,
skipped_nouse,
@@ -390,7 +391,7 @@ bch2_bucket_alloc_trans_early(struct btree_trans *trans,
for_each_btree_key(trans, iter, BTREE_ID_alloc, POS(ca->dev_idx, *cur_bucket),
BTREE_ITER_SLOTS, k, ret) {
struct bkey_alloc_unpacked a;
struct bch_alloc_v4 a;
if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
break;
@@ -399,14 +400,14 @@ bch2_bucket_alloc_trans_early(struct btree_trans *trans,
is_superblock_bucket(ca, k.k->p.offset))
continue;
a = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
if (bucket_state(a) != BUCKET_free)
continue;
(*buckets_seen)++;
ob = __try_alloc_bucket(trans->c, ca, reserve, a,
ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, &a,
skipped_open,
skipped_need_journal_commit,
skipped_nouse,

View File

@@ -394,6 +394,7 @@ enum gc_phase {
GC_PHASE_BTREE_lru,
GC_PHASE_BTREE_freespace,
GC_PHASE_BTREE_need_discard,
GC_PHASE_BTREE_backpointers,
GC_PHASE_PENDING_DELETE,
};

View File

@@ -78,6 +78,21 @@
#include <linux/uuid.h>
#include "vstructs.h"
#define BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
\
static inline __u64 name(const type *k) \
{ \
return (k->field >> offset) & ~(~0ULL << (end - offset)); \
} \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
k->field &= ~(~(~0ULL << (end - offset)) << offset); \
k->field |= (v & ~(~0ULL << (end - offset))) << offset; \
}
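BITMASK() is the native-endian counterpart of LE_BITMASK() below; it packs a small field into bits [offset, end) of a struct member and generates a getter/setter pair. For the bch_alloc_v4 flags declared further down, usage looks like this (a sketch matching bch2_clear_need_discard() above):

        if (BCH_ALLOC_V4_NEED_DISCARD(&a->v))                   /* read bit 0 of a->v.flags */
                SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);    /* clear it */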
#define LE_BITMASK(_bits, name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
@@ -349,7 +364,8 @@ static inline void bkey_init(struct bkey *k)
x(inode_v2, 23) \
x(alloc_v3, 24) \
x(set, 25) \
x(lru, 26)
x(lru, 26) \
x(alloc_v4, 27)
enum bch_bkey_type {
#define x(name, nr) KEY_TYPE_##name = nr,
@@ -899,8 +915,29 @@ struct bch_alloc_v3 {
__u8 data[];
} __attribute__((packed, aligned(8)));
LE32_BITMASK(BCH_ALLOC_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
LE32_BITMASK(BCH_ALLOC_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
struct bch_alloc_v4 {
struct bch_val v;
__u64 journal_seq;
__u32 flags;
__u8 gen;
__u8 oldest_gen;
__u8 data_type;
__u8 stripe_redundancy;
__u32 dirty_sectors;
__u32 cached_sectors;
__u64 io_time[2];
__u32 stripe;
__u32 nr_external_backpointers;
struct bpos backpointers[0];
} __attribute__((packed, aligned(8)));
LE32_BITMASK(BCH_ALLOC_V3_NEED_DISCARD,struct bch_alloc_v3, flags, 0, 1)
LE32_BITMASK(BCH_ALLOC_V3_NEED_INC_GEN,struct bch_alloc_v3, flags, 1, 2)
BITMASK(BCH_ALLOC_V4_NEED_DISCARD, struct bch_alloc_v4, flags, 0, 1)
BITMASK(BCH_ALLOC_V4_NEED_INC_GEN, struct bch_alloc_v4, flags, 1, 2)
BITMASK(BCH_ALLOC_V4_BACKPOINTERS_START,struct bch_alloc_v4, flags, 2, 8)
BITMASK(BCH_ALLOC_V4_NR_BACKPOINTERS, struct bch_alloc_v4, flags, 8, 14)
enum {
#define x(name, _bits) BCH_ALLOC_FIELD_V1_##name,
@@ -1322,7 +1359,8 @@ struct bch_sb_field_journal_seq_blacklist {
x(reflink_p_fix, 16) \
x(subvol_dirent, 17) \
x(inode_v2, 18) \
x(freespace, 19)
x(freespace, 19) \
x(alloc_v4, 20)
enum bcachefs_metadata_version {
bcachefs_metadata_version_min = 9,
@@ -1849,7 +1887,8 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
x(snapshots, 9) \
x(lru, 10) \
x(freespace, 11) \
x(need_discard, 12)
x(need_discard, 12) \
x(backpointers, 13)
enum btree_id {
#define x(kwd, val) BTREE_ID_##kwd = val,

View File

@@ -149,7 +149,8 @@ static unsigned bch2_key_types_allowed[] = {
(1U << KEY_TYPE_deleted)|
(1U << KEY_TYPE_alloc)|
(1U << KEY_TYPE_alloc_v2)|
(1U << KEY_TYPE_alloc_v3),
(1U << KEY_TYPE_alloc_v3)|
(1U << KEY_TYPE_alloc_v4),
[BKEY_TYPE_quotas] =
(1U << KEY_TYPE_deleted)|
(1U << KEY_TYPE_quota),

View File

@@ -1309,6 +1309,19 @@ static int bch2_gc_start(struct bch_fs *c,
return 0;
}
/* returns true if not equal */
static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
struct bch_alloc_v4 r)
{
return l.gen != r.gen ||
l.oldest_gen != r.oldest_gen ||
l.data_type != r.data_type ||
l.dirty_sectors != r.dirty_sectors ||
l.cached_sectors != r.cached_sectors ||
l.stripe_redundancy != r.stripe_redundancy ||
l.stripe != r.stripe;
}
static int bch2_alloc_write_key(struct btree_trans *trans,
struct btree_iter *iter,
bool metadata_only)
@@ -1317,8 +1330,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
struct bucket gc;
struct bkey_s_c k;
struct bkey_alloc_unpacked old_u, new_u;
struct bkey_alloc_buf *a;
struct bkey_i_alloc_v4 *a;
struct bch_alloc_v4 old, new;
int ret;
k = bch2_btree_iter_peek_slot(iter);
@@ -1326,7 +1339,8 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
if (ret)
return ret;
old_u = new_u = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &old);
new = old;
percpu_down_read(&c->mark_lock);
gc = *gc_bucket(ca, iter->pos.offset);
@@ -1338,36 +1352,38 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
gc.data_type != BCH_DATA_btree)
return 0;
if (gen_after(old_u.gen, gc.gen))
if (gen_after(old.gen, gc.gen))
return 0;
#define copy_bucket_field(_f) \
if (fsck_err_on(new_u._f != gc._f, c, \
if (fsck_err_on(new._f != gc._f, c, \
"bucket %llu:%llu gen %u data type %s has wrong " #_f \
": got %u, should be %u", \
iter->pos.inode, iter->pos.offset, \
gc.gen, \
bch2_data_types[gc.data_type], \
new_u._f, gc._f)) \
new_u._f = gc._f; \
new._f, gc._f)) \
new._f = gc._f; \
copy_bucket_field(gen);
copy_bucket_field(data_type);
copy_bucket_field(stripe);
copy_bucket_field(dirty_sectors);
copy_bucket_field(cached_sectors);
copy_bucket_field(stripe_redundancy);
copy_bucket_field(stripe);
#undef copy_bucket_field
if (!bkey_alloc_unpacked_cmp(old_u, new_u))
if (!bch2_alloc_v4_cmp(old, new))
return 0;
a = bch2_alloc_pack(trans, new_u);
if (IS_ERR(a))
return PTR_ERR(a);
a = bch2_alloc_to_v4_mut(trans, k);
ret = PTR_ERR_OR_ZERO(a);
if (ret)
return ret;
ret = bch2_trans_update(trans, iter, &a->k, BTREE_TRIGGER_NORUN);
a->v = new;
ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
fsck_err:
return ret;
}
@@ -1418,7 +1434,7 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
struct btree_iter iter;
struct bkey_s_c k;
struct bucket *g;
struct bkey_alloc_unpacked u;
struct bch_alloc_v4 a;
unsigned i;
int ret;
@@ -1443,20 +1459,21 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
BTREE_ITER_PREFETCH, k, ret) {
ca = bch_dev_bkey_exists(c, k.k->p.inode);
g = gc_bucket(ca, k.k->p.offset);
u = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
g->gen_valid = 1;
g->gen = u.gen;
g->gen = a.gen;
if (metadata_only &&
(u.data_type == BCH_DATA_user ||
u.data_type == BCH_DATA_cached ||
u.data_type == BCH_DATA_parity)) {
g->data_type = u.data_type;
g->dirty_sectors = u.dirty_sectors;
g->cached_sectors = u.cached_sectors;
g->stripe = u.stripe;
g->stripe_redundancy = u.stripe_redundancy;
(a.data_type == BCH_DATA_user ||
a.data_type == BCH_DATA_cached ||
a.data_type == BCH_DATA_parity)) {
g->data_type = a.data_type;
g->dirty_sectors = a.dirty_sectors;
g->cached_sectors = a.cached_sectors;
g->stripe = a.stripe;
g->stripe_redundancy = a.stripe_redundancy;
}
}
bch2_trans_iter_exit(&trans, &iter);
@@ -1890,7 +1907,8 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
{
struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
struct bkey_s_c k;
struct bkey_alloc_unpacked u;
struct bch_alloc_v4 a;
struct bkey_i_alloc_v4 *a_mut;
int ret;
k = bch2_btree_iter_peek_slot(iter);
@@ -1898,14 +1916,19 @@ static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_i
if (ret)
return ret;
u = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
if (u.oldest_gen == ca->oldest_gen[iter->pos.offset])
if (a.oldest_gen == ca->oldest_gen[iter->pos.offset])
return 0;
u.oldest_gen = ca->oldest_gen[iter->pos.offset];
a_mut = bch2_alloc_to_v4_mut(trans, k);
ret = PTR_ERR_OR_ZERO(a_mut);
if (ret)
return ret;
return bch2_alloc_write(trans, iter, &u, 0);
a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}
int bch2_gc_gens(struct bch_fs *c)

View File

@@ -669,6 +669,7 @@ enum btree_update_flags {
((1U << KEY_TYPE_alloc)| \
(1U << KEY_TYPE_alloc_v2)| \
(1U << KEY_TYPE_alloc_v3)| \
(1U << KEY_TYPE_alloc_v4)| \
(1U << KEY_TYPE_stripe)| \
(1U << KEY_TYPE_inode)| \
(1U << KEY_TYPE_inode_v2)| \

View File

@@ -457,7 +457,7 @@ static int run_one_mem_trigger(struct btree_trans *trans,
}
static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_entry *i,
bool overwrite)
bool overwrite)
{
/*
* Transactional triggers create new btree_insert_entries, so we can't
@@ -466,42 +466,31 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
*/
struct bkey old_k = i->old_k;
struct bkey_s_c old = { &old_k, i->old_v };
int ret = 0;
if ((i->flags & BTREE_TRIGGER_NORUN) ||
!(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)))
return 0;
if (!overwrite) {
if (i->insert_trigger_run)
return 0;
BUG_ON(i->overwrite_trigger_run);
i->insert_trigger_run = true;
} else {
if (i->overwrite_trigger_run)
return 0;
BUG_ON(!i->insert_trigger_run);
i->overwrite_trigger_run = true;
}
if (overwrite) {
ret = bch2_trans_mark_old(trans, old, i->flags);
} else if (bch2_bkey_ops[old.k->type].trans_trigger ==
bch2_bkey_ops[i->k->k.type].trans_trigger &&
if (!i->insert_trigger_run &&
!i->overwrite_trigger_run &&
bch2_bkey_ops[old.k->type].trans_trigger ==
bch2_bkey_ops[i->k->k.type].trans_trigger &&
((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
i->overwrite_trigger_run = true;
ret = bch2_trans_mark_key(trans, old, i->k,
BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|i->flags);
i->insert_trigger_run = true;
return bch2_trans_mark_key(trans, old, i->k,
BTREE_TRIGGER_INSERT|
BTREE_TRIGGER_OVERWRITE|
i->flags) ?: 1;
} else if (overwrite && !i->overwrite_trigger_run) {
i->overwrite_trigger_run = true;
return bch2_trans_mark_old(trans, old, i->flags) ?: 1;
} else if (!i->insert_trigger_run) {
i->insert_trigger_run = true;
return bch2_trans_mark_new(trans, i->k, i->flags) ?: 1;
} else {
ret = bch2_trans_mark_new(trans, i->k, i->flags);
return 0;
}
if (ret == -EINTR)
trace_trans_restart_mark(trans->fn, _RET_IP_,
i->btree_id, &i->path->pos);
return ret ?: 1;
}
static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
@@ -511,7 +500,7 @@ static int run_btree_triggers(struct btree_trans *trans, enum btree_id btree_id,
bool trans_trigger_run;
int ret, overwrite;
for (overwrite = 0; overwrite < 2; overwrite++) {
for (overwrite = 1; overwrite >= 0; --overwrite) {
/*
* Running triggers will append more updates to the list of updates as

View File

@@ -279,20 +279,20 @@ bch2_fs_usage_read_short(struct bch_fs *c)
return ret;
}
static inline int is_unavailable_bucket(struct bkey_alloc_unpacked a)
static inline int is_unavailable_bucket(struct bch_alloc_v4 a)
{
return a.dirty_sectors || a.stripe;
}
static inline int bucket_sectors_fragmented(struct bch_dev *ca,
struct bkey_alloc_unpacked a)
struct bch_alloc_v4 a)
{
return a.dirty_sectors
? max(0, (int) ca->mi.bucket_size - (int) a.dirty_sectors)
: 0;
}
static inline enum bch_data_type bucket_type(struct bkey_alloc_unpacked a)
static inline enum bch_data_type bucket_type(struct bch_alloc_v4 a)
{
return a.cached_sectors && !a.dirty_sectors
? BCH_DATA_cached
@@ -311,8 +311,8 @@ static inline void account_bucket(struct bch_fs_usage *fs_usage,
}
static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
struct bkey_alloc_unpacked old,
struct bkey_alloc_unpacked new,
struct bch_alloc_v4 old,
struct bch_alloc_v4 new,
u64 journal_seq, bool gc)
{
struct bch_fs_usage *fs_usage;
@@ -349,14 +349,14 @@ static void bch2_dev_usage_update_m(struct bch_fs *c, struct bch_dev *ca,
struct bucket old, struct bucket new,
u64 journal_seq, bool gc)
{
struct bkey_alloc_unpacked old_a = {
struct bch_alloc_v4 old_a = {
.gen = old.gen,
.data_type = old.data_type,
.dirty_sectors = old.dirty_sectors,
.cached_sectors = old.cached_sectors,
.stripe = old.stripe,
};
struct bkey_alloc_unpacked new_a = {
struct bch_alloc_v4 new_a = {
.gen = new.gen,
.data_type = new.data_type,
.dirty_sectors = new.dirty_sectors,
@@ -506,13 +506,12 @@ int bch2_mark_alloc(struct btree_trans *trans,
bool gc = flags & BTREE_TRIGGER_GC;
u64 journal_seq = trans->journal_res.seq;
struct bch_fs *c = trans->c;
struct bkey_alloc_unpacked old_u = bch2_alloc_unpack(old);
struct bkey_alloc_unpacked new_u = bch2_alloc_unpack(new);
struct bch_dev *ca = bch_dev_bkey_exists(c, new_u.dev);
struct bch_alloc_v4 old_a, new_a;
struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
int ret = 0;
if (bch2_trans_inconsistent_on(new_u.bucket < ca->mi.first_bucket ||
new_u.bucket >= ca->mi.nbuckets, trans,
if (bch2_trans_inconsistent_on(new.k->p.offset < ca->mi.first_bucket ||
new.k->p.offset >= ca->mi.nbuckets, trans,
"alloc key outside range of device's buckets"))
return -EIO;
@@ -523,11 +522,13 @@ int bch2_mark_alloc(struct btree_trans *trans,
!(flags & BTREE_TRIGGER_BUCKET_INVALIDATE))
return 0;
bch2_alloc_to_v4(old, &old_a);
bch2_alloc_to_v4(new, &new_a);
if ((flags & BTREE_TRIGGER_INSERT) &&
!old_u.data_type != !new_u.data_type &&
new.k->type == KEY_TYPE_alloc_v3) {
struct bch_alloc_v3 *v = (struct bch_alloc_v3 *) new.v;
u64 old_journal_seq = le64_to_cpu(v->journal_seq);
!old_a.data_type != !new_a.data_type &&
new.k->type == KEY_TYPE_alloc_v4) {
struct bch_alloc_v4 *v = (struct bch_alloc_v4 *) new.v;
BUG_ON(!journal_seq);
@@ -536,18 +537,18 @@ int bch2_mark_alloc(struct btree_trans *trans,
before the bucket became empty again, then we don't have
* to wait on a journal flush before we can reuse the bucket:
*/
new_u.journal_seq = !new_u.data_type &&
(journal_seq == old_journal_seq ||
bch2_journal_noflush_seq(&c->journal, old_journal_seq))
new_a.journal_seq = !new_a.data_type &&
(journal_seq == v->journal_seq ||
bch2_journal_noflush_seq(&c->journal, v->journal_seq))
? 0 : journal_seq;
v->journal_seq = cpu_to_le64(new_u.journal_seq);
v->journal_seq = new_a.journal_seq;
}
if (old_u.data_type && !new_u.data_type && new_u.journal_seq) {
if (old_a.data_type && !new_a.data_type && new_a.journal_seq) {
ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
c->journal.flushed_seq_ondisk,
new_u.dev, new_u.bucket,
new_u.journal_seq);
new.k->p.inode, new.k->p.offset,
new_a.journal_seq);
if (ret) {
bch2_fs_fatal_error(c,
"error setting bucket_needs_journal_commit: %i", ret);
@@ -555,43 +556,43 @@ int bch2_mark_alloc(struct btree_trans *trans,
}
}
if (!new_u.data_type &&
(!new_u.journal_seq || new_u.journal_seq < c->journal.flushed_seq_ondisk))
if (!new_a.data_type &&
(!new_a.journal_seq || new_a.journal_seq < c->journal.flushed_seq_ondisk))
closure_wake_up(&c->freelist_wait);
if ((flags & BTREE_TRIGGER_INSERT) &&
new_u.need_discard &&
!new_u.journal_seq)
BCH_ALLOC_V4_NEED_DISCARD(&new_a) &&
!new_a.journal_seq)
bch2_do_discards(c);
if (!old_u.data_type &&
new_u.data_type &&
if (!old_a.data_type &&
new_a.data_type &&
should_invalidate_buckets(ca))
bch2_do_invalidates(c);
if (bucket_state(new_u) == BUCKET_need_gc_gens) {
if (bucket_state(new_a) == BUCKET_need_gc_gens) {
atomic_inc(&c->kick_gc);
wake_up_process(c->gc_thread);
}
percpu_down_read(&c->mark_lock);
if (!gc && new_u.gen != old_u.gen)
*bucket_gen(ca, new_u.bucket) = new_u.gen;
if (!gc && new_a.gen != old_a.gen)
*bucket_gen(ca, new.k->p.offset) = new_a.gen;
bch2_dev_usage_update(c, ca, old_u, new_u, journal_seq, gc);
bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, gc);
if (gc) {
struct bucket *g = gc_bucket(ca, new_u.bucket);
struct bucket *g = gc_bucket(ca, new.k->p.offset);
bucket_lock(g);
g->gen_valid = 1;
g->gen = new_u.gen;
g->data_type = new_u.data_type;
g->stripe = new_u.stripe;
g->stripe_redundancy = new_u.stripe_redundancy;
g->dirty_sectors = new_u.dirty_sectors;
g->cached_sectors = new_u.cached_sectors;
g->gen = new_a.gen;
g->data_type = new_a.data_type;
g->stripe = new_a.stripe;
g->stripe_redundancy = new_a.stripe_redundancy;
g->dirty_sectors = new_a.dirty_sectors;
g->cached_sectors = new_a.cached_sectors;
bucket_unlock(g);
}
@@ -603,17 +604,17 @@ int bch2_mark_alloc(struct btree_trans *trans,
*/
if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
old_u.cached_sectors) {
old_a.cached_sectors) {
ret = update_cached_sectors(c, new, ca->dev_idx,
-old_u.cached_sectors,
-old_a.cached_sectors,
journal_seq, gc);
if (ret) {
bch2_fs_fatal_error(c, "bch2_mark_alloc(): no replicas entry while updating cached sectors");
return ret;
}
trace_invalidate(ca, bucket_to_sector(ca, new_u.bucket),
old_u.cached_sectors);
trace_invalidate(ca, bucket_to_sector(ca, new.k->p.offset),
old_a.cached_sectors);
}
return 0;
@@ -1385,50 +1386,25 @@ need_mark:
/* trans_mark: */
static int bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
const struct bch_extent_ptr *ptr,
struct bkey_alloc_unpacked *u)
{
struct bch_fs *c = trans->c;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bkey_s_c k;
int ret;
bch2_trans_iter_init(trans, iter, BTREE_ID_alloc,
POS(ptr->dev, PTR_BUCKET_NR(ca, ptr)),
BTREE_ITER_WITH_UPDATES|
BTREE_ITER_CACHED|
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(trans, iter);
return ret;
}
*u = bch2_alloc_unpack(k);
return 0;
}
static int bch2_trans_mark_pointer(struct btree_trans *trans,
struct bkey_s_c k, struct extent_ptr_decoded p,
s64 sectors, enum bch_data_type data_type)
{
struct btree_iter iter;
struct bkey_alloc_unpacked u;
struct bkey_i_alloc_v4 *a;
int ret;
ret = bch2_trans_start_alloc_update(trans, &iter, &p.ptr, &u);
if (ret)
return ret;
a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(trans->c, &p.ptr));
if (IS_ERR(a))
return PTR_ERR(a);
ret = __mark_pointer(trans, k, &p.ptr, sectors, data_type,
u.gen, &u.data_type,
&u.dirty_sectors, &u.cached_sectors);
a->v.gen, &a->v.data_type,
&a->v.dirty_sectors, &a->v.cached_sectors);
if (ret)
goto out;
ret = bch2_alloc_write(trans, &iter, &u, 0);
ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
if (ret)
goto out;
out:
@@ -1561,7 +1537,7 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
struct bch_fs *c = trans->c;
const struct bch_extent_ptr *ptr = &s.v->ptrs[idx];
struct btree_iter iter;
struct bkey_alloc_unpacked u;
struct bkey_i_alloc_v4 *a;
enum bch_data_type data_type = idx >= s.v->nr_blocks - s.v->nr_redundant
? BCH_DATA_parity : 0;
s64 sectors = data_type ? le16_to_cpu(s.v->sectors) : 0;
@@ -1570,59 +1546,59 @@ static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
if (deleting)
sectors = -sectors;
ret = bch2_trans_start_alloc_update(trans, &iter, ptr, &u);
if (ret)
return ret;
a = bch2_trans_start_alloc_update(trans, &iter, PTR_BUCKET_POS(c, ptr));
if (IS_ERR(a))
return PTR_ERR(a);
ret = check_bucket_ref(c, s.s_c, ptr, sectors, data_type,
u.gen, u.data_type,
u.dirty_sectors, u.cached_sectors);
a->v.gen, a->v.data_type,
a->v.dirty_sectors, a->v.cached_sectors);
if (ret)
goto err;
if (!deleting) {
if (bch2_trans_inconsistent_on(u.stripe ||
u.stripe_redundancy, trans,
if (bch2_trans_inconsistent_on(a->v.stripe ||
a->v.stripe_redundancy, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)",
iter.pos.inode, iter.pos.offset, u.gen,
bch2_data_types[u.data_type],
u.dirty_sectors,
u.stripe, s.k->p.offset)) {
iter.pos.inode, iter.pos.offset, a->v.gen,
bch2_data_types[a->v.data_type],
a->v.dirty_sectors,
a->v.stripe, s.k->p.offset)) {
ret = -EIO;
goto err;
}
if (bch2_trans_inconsistent_on(data_type && u.dirty_sectors, trans,
if (bch2_trans_inconsistent_on(data_type && a->v.dirty_sectors, trans,
"bucket %llu:%llu gen %u data type %s dirty_sectors %u: data already in stripe bucket %llu",
iter.pos.inode, iter.pos.offset, u.gen,
bch2_data_types[u.data_type],
u.dirty_sectors,
iter.pos.inode, iter.pos.offset, a->v.gen,
bch2_data_types[a->v.data_type],
a->v.dirty_sectors,
s.k->p.offset)) {
ret = -EIO;
goto err;
}
u.stripe = s.k->p.offset;
u.stripe_redundancy = s.v->nr_redundant;
a->v.stripe = s.k->p.offset;
a->v.stripe_redundancy = s.v->nr_redundant;
} else {
if (bch2_trans_inconsistent_on(u.stripe != s.k->p.offset ||
u.stripe_redundancy != s.v->nr_redundant, trans,
if (bch2_trans_inconsistent_on(a->v.stripe != s.k->p.offset ||
a->v.stripe_redundancy != s.v->nr_redundant, trans,
"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe %llu (got %u)",
iter.pos.inode, iter.pos.offset, u.gen,
s.k->p.offset, u.stripe)) {
iter.pos.inode, iter.pos.offset, a->v.gen,
s.k->p.offset, a->v.stripe)) {
ret = -EIO;
goto err;
}
u.stripe = 0;
u.stripe_redundancy = 0;
a->v.stripe = 0;
a->v.stripe_redundancy = 0;
}
u.dirty_sectors += sectors;
a->v.dirty_sectors += sectors;
if (data_type)
u.data_type = !deleting ? data_type : 0;
a->v.data_type = !deleting ? data_type : 0;
ret = bch2_alloc_write(trans, &iter, &u, 0);
ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
if (ret)
goto err;
err:
@@ -1805,11 +1781,6 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
le64_add_cpu(refcount, add);
if (!*refcount) {
n->k.type = KEY_TYPE_deleted;
set_bkey_val_u64s(&n->k, 0);
}
bch2_btree_iter_set_pos_to_extent_start(&iter);
ret = bch2_trans_update(trans, &iter, n, 0);
if (ret)
@@ -1857,11 +1828,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_alloc_unpacked u;
struct bch_extent_ptr ptr = {
.dev = ca->dev_idx,
.offset = bucket_to_sector(ca, b),
};
struct bkey_i_alloc_v4 *a;
int ret = 0;
/*
@@ -1870,26 +1837,26 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
if (b >= ca->mi.nbuckets)
return 0;
ret = bch2_trans_start_alloc_update(trans, &iter, &ptr, &u);
if (ret)
return ret;
a = bch2_trans_start_alloc_update(trans, &iter, POS(ca->dev_idx, b));
if (IS_ERR(a))
return PTR_ERR(a);
if (u.data_type && u.data_type != type) {
if (a->v.data_type && a->v.data_type != type) {
bch2_fsck_err(c, FSCK_CAN_IGNORE|FSCK_NEED_FSCK,
"bucket %llu:%llu gen %u different types of data in same bucket: %s, %s\n"
"while marking %s",
iter.pos.inode, iter.pos.offset, u.gen,
bch2_data_types[u.data_type],
iter.pos.inode, iter.pos.offset, a->v.gen,
bch2_data_types[a->v.data_type],
bch2_data_types[type],
bch2_data_types[type]);
ret = -EIO;
goto out;
}
u.data_type = type;
u.dirty_sectors = sectors;
a->v.data_type = type;
a->v.dirty_sectors = sectors;
ret = bch2_alloc_write(trans, &iter, &u, 0);
ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
if (ret)
goto out;
out:

View File

@@ -66,6 +66,14 @@ static inline size_t PTR_BUCKET_NR(const struct bch_dev *ca,
return sector_to_bucket(ca, ptr->offset);
}
static inline struct bpos PTR_BUCKET_POS(const struct bch_fs *c,
const struct bch_extent_ptr *ptr)
{
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
return POS(ptr->dev, PTR_BUCKET_NR(ca, ptr));
}
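This lets the trans-mark paths go straight from an extent pointer to the corresponding alloc key position; a sketch of the call-site shape used in bch2_trans_mark_pointer() above:

        struct bkey_i_alloc_v4 *a =
                bch2_trans_start_alloc_update(trans, &iter,
                                              PTR_BUCKET_POS(trans->c, &p.ptr));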
static inline struct bucket *PTR_GC_BUCKET(struct bch_dev *ca,
const struct bch_extent_ptr *ptr)
{

View File

@@ -954,15 +954,21 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
switch (__extent_entry_type(entry)) {
case BCH_EXTENT_ENTRY_ptr:
ptr = entry_to_ptr(entry);
ca = c && ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
? bch_dev_bkey_exists(c, ptr->dev)
: NULL;
pr_buf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
ptr->cached ? " cached" : "");
if (!ca) {
pr_buf(out, "ptr: %u:%llu gen %u%s", ptr->dev,
(u64) ptr->offset, ptr->gen,
ptr->cached ? " cached" : "");
} else {
u32 offset;
u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);
if (c) {
ca = ptr->dev < c->sb.nr_devices && c->devs[ptr->dev]
? bch_dev_bkey_exists(c, ptr->dev)
: NULL;
pr_buf(out, "ptr: %u:%llu:%u gen %u%s", ptr->dev,
b, offset, ptr->gen,
ptr->cached ? " cached" : "");
if (ca && ptr_stale(ca, ptr))
pr_buf(out, " stale");

View File

@@ -126,7 +126,7 @@ static int bch2_check_lru_key(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c lru_k, k;
struct bkey_alloc_unpacked a;
struct bch_alloc_v4 a;
struct printbuf buf1 = PRINTBUF;
struct printbuf buf2 = PRINTBUF;
u64 idx;
@@ -149,10 +149,10 @@ static int bch2_check_lru_key(struct btree_trans *trans,
if (ret)
goto err;
a = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
if (fsck_err_on(bucket_state(a) != BUCKET_cached ||
a.read_time != lru_k.k->p.offset, c,
a.io_time[READ] != lru_k.k->p.offset, c,
"incorrect lru entry %s\n"
" for %s",
(bch2_bkey_val_to_text(&buf1, c, lru_k), buf1.buf),

View File

@@ -117,7 +117,7 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_alloc_unpacked u;
struct bch_alloc_v4 a;
int ret;
bch2_trans_init(&trans, c, 0, 0);
@@ -127,20 +127,20 @@ static int walk_buckets_to_copygc(struct bch_fs *c)
struct bch_dev *ca = bch_dev_bkey_exists(c, iter.pos.inode);
struct copygc_heap_entry e;
u = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
if (u.data_type != BCH_DATA_user ||
u.dirty_sectors >= ca->mi.bucket_size ||
if (a.data_type != BCH_DATA_user ||
a.dirty_sectors >= ca->mi.bucket_size ||
bch2_bucket_is_open(c, iter.pos.inode, iter.pos.offset))
continue;
e = (struct copygc_heap_entry) {
.dev = iter.pos.inode,
.gen = u.gen,
.replicas = 1 + u.stripe_redundancy,
.fragmentation = (u64) u.dirty_sectors * (1ULL << 31)
.gen = a.gen,
.replicas = 1 + a.stripe_redundancy,
.fragmentation = (u64) a.dirty_sectors * (1ULL << 31)
/ ca->mi.bucket_size,
.sectors = u.dirty_sectors,
.sectors = a.dirty_sectors,
.offset = bucket_to_sector(ca, iter.pos.offset),
};
heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
@@ -168,7 +168,7 @@ static int check_copygc_was_done(struct bch_fs *c,
struct btree_trans trans;
struct btree_iter iter;
struct bkey_s_c k;
struct bkey_alloc_unpacked u;
struct bch_alloc_v4 a;
struct copygc_heap_entry *i;
int ret = 0;
@@ -187,10 +187,10 @@ static int check_copygc_was_done(struct bch_fs *c,
if (ret)
break;
u = bch2_alloc_unpack(k);
bch2_alloc_to_v4(k, &a);
if (u.gen == i->gen && u.dirty_sectors) {
*sectors_not_moved += u.dirty_sectors;
if (a.gen == i->gen && a.dirty_sectors) {
*sectors_not_moved += a.dirty_sectors;
*buckets_not_moved += 1;
}
}

View File

@@ -1029,8 +1029,8 @@ int bch2_fs_recovery(struct bch_fs *c)
bch_info(c, "filesystem version is prior to subvol_dirent - upgrading");
c->opts.version_upgrade = true;
c->opts.fsck = true;
} else if (c->sb.version < bcachefs_metadata_version_freespace) {
bch_info(c, "filesystem version is prior to freespace - upgrading");
} else if (c->sb.version < bcachefs_metadata_version_alloc_v4) {
bch_info(c, "filesystem version is prior to alloc_v4 - upgrading");
c->opts.version_upgrade = true;
}
}

View File

@@ -98,6 +98,24 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
}
int bch2_trans_mark_reflink_v(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
struct bkey_i_reflink_v *r = bkey_i_to_reflink_v(new);
if (!r->v.refcount) {
r->k.type = KEY_TYPE_deleted;
r->k.size = 0;
set_bkey_val_u64s(&r->k, 0);
return 0;
}
}
return bch2_trans_mark_extent(trans, old, new, flags);
}
/* indirect inline data */
const char *bch2_indirect_inline_data_invalid(const struct bch_fs *c,
@@ -119,6 +137,24 @@ void bch2_indirect_inline_data_to_text(struct printbuf *out,
min(datalen, 32U), d.v->data);
}
int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
struct bkey_i_indirect_inline_data *r =
bkey_i_to_indirect_inline_data(new);
if (!r->v.refcount) {
r->k.type = KEY_TYPE_deleted;
r->k.size = 0;
set_bkey_val_u64s(&r->k, 0);
}
}
return 0;
}
static int bch2_make_extent_indirect(struct btree_trans *trans,
struct btree_iter *extent_iter,
struct bkey_i *orig)

View File

@@ -18,12 +18,14 @@ bool bch2_reflink_p_merge(struct bch_fs *, struct bkey_s, struct bkey_s_c);
const char *bch2_reflink_v_invalid(const struct bch_fs *, struct bkey_s_c);
void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
struct bkey_s_c);
int bch2_trans_mark_reflink_v(struct btree_trans *, struct bkey_s_c,
struct bkey_i *, unsigned);
#define bch2_bkey_ops_reflink_v (struct bkey_ops) { \
.key_invalid = bch2_reflink_v_invalid, \
.val_to_text = bch2_reflink_v_to_text, \
.swab = bch2_ptr_swab, \
.trans_trigger = bch2_trans_mark_extent, \
.trans_trigger = bch2_trans_mark_reflink_v, \
.atomic_trigger = bch2_mark_extent, \
}
@@ -31,10 +33,14 @@ const char *bch2_indirect_inline_data_invalid(const struct bch_fs *,
struct bkey_s_c);
void bch2_indirect_inline_data_to_text(struct printbuf *,
struct bch_fs *, struct bkey_s_c);
int bch2_trans_mark_indirect_inline_data(struct btree_trans *,
struct bkey_s_c, struct bkey_i *,
unsigned);
#define bch2_bkey_ops_indirect_inline_data (struct bkey_ops) { \
.key_invalid = bch2_indirect_inline_data_invalid, \
.val_to_text = bch2_indirect_inline_data_to_text, \
.trans_trigger = bch2_trans_mark_indirect_inline_data, \
}
static inline const __le64 *bkey_refcount_c(struct bkey_s_c k)

View File

@@ -26,6 +26,12 @@ static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
return remainder;
}
static inline size_t sector_to_bucket_and_offset(const struct bch_dev *ca, sector_t s,
u32 *offset)
{
return div_u64_rem(s, ca->mi.bucket_size, offset);
}
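This is the helper bch2_bkey_ptrs_to_text() (above) uses to print a pointer as device:bucket:offset-within-bucket rather than as a raw sector; a sketch:

        u32 offset;
        u64 b = sector_to_bucket_and_offset(ca, ptr->offset, &offset);

        pr_buf(out, "ptr: %u:%llu:%u gen %u", ptr->dev, b, offset, ptr->gen);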
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
return !percpu_ref_is_zero(&ca->io_ref);