Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-22 00:00:03 +03:00)
Update bcachefs sources to 171da96d76 bcachefs: Drop some anonymous structs, unions
commit b0c9ad15f4
parent 9fc4b5d675
@@ -1 +1 @@
-c28937622fbd373f152df01f29efa2d79af99633
+171da96d76d03a12872c8c9e2d02602c3ddfcb5f
Makefile (+4)
@@ -190,6 +190,10 @@ update-bcachefs-sources:
 	git add libbcachefs/*.[ch]
 	cp $(LINUX_DIR)/include/trace/events/bcachefs.h include/trace/events/
 	git add include/trace/events/bcachefs.h
+	cp $(LINUX_DIR)/include/linux/closure.h include/linux/
+	git add include/linux/closure.h
+	cp $(LINUX_DIR)/lib/closure.c linux/
+	git add linux/closure.c
 	cp $(LINUX_DIR)/include/linux/xxhash.h include/linux/
 	git add include/linux/xxhash.h
 	cp $(LINUX_DIR)/lib/xxhash.c linux/
@@ -190,7 +190,7 @@ static void print_node_ondisk(struct bch_fs *c, struct btree *b)
 			le64_to_cpu(i->journal_seq));
 		offset += sectors;
 
-		for (k = i->start; k != vstruct_last(i); k = bkey_next(k)) {
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
 			struct bkey u;
 			struct printbuf buf = PRINTBUF;
@@ -81,7 +81,7 @@ static bool entry_matches_transaction_filter(struct jset_entry *entry,
 	    entry->type == BCH_JSET_ENTRY_overwrite) {
 		struct bkey_i *k;
 
-		vstruct_for_each(entry, k)
+		jset_entry_for_each_key(entry, k)
 			if (bkey_matches_filter(filter, entry, k))
 				return true;
 	}
@@ -117,7 +117,7 @@ static bool should_print_entry(struct jset_entry *entry, d_btree_id filter)
 	    entry->type != BCH_JSET_ENTRY_overwrite)
 		return true;
 
-	vstruct_for_each(entry, k)
+	jset_entry_for_each_key(entry, k)
 		darray_for_each(filter, id)
 			if (entry->btree_id == *id)
 				return true;
@@ -311,7 +311,7 @@ static void link_data(struct bch_fs *c, struct bch_inode_unpacked *dst,
 
 	while (length) {
 		struct bkey_i_extent *e;
-		__BKEY_PADDED(k, BKEY_EXTENT_VAL_U64s_MAX) k;
+		BKEY_PADDED_ONSTACK(k, BKEY_EXTENT_VAL_U64s_MAX) k;
 		u64 b = sector_to_bucket(ca, physical);
 		struct disk_reservation res;
 		unsigned sectors;
@@ -3,7 +3,6 @@
 #define _LINUX_CLOSURE_H
 
 #include <linux/llist.h>
-#include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/workqueue.h>
@@ -173,6 +172,11 @@ void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void __closure_sync(struct closure *cl);
 
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+	return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
 /**
  * closure_sync - sleep until a closure a closure has nothing left to wait on
  *
@@ -181,7 +185,7 @@ void __closure_sync(struct closure *cl);
  */
 static inline void closure_sync(struct closure *cl)
 {
-	if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+	if (closure_nr_remaining(cl) != 1)
 		__closure_sync(cl);
 }
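The new closure_nr_remaining() helper only names a read that closure_sync() was already doing; pulling it out lets other code poll whether a closure still has outstanding work without sleeping (the copygc rework later in this commit uses it for its in-flight buckets). A standalone toy model of that idea, using plain C11 atomics instead of the real closure API (the mask value below is a made-up placeholder):

```c
/* Toy model only: the real struct closure and CLOSURE_REMAINING_MASK live in
 * include/linux/closure.h; this just illustrates the masked-refcount read. */
#include <stdatomic.h>
#include <stdio.h>

#define CLOSURE_REMAINING_MASK	((1U << 16) - 1)	/* placeholder value */

struct closure { atomic_uint remaining; };

static inline unsigned closure_nr_remaining(struct closure *cl)
{
	return atomic_load(&cl->remaining) & CLOSURE_REMAINING_MASK;
}

int main(void)
{
	struct closure cl;

	atomic_init(&cl.remaining, 1);	/* 1 == only our own ref held */

	if (closure_nr_remaining(&cl) != 1)
		puts("still waiting on outstanding work");
	else
		puts("idle: nothing left to wait on");
	return 0;
}
```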
@@ -7,4 +7,6 @@
 #define freezable_schedule()		schedule()
 #define freezable_schedule_timeout(_t)	schedule_timeout(_t)
 
+static inline void __refrigerator(bool f) {}
+
 #endif /* __TOOLS_LINUX_FREEZER_H */
@@ -1123,4 +1123,7 @@ static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
 	return dev < c->sb.nr_devices && c->devs[dev];
 }
 
+#define BKEY_PADDED_ONSTACK(key, pad)				\
+	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
+
 #endif /* _BCACHEFS_H */
@@ -290,16 +290,8 @@ enum bch_bkey_fields {
 struct bkey_i {
 	__u64			_data[0];
 
-	union {
-	struct {
-		/* Size of combined key and value, in u64s */
-		__u8		u64s;
-	};
-	struct {
 	struct bkey	k;
 	struct bch_val	v;
-	};
-	};
 };
 
 #define KEY(_inode, _offset, _size)				\
@@ -318,7 +310,7 @@ static inline void bkey_init(struct bkey *k)
 #define bkey_bytes(_k)		((_k)->u64s * sizeof(__u64))
 
 #define __BKEY_PADDED(key, pad)				\
-	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
+	struct bkey_i key; __u64 key ## _pad[pad]
 
 /*
  * - DELETED keys are used internally to mark keys that should be ignored but
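These two hunks are the core of the "drop some anonymous structs, unions" change: struct bkey_i loses the anonymous union that aliased u64s at its start, and __BKEY_PADDED() now expands to bare members so it can only appear inside another struct or union, with the BKEY_PADDED_ONSTACK() macro added above taking over for on-stack declarations. A minimal standalone sketch of how call sites shift as a result, using simplified stand-in types rather than the real headers:

```c
#include <stdio.h>

typedef unsigned char		__u8;
typedef unsigned long long	__u64;

struct bkey    { __u8 u64s; };		/* stand-in: the real struct bkey has many more fields */
struct bch_val { __u64 _data[0]; };	/* zero-size value header (GNU C), as a stand-in */

/* After the patch: no anonymous union, so the size is only reachable as .k.u64s */
struct bkey_i {
	struct bkey	k;
	struct bch_val	v;
};

/* The new helper from bcachefs.h for stack-allocated keys with value padding */
#define BKEY_PADDED_ONSTACK(key, pad) \
	struct { struct bkey_i key; __u64 key ## _pad[pad]; }

int main(void)
{
	BKEY_PADDED_ONSTACK(k, 8) tmp;	/* tmp.k is the bkey_i, tmp.k_pad the padding */

	tmp.k.k.u64s = 5;		/* was tmp.k.u64s before this commit */
	printf("u64s = %u\n", (unsigned) tmp.k.k.u64s);
	return 0;
}
```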
@@ -34,7 +34,12 @@ struct bkey_s {
 	};
 };
 
-#define bkey_next(_k)		vstruct_next(_k)
+#define bkey_p_next(_k)		vstruct_next(_k)
+
+static inline struct bkey_i *bkey_next(struct bkey_i *k)
+{
+	return (struct bkey_i *) (k->_data + k->k.u64s);
+}
 
 #define bkey_val_u64s(_k)	((_k)->u64s - BKEY_U64s)
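This hunk splits the two iteration cases that used to share one name: bkey_p_next() keeps the old vstruct-style stepping for packed keys inside a bset (the bulk of the mechanical renames below), while bkey_next() becomes a typed helper that steps over a struct bkey_i by its k.u64s size. A standalone sketch of the underlying "advance by u64s 64-bit words" pattern, with simplified stand-in types (not the real key layout):

```c
#include <stdio.h>

typedef unsigned long long u64;

struct bkey_i { u64 u64s; u64 payload[]; };	/* stand-in, not the real layout */

static struct bkey_i *bkey_next(struct bkey_i *k)
{
	/* keys are laid out back to back, sized in 64-bit words */
	return (struct bkey_i *) ((u64 *) k + k->u64s);
}

int main(void)
{
	/* two packed "keys": one 3 words long, one 2 words long */
	u64 buf[] = { 3, 0x11, 0x22,   2, 0x33 };
	struct bkey_i *k   = (struct bkey_i *) buf;
	struct bkey_i *end = (struct bkey_i *) (buf + 5);

	for (; k < end; k = bkey_next(k))
		printf("key of %llu u64s\n", k->u64s);
	return 0;
}
```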
@@ -46,7 +46,7 @@ static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
 
 	BUG_ON(!iter->used);
 
-	i->k = bkey_next(i->k);
+	i->k = bkey_p_next(i->k);
 
 	BUG_ON(i->k > i->end);
 
@@ -108,7 +108,7 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
 		    !should_drop_next_key(iter)) {
 			bkey_copy(out, k);
 			btree_keys_account_key_add(&nr, 0, out);
-			out = bkey_next(out);
+			out = bkey_p_next(out);
 		}
 
 		sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
@@ -147,7 +147,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
 		out->needs_whiteout = false;
 
 		btree_keys_account_key_add(&nr, 0, out);
-		out = bkey_next(out);
+		out = bkey_p_next(out);
 	}
 
 	dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
@@ -194,7 +194,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
 			bkey_copy(out, in);
 		}
 		out->needs_whiteout |= needs_whiteout;
-		out = bkey_next(out);
+		out = bkey_p_next(out);
 	}
 
 	return (u64 *) out - (u64 *) dst;
@@ -69,7 +69,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
 	for (_k = i->start;
 	     _k < vstruct_last(i);
 	     _k = _n) {
-		_n = bkey_next(_k);
+		_n = bkey_p_next(_k);
 
 		k = bkey_disassemble(b, _k, &uk);
 
@@ -542,7 +542,7 @@ start:
 			rw_aux_tree(b, t)[j - 1].offset);
 		}
 
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 		BUG_ON(k >= btree_bkey_last(b, t));
 	}
 }
@@ -733,7 +733,7 @@ retry:
 	/* First we figure out where the first key in each cacheline is */
 	eytzinger1_for_each(j, t->size - 1) {
 		while (bkey_to_cacheline(b, t, k) < cacheline)
-			prev = k, k = bkey_next(k);
+			prev = k, k = bkey_p_next(k);
 
 		if (k >= btree_bkey_last(b, t)) {
 			/* XXX: this path sucks */
@@ -750,7 +750,7 @@ retry:
 	}
 
 	while (k != btree_bkey_last(b, t))
-		prev = k, k = bkey_next(k);
+		prev = k, k = bkey_p_next(k);
 
 	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
 		bkey_init(&min_key.k);
@@ -888,7 +888,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 	struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
 	while ((p = __bkey_prev(b, t, k)) && !ret) {
-		for (i = p; i != k; i = bkey_next(i))
+		for (i = p; i != k; i = bkey_p_next(i))
 			if (i->type >= min_key_type)
 				ret = i;
 
@@ -899,10 +899,10 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
 		BUG_ON(ret >= orig_k);
 
 		for (i = ret
-			     ? bkey_next(ret)
+			     ? bkey_p_next(ret)
 			     : btree_bkey_first(b, t);
 		     i != orig_k;
-		     i = bkey_next(i))
+		     i = bkey_p_next(i))
 			BUG_ON(i->type >= min_key_type);
 	}
 
@@ -974,7 +974,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
 		struct bkey_packed *k = start;
 
 		while (1) {
-			k = bkey_next(k);
+			k = bkey_p_next(k);
 			if (k == end)
 				break;
 
@@ -1208,12 +1208,12 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_cmp_p_or_unp(b, m,
 					lossy_packed_search, search) < 0)
-			m = bkey_next(m);
+			m = bkey_p_next(m);
 
 	if (!packed_search)
 		while (m != btree_bkey_last(b, t) &&
 		       bkey_iter_pos_cmp(b, m, search) < 0)
-			m = bkey_next(m);
+			m = bkey_p_next(m);
 
 	if (bch2_expensive_debug_checks) {
 		struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);
 
@@ -211,7 +211,7 @@ static inline size_t btree_aux_data_u64s(const struct btree *b)
 #define bset_tree_for_each_key(_b, _t, _k)			\
 	for (_k = btree_bkey_first(_b, _t);			\
 	     _k != btree_bkey_last(_b, _t);			\
-	     _k = bkey_next(_k))
+	     _k = bkey_p_next(_k))
 
 static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
 {
@@ -77,9 +77,9 @@ static void verify_no_dups(struct btree *b,
 	if (start == end)
 		return;
 
-	for (p = start, k = bkey_next(start);
+	for (p = start, k = bkey_p_next(start);
 	     k != end;
-	     p = k, k = bkey_next(k)) {
+	     p = k, k = bkey_p_next(k)) {
 		struct bkey l = bkey_unpack_key(b, p);
 		struct bkey r = bkey_unpack_key(b, k);
 
@@ -92,7 +92,7 @@ static void set_needs_whiteout(struct bset *i, int v)
 {
 	struct bkey_packed *k;
 
-	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 		k->needs_whiteout = v;
 }
 
@@ -175,7 +175,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
 	for (k = unwritten_whiteouts_start(c, b);
 	     k != unwritten_whiteouts_end(c, b);
-	     k = bkey_next(k))
+	     k = bkey_p_next(k))
 		*--ptrs = k;
 
 	sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
@@ -184,7 +184,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
 
 	while (ptrs != ptrs_end) {
 		bkey_copy(k, *ptrs);
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 		ptrs++;
 	}
 
@@ -256,11 +256,11 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
 		out = i->start;
 
 		for (k = start; k != end; k = n) {
-			n = bkey_next(k);
+			n = bkey_p_next(k);
 
 			if (!bkey_deleted(k)) {
 				bkey_copy(out, k);
-				out = bkey_next(out);
+				out = bkey_p_next(out);
 			} else {
 				BUG_ON(k->needs_whiteout);
 			}
@@ -652,7 +652,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 		struct bset *i = bset(b, t);
 		struct bkey_packed *k;
 
-		for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+		for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 			if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
 				break;
 
@@ -665,7 +665,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
 		set_btree_bset_end(b, t);
 	}
 
-	for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+	for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
 		if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
 			break;
 
@@ -843,7 +843,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 		struct bkey_s u;
 		struct bkey tmp;
 
-		if (btree_err_on(bkey_next(k) > vstruct_last(i),
+		if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
 				 BTREE_ERR_FIXABLE, c, NULL, b, i,
 				 "key extends past end of bset")) {
 			i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -854,7 +854,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 				 BTREE_ERR_FIXABLE, c, NULL, b, i,
 				 "invalid bkey format %u", k->format)) {
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			continue;
 		}
@@ -878,7 +878,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 			btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
 
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			continue;
 		}
@@ -901,14 +901,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 
 			if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
 				i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-				memmove_u64s_down(k, bkey_next(k),
+				memmove_u64s_down(k, bkey_p_next(k),
 						  (u64 *) vstruct_end(i) - (u64 *) k);
 				continue;
 			}
 		}
 
 		prev = k;
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 	}
 fsck_err:
 	printbuf_exit(&buf);
@@ -1139,7 +1139,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			btree_keys_account_key_drop(&b->nr, 0, k);
 
 			i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-			memmove_u64s_down(k, bkey_next(k),
+			memmove_u64s_down(k, bkey_p_next(k),
 					  (u64 *) vstruct_end(i) - (u64 *) k);
 			set_btree_bset_end(b, b->set);
 			continue;
@@ -1151,7 +1151,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			bp.v->mem_ptr = 0;
 		}
 
-		k = bkey_next(k);
+		k = bkey_p_next(k);
 	}
 
 	bch2_bset_build_aux_tree(b, b->set, false);
@@ -1847,7 +1847,7 @@ static void btree_write_submit(struct work_struct *work)
 {
 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
 	struct bch_extent_ptr *ptr;
-	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
 
 	bkey_copy(&tmp.k, &wbio->key);
 
@@ -777,14 +777,14 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
 	struct bkey_cached *ck = (void *) path->l[0].b;
 	bool kick_reclaim = false;
 
-	BUG_ON(insert->u64s > ck->u64s);
+	BUG_ON(insert->k.u64s > ck->u64s);
 
 	if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
 		int difference;
 
-		BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);
+		BUG_ON(jset_u64s(insert->k.u64s) > trans->journal_preres.u64s);
 
-		difference = jset_u64s(insert->u64s) - ck->res.u64s;
+		difference = jset_u64s(insert->k.u64s) - ck->res.u64s;
 		if (difference > 0) {
 			trans->journal_preres.u64s -= difference;
 			ck->res.u64s += difference;
@@ -242,7 +242,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct write_point *wp;
 	struct btree *b;
-	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+	BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
 	struct open_buckets ob = { .nr = 0 };
 	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
 	unsigned nr_reserve;
@@ -1412,7 +1412,7 @@ static void __btree_split_node(struct btree_update *as,
 		out[i]->needs_whiteout = false;
 
 		btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
-		out[i] = bkey_next(out[i]);
+		out[i] = bkey_p_next(out[i]);
 	}
 
 	for (i = 0; i < 2; i++) {
@@ -2444,7 +2444,7 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c,
 					  BCH_JSET_ENTRY_btree_root,
 					  i, c->btree_roots[i].level,
 					  &c->btree_roots[i].key,
-					  c->btree_roots[i].key.u64s);
+					  c->btree_roots[i].key.k.u64s);
 			end = vstruct_next(end);
 		}
 
@@ -274,7 +274,7 @@ next:
 		}
 		continue;
 nomatch:
-		if (m->ctxt) {
+		if (m->ctxt && m->ctxt->stats) {
 			BUG_ON(k.k->p.offset <= iter.pos.offset);
 			atomic64_inc(&m->ctxt->stats->keys_raced);
 			atomic64_add(k.k->p.offset - iter.pos.offset,
@@ -153,7 +153,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 
 	BUG_ON(b->nsets != 1);
 
-	for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_next(k))
+	for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
 		if (k->type == KEY_TYPE_btree_ptr_v2) {
 			struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k);
 			v->mem_ptr = 0;
@@ -869,12 +869,10 @@ static int ec_stripe_key_update(struct btree_trans *trans,
 		for (i = 0; i < new->v.nr_blocks; i++) {
 			unsigned v = stripe_blockcount_get(old, i);
 
-			if (!v)
-				continue;
-
-			BUG_ON(old->ptrs[i].dev != new->v.ptrs[i].dev ||
+			BUG_ON(v &&
+			       (old->ptrs[i].dev != new->v.ptrs[i].dev ||
 				old->ptrs[i].gen != new->v.ptrs[i].gen ||
-				old->ptrs[i].offset != new->v.ptrs[i].offset);
+				old->ptrs[i].offset != new->v.ptrs[i].offset));
 
 			stripe_blockcount_set(&new->v, i, v);
 		}
@@ -1594,8 +1592,6 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
 	bkey_copy(&h->s->new_stripe.key.k_i, &h->s->existing_stripe.key.k_i);
 	h->s->have_existing_stripe = true;
 
-	pr_info("reused %llu", h->s->idx);
-
 	return 0;
 }
 
@@ -1687,9 +1683,9 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 	if (h->s->allocated)
 		goto allocated;
 
-	if (h->s->idx)
+	if (h->s->have_existing_stripe)
 		goto alloc_existing;
 #if 0
 
 	/* First, try to allocate a full stripe: */
 	ret =   new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
 		__bch2_ec_stripe_head_reserve(trans, h);
@@ -1699,24 +1695,17 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 	    bch2_err_matches(ret, ENOMEM))
 		goto err;
 
-	if (ret == -BCH_ERR_open_buckets_empty) {
-		/* don't want to reuse in this case */
-	}
 #endif
 	/*
 	 * Not enough buckets available for a full stripe: we must reuse an
 	 * existing stripe:
 	 */
 	while (1) {
 		ret = __bch2_ec_stripe_head_reuse(trans, h);
 		if (ret)
 			ret = __bch2_ec_stripe_head_reserve(trans, h);
 		if (!ret)
 			break;
-		pr_info("err %s", bch2_err_str(ret));
-		if (ret == -BCH_ERR_ENOSPC_stripe_reuse && cl)
-			ret = -BCH_ERR_stripe_alloc_blocked;
-		if (waiting || !cl)
+		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
 			goto err;
 
 		/* XXX freelist_wait? */
@@ -633,7 +633,7 @@ static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr
 		memcpy((void *) &k->v + bkey_val_bytes(&k->k),
 		       &ptr,
 		       sizeof(ptr));
-		k->u64s++;
+		k->k.u64s++;
 		break;
 	default:
 		BUG();
@@ -734,7 +734,7 @@ static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
 		}
 
 		if (dst != src)
-			memmove_u64s_down(dst, src, src->u64s);
+			memmove_u64s_down(dst, src, src->k.u64s);
 		dst = bkey_next(dst);
 	}
 
@@ -357,7 +357,7 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
 	struct bkey_i *k;
 	bool first = true;
 
-	vstruct_for_each(entry, k) {
+	jset_entry_for_each_key(entry, k) {
 		if (!first) {
 			prt_newline(out);
 			prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);
@@ -40,9 +40,14 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
 	     (entry = __jset_entry_type_next(jset, entry, type));	\
 	     entry = vstruct_next(entry))
 
-#define for_each_jset_key(k, _n, entry, jset)				\
-	for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys) \
-		vstruct_for_each_safe(entry, k, _n)
+#define jset_entry_for_each_key(_e, _k)				\
+	for (_k = (_e)->start;						\
+	     _k < vstruct_last(_e);					\
+	     _k = bkey_next(_k))
+
+#define for_each_jset_key(k, entry, jset)				\
+	for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
+		jset_entry_for_each_key(entry, k)
 
 int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
 				struct jset_entry *, unsigned, int, int);
@@ -304,12 +304,6 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (ret && ret != -BCH_ERR_unwritten_extent_update)
 		goto err_free_pages;
 
-	io->write.ctxt = ctxt;
-	io->write.op.end_io = move_write_done;
-
-	atomic64_inc(&ctxt->stats->keys_moved);
-	atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
 	if (ret == -BCH_ERR_unwritten_extent_update) {
 		bch2_update_unwritten_extent(trans, &io->write);
 		move_free(io);
@@ -318,6 +312,14 @@ static int bch2_move_extent(struct btree_trans *trans,
 
 	BUG_ON(ret);
 
+	io->write.ctxt = ctxt;
+	io->write.op.end_io = move_write_done;
+
+	if (ctxt->stats) {
+		atomic64_inc(&ctxt->stats->keys_moved);
+		atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+	}
+
 	this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
 	this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
 	trace_move_extent_read(k.k);
@@ -469,9 +471,11 @@ static int __bch2_move_data(struct moving_context *ctxt,
 	bch2_bkey_buf_init(&sk);
 	bch2_trans_init(&trans, c, 0, 0);
 
+	if (ctxt->stats) {
 		ctxt->stats->data_type	= BCH_DATA_user;
 		ctxt->stats->btree_id	= btree_id;
 		ctxt->stats->pos	= start;
+	}
 
 	bch2_trans_iter_init(&trans, &iter, btree_id, start,
 			     BTREE_ITER_PREFETCH|
@@ -496,6 +500,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (bkey_ge(bkey_start_pos(k.k), end))
 			break;
 
+		if (ctxt->stats)
 			ctxt->stats->pos = iter.pos;
 
 		if (!bkey_extent_is_direct_data(k.k))
@@ -536,6 +541,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
 		if (ctxt->rate)
 			bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
+		if (ctxt->stats)
 			atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 next_nondata:
 		bch2_btree_iter_advance(&iter);
@@ -585,7 +591,7 @@ int bch2_move_data(struct bch_fs *c,
 	return ret;
 }
 
-static noinline void verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
+void bch2_verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
@@ -620,6 +626,9 @@ again:
 failed_to_evacuate:
 	bch2_trans_iter_exit(trans, &iter);
 
+	if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+		return;
+
 	prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
 	bch2_bkey_val_to_text(&buf, c, k);
 
@@ -760,6 +769,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate, k.k->size);
+			if (ctxt->stats)
 				atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 		} else {
 			struct btree *b;
@@ -787,9 +797,11 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
 			if (ctxt->rate)
 				bch2_ratelimit_increment(ctxt->rate,
 							 c->opts.btree_node_size >> 9);
+			if (ctxt->stats) {
 				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
 				atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
+			}
 		}
 next:
 		bp_offset++;
 	}
@@ -801,7 +813,7 @@ next:
 		move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
 		closure_sync(&ctxt->cl);
 		if (!ctxt->write_error)
-			verify_bucket_evacuated(trans, bucket, gen);
+			bch2_verify_bucket_evacuated(trans, bucket, gen);
 	}
 err:
 	bch2_bkey_buf_exit(&sk, c);
@@ -30,6 +30,8 @@ struct moving_context {
 	wait_queue_head_t	wait;
 };
 
+void bch2_verify_bucket_evacuated(struct btree_trans *, struct bpos, int);
+
 #define move_ctxt_wait_event(_ctxt, _trans, _cond)			\
 do {									\
 	bool cond_finished = false;					\
@@ -26,6 +26,7 @@
 #include "super-io.h"
 
 #include <trace/events/bcachefs.h>
+#include <linux/bsearch.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/math64.h>
@@ -71,62 +72,147 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
 	return ret;
 }
 
-static int bch2_copygc_next_bucket(struct btree_trans *trans,
-				   struct bpos *bucket, u8 *gen, struct bpos *pos)
+struct copygc_bucket_in_flight {
+	struct bpos		bucket;
+	u8			gen;
+	struct moving_context	ctxt;
+};
+
+typedef FIFO(struct copygc_bucket_in_flight) copygc_buckets_in_flight;
+
+struct copygc_bucket {
+	struct bpos		bucket;
+	u8			gen;
+};
+
+typedef DARRAY(struct copygc_bucket) copygc_buckets;
+
+static int copygc_bucket_cmp(const void *_l, const void *_r)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret;
+	const struct copygc_bucket *l = _l;
+	const struct copygc_bucket *r = _r;
 
-	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
-			bpos_max(*pos, lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0)),
-			lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
-			0, k, ({
-		*bucket = u64_to_bucket(k.k->p.offset);
-
-		bch2_bucket_is_movable(trans, *bucket, lru_pos_time(k.k->p), gen);
-	}));
-
-	*pos = iter.pos;
-	if (ret < 0)
-		return ret;
-	return ret ? 0 : -ENOENT;
+	return bpos_cmp(l->bucket, r->bucket) ?: cmp_int(l->gen, r->gen);
 }
 
-static int bch2_copygc(struct bch_fs *c)
+static bool bucket_in_flight(copygc_buckets *buckets_sorted, struct copygc_bucket b)
 {
-	struct bch_move_stats move_stats;
-	struct btree_trans trans;
-	struct moving_context ctxt;
+	return bsearch(&b,
+		       buckets_sorted->data,
+		       buckets_sorted->nr,
+		       sizeof(buckets_sorted->data[0]),
+		       copygc_bucket_cmp) != NULL;
+}
+
+static void copygc_buckets_wait(struct btree_trans *trans,
+				copygc_buckets_in_flight *buckets_in_flight,
+				size_t nr, bool verify_evacuated)
+{
+	while (!fifo_empty(buckets_in_flight)) {
+		struct copygc_bucket_in_flight *i = &fifo_peek_front(buckets_in_flight);
+
+		if (fifo_used(buckets_in_flight) <= nr &&
+		    closure_nr_remaining(&i->ctxt.cl) != 1)
+			break;
+
+		/*
+		 * moving_ctxt_exit calls bch2_write as it flushes pending
+		 * reads, which inits another btree_trans; this one must be
+		 * unlocked:
+		 */
+		bch2_trans_unlock(trans);
+		bch2_moving_ctxt_exit(&i->ctxt);
+		if (verify_evacuated)
+			bch2_verify_bucket_evacuated(trans, i->bucket, i->gen);
+		buckets_in_flight->front++;
+	}
+}
+
+static int bch2_copygc_get_buckets(struct btree_trans *trans,
+				   copygc_buckets_in_flight *buckets_in_flight,
+				   copygc_buckets *buckets)
+{
+	struct btree_iter iter;
+	copygc_buckets buckets_sorted = { 0 };
+	struct copygc_bucket_in_flight *i;
+	struct bkey_s_c k;
+	size_t fifo_iter;
+	int ret;
+
+	copygc_buckets_wait(trans, buckets_in_flight, buckets_in_flight->size / 2, true);
+
+	fifo_for_each_entry_ptr(i, buckets_in_flight, fifo_iter) {
+		ret = darray_push(&buckets_sorted, ((struct copygc_bucket) {i->bucket, i->gen}));
+		if (ret) {
+			bch_err(trans->c, "error allocating copygc_buckets_sorted");
+			goto err;
+		}
+	}
+
+	sort(buckets_sorted.data,
+	     buckets_sorted.nr,
+	     sizeof(buckets_sorted.data[0]),
+	     copygc_bucket_cmp,
+	     NULL);
+
+	ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+				  lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+				  lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+				  0, k, ({
+		struct copygc_bucket b = { .bucket = u64_to_bucket(k.k->p.offset) };
+		int ret = 0;
+
+		if (!bucket_in_flight(&buckets_sorted, b) &&
+		    bch2_bucket_is_movable(trans, b.bucket, lru_pos_time(k.k->p), &b.gen))
+			ret = darray_push(buckets, b) ?: buckets->nr >= fifo_free(buckets_in_flight);
+
+		ret;
+	}));
+err:
+	darray_exit(&buckets_sorted);
+
+	return ret < 0 ? ret : 0;
+}
+
+static int bch2_copygc(struct btree_trans *trans,
+		       copygc_buckets_in_flight *buckets_in_flight,
+		       struct bch_move_stats *stats)
+{
+	struct bch_fs *c = trans->c;
 	struct data_update_opts data_opts = {
 		.btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
 	};
-	struct bpos bucket;
-	struct bpos pos;
-	u8 gen = 0;
-	unsigned nr_evacuated;
+	copygc_buckets buckets = { 0 };
+	struct copygc_bucket_in_flight *f;
+	struct copygc_bucket *i;
 	int ret = 0;
 
-	bch2_move_stats_init(&move_stats, "copygc");
-	bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
+	ret = bch2_btree_write_buffer_flush(trans);
+	if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+				 __func__, bch2_err_str(ret)))
+		return ret;
+
+	ret = bch2_copygc_get_buckets(trans, buckets_in_flight, &buckets);
+	if (ret)
+		goto err;
+
+	darray_for_each(buckets, i) {
+		if (unlikely(freezing(current)))
+			break;
+
+		f = fifo_push_ref(buckets_in_flight);
+		f->bucket	= i->bucket;
+		f->gen		= i->gen;
+		bch2_moving_ctxt_init(&f->ctxt, c, NULL, NULL, //stats,
 			      writepoint_ptr(&c->copygc_write_point),
 			      false);
-	bch2_trans_init(&trans, c, 0, 0);
-
-	ret = bch2_btree_write_buffer_flush(&trans);
-	BUG_ON(ret);
-
-	for (nr_evacuated = 0, pos = POS_MIN;
-	     nr_evacuated < 32 && !ret;
-	     nr_evacuated++, pos = bpos_nosnap_successor(pos)) {
-		ret = bch2_copygc_next_bucket(&trans, &bucket, &gen, &pos) ?:
-			__bch2_evacuate_bucket(&trans, &ctxt, bucket, gen, data_opts);
-		if (bkey_eq(pos, POS_MAX))
-			break;
+
+		ret = __bch2_evacuate_bucket(trans, &f->ctxt, f->bucket, f->gen, data_opts);
+		if (ret)
+			goto err;
 	}
 
-	bch2_trans_exit(&trans);
-	bch2_moving_ctxt_exit(&ctxt);
+err:
+	darray_exit(&buckets);
 
 	/* no entries in LRU btree found, or got to end: */
 	if (ret == -ENOENT)
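The reworked copygc above keeps a FIFO of buckets whose evacuation is still in flight and, when picking new candidates from the LRU btree, snapshots that FIFO into a sorted array so membership can be tested with bsearch() before a bucket is queued again. A standalone sketch of that membership check, with plain C stand-ins for the bpos/FIFO/darray machinery:

```c
#include <stdlib.h>
#include <stdio.h>

struct bucket { unsigned long long offset; unsigned char gen; };

static int bucket_cmp(const void *_l, const void *_r)
{
	const struct bucket *l = _l, *r = _r;

	if (l->offset != r->offset)
		return l->offset < r->offset ? -1 : 1;
	return (int) l->gen - (int) r->gen;
}

static int bucket_in_flight(struct bucket *sorted, size_t nr, struct bucket b)
{
	/* sorted[] must be ordered by the same comparator used here */
	return bsearch(&b, sorted, nr, sizeof(*sorted), bucket_cmp) != NULL;
}

int main(void)
{
	struct bucket in_flight[] = { {42, 1}, {7, 0}, {100, 3} };
	size_t nr = sizeof(in_flight) / sizeof(in_flight[0]);
	struct bucket candidate = { 42, 1 };

	qsort(in_flight, nr, sizeof(in_flight[0]), bucket_cmp);

	if (bucket_in_flight(in_flight, nr, candidate))
		puts("skip: bucket already being evacuated");
	else
		puts("ok to evacuate");
	return 0;
}
```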
@@ -135,7 +221,7 @@ static int bch2_copygc(struct bch_fs *c)
 	if (ret < 0 && !bch2_err_matches(ret, EROFS))
 		bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
 
-	trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
+	trace_and_count(c, copygc, c, atomic64_read(&stats->sectors_moved), 0, 0, 0);
 	return ret;
 }
 
@@ -162,7 +248,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
 	for_each_rw_member(ca, c, dev_idx) {
 		struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
+		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
 				       ca->mi.bucket_size) >> 1);
 		fragmented = usage.d[BCH_DATA_user].fragmented;
 
@@ -187,17 +273,36 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
 static int bch2_copygc_thread(void *arg)
 {
 	struct bch_fs *c = arg;
+	struct btree_trans trans;
+	struct bch_move_stats move_stats;
 	struct io_clock *clock = &c->io_clock[WRITE];
+	copygc_buckets_in_flight copygc_buckets;
 	u64 last, wait;
 	int ret = 0;
 
+	if (!init_fifo(&copygc_buckets, 1 << 14, GFP_KERNEL)) {
+		bch_err(c, "error allocating copygc buckets in flight");
+		return -ENOMEM;
+	}
+
 	set_freezable();
+	bch2_move_stats_init(&move_stats, "copygc");
+	bch2_trans_init(&trans, c, 0, 0);
 
 	while (!ret && !kthread_should_stop()) {
+		bch2_trans_unlock(&trans);
+
+		try_to_freeze();
 		cond_resched();
 
-		if (kthread_wait_freezable(c->copy_gc_enabled))
-			break;
+		kthread_wait(freezing(current) || c->copy_gc_enabled);
+
+		if (unlikely(freezing(current))) {
+			copygc_buckets_wait(&trans, &copygc_buckets, 0, true);
+			bch2_trans_unlock(&trans);
+			__refrigerator(false);
+			continue;
+		}
 
 		last = atomic64_read(&clock->now);
 		wait = bch2_copygc_wait_amount(c);
@@ -213,12 +318,16 @@ static int bch2_copygc_thread(void *arg)
 			c->copygc_wait = 0;
 
 		c->copygc_running = true;
-		ret = bch2_copygc(c);
+		ret = bch2_copygc(&trans, &copygc_buckets, &move_stats);
 		c->copygc_running = false;
 
 		wake_up(&c->copygc_running_wq);
 	}
 
+	copygc_buckets_wait(&trans, &copygc_buckets, 0, !ret);
+	free_fifo(&copygc_buckets);
+	bch2_trans_exit(&trans);
+
 	return 0;
 }
 
@@ -481,7 +481,7 @@ static int journal_keys_sort(struct bch_fs *c)
 	struct genradix_iter iter;
 	struct journal_replay *i, **_i;
 	struct jset_entry *entry;
-	struct bkey_i *k, *_n;
+	struct bkey_i *k;
 	struct journal_keys *keys = &c->journal_keys;
 	struct journal_key *src, *dst;
 	size_t nr_keys = 0;
@@ -492,7 +492,7 @@ static int journal_keys_sort(struct bch_fs *c)
 		if (!i || i->ignore)
 			continue;
 
-		for_each_jset_key(k, _n, entry, &i->j)
+		for_each_jset_key(k, entry, &i->j)
 			nr_keys++;
 	}
 
@@ -511,7 +511,7 @@ static int journal_keys_sort(struct bch_fs *c)
 		if (!i || i->ignore)
 			continue;
 
-		for_each_jset_key(k, _n, entry, &i->j)
+		for_each_jset_key(k, entry, &i->j)
 			keys->d[keys->nr++] = (struct journal_key) {
 				.btree_id	= entry->btree_id,
 				.level		= entry->level,
@@ -871,7 +871,7 @@ static int verify_superblock_clean(struct bch_fs *c,
 			IS_ERR(k1) ||
 			IS_ERR(k2) ||
 			k1->k.u64s != k2->k.u64s ||
-			memcmp(k1, k2, bkey_bytes(k1)) ||
+			memcmp(k1, k2, bkey_bytes(&k1->k)) ||
 			l1 != l2, c,
 			"superblock btree root %u doesn't match journal after clean shutdown\n"
 			"sb: l=%u %s\n"
@@ -543,9 +543,10 @@ do {									\
 	submit_bio(bio);						\
 } while (0)
 
-#define kthread_wait_freezable(cond)					\
+#define kthread_wait(cond)						\
 ({									\
 	int _ret = 0;							\
 									\
 	while (1) {							\
 		set_current_state(TASK_INTERRUPTIBLE);			\
 		if (kthread_should_stop()) {				\
@@ -557,7 +558,27 @@ do {									\
 			break;						\
 									\
 		schedule();						\
-		try_to_freeze();					\
 	}								\
 	set_current_state(TASK_RUNNING);				\
 	_ret;								\
 })
+
+#define kthread_wait_freezable(cond)					\
+({									\
+	int _ret = 0;							\
+	bool frozen;							\
+									\
+	while (1) {							\
+		set_current_state(TASK_INTERRUPTIBLE);			\
+		if (kthread_freezable_should_stop(&frozen)) {		\
+			_ret = -1;					\
+			break;						\
+		}							\
+									\
+		if (cond)						\
+			break;						\
+									\
+		schedule();						\
+	}								\
+	set_current_state(TASK_RUNNING);				\
+	_ret;								\
+})
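In the tools build, freezing() is always false and the freezer entry points are stubs (see the freezer.h and kthread.c hunks in this commit), so kthread_wait_freezable() effectively reduces to "stop requested, or condition became true". A standalone sketch of that reduced behaviour, with hypothetical stand-in stubs rather than the real shims:

```c
#include <stdbool.h>
#include <stdio.h>

static bool stop_requested;

static bool kthread_should_stop(void)
{
	return stop_requested;
}

static bool kthread_freezable_should_stop(bool *was_frozen)
{
	*was_frozen = false;	/* userspace shim: the thread is never frozen */
	return kthread_should_stop();
}

/* What the macro boils down to once freezing is stubbed out */
static int kthread_wait_freezable(bool (*cond)(void))
{
	bool frozen;

	while (1) {
		if (kthread_freezable_should_stop(&frozen))
			return -1;
		if (cond())
			return 0;
		/* the real macro would schedule() here before re-checking */
	}
}

static int calls;
static bool ready_after_two_calls(void) { return ++calls >= 2; }
static bool never_ready(void)           { return false; }

int main(void)
{
	printf("%d\n", kthread_wait_freezable(ready_after_two_calls));	/* 0  */
	stop_requested = true;
	printf("%d\n", kthread_wait_freezable(never_ready));		/* -1 */
	return 0;
}
```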
@@ -9,6 +9,7 @@
 #include <linux/closure.h>
 #include <linux/debugfs.h>
 #include <linux/export.h>
+#include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>
 
@@ -99,6 +99,11 @@ bool kthread_should_stop(void)
 	return test_bit(KTHREAD_SHOULD_STOP, &current->kthread_flags);
 }
 
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+	return test_bit(KTHREAD_SHOULD_STOP, &current->kthread_flags);
+}
+
 /**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().