Update bcachefs sources to 171da96d76 bcachefs: Drop some anonymous structs, unions

Kent Overstreet 2023-03-04 22:47:04 -05:00
parent 9fc4b5d675
commit b0c9ad15f4
30 changed files with 306 additions and 152 deletions
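The kernel change pulled in here drops the anonymous struct/union wrapping from struct bkey_i, which is why call sites in the hunks below switch from insert->u64s to insert->k.u64s, why __BKEY_PADDED loses its anonymous-struct wrapper, and why BKEY_PADDED_ONSTACK is added for the on-stack uses. A rough sketch of the shape of the change, using illustrative stand-in type names rather than the real bcachefs definitions:

/* Stand-ins for the real types; only the u64s field matters for this sketch. */
struct bkey    { unsigned char u64s; };   /* size of combined key + value, in u64s */
struct bch_val { unsigned char pad; };

/* Before (simplified): an anonymous union aliased u64s with k.u64s,
 * so callers could write insert->u64s directly. */
struct bkey_i_before {
    union {
        struct { unsigned char u64s; };
        struct { struct bkey k; struct bch_val v; };
    };
};

/* After: no anonymous union; the size field is reached only through .k. */
struct bkey_i_after {
    struct bkey     k;
    struct bch_val  v;
};

/* Hence the mechanical edits throughout this diff, e.g.: */
static unsigned char size_before(struct bkey_i_before *insert) { return insert->u64s;   }
static unsigned char size_after (struct bkey_i_after  *insert) { return insert->k.u64s; }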


@@ -1 +1 @@
-c28937622fbd373f152df01f29efa2d79af99633
+171da96d76d03a12872c8c9e2d02602c3ddfcb5f


@@ -190,6 +190,10 @@ update-bcachefs-sources:
 	git add libbcachefs/*.[ch]
 	cp $(LINUX_DIR)/include/trace/events/bcachefs.h include/trace/events/
 	git add include/trace/events/bcachefs.h
+	cp $(LINUX_DIR)/include/linux/closure.h include/linux/
+	git add include/linux/closure.h
+	cp $(LINUX_DIR)/lib/closure.c linux/
+	git add linux/closure.c
 	cp $(LINUX_DIR)/include/linux/xxhash.h include/linux/
 	git add include/linux/xxhash.h
 	cp $(LINUX_DIR)/lib/xxhash.c linux/


@@ -190,7 +190,7 @@ static void print_node_ondisk(struct bch_fs *c, struct btree *b)
     le64_to_cpu(i->journal_seq));
     offset += sectors;
 
-    for (k = i->start; k != vstruct_last(i); k = bkey_next(k)) {
+    for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k)) {
         struct bkey u;
         struct printbuf buf = PRINTBUF;


@@ -81,7 +81,7 @@ static bool entry_matches_transaction_filter(struct jset_entry *entry,
     entry->type == BCH_JSET_ENTRY_overwrite) {
         struct bkey_i *k;
 
-        vstruct_for_each(entry, k)
+        jset_entry_for_each_key(entry, k)
             if (bkey_matches_filter(filter, entry, k))
                 return true;
     }
@@ -117,7 +117,7 @@ static bool should_print_entry(struct jset_entry *entry, d_btree_id filter)
     entry->type != BCH_JSET_ENTRY_overwrite)
         return true;
 
-    vstruct_for_each(entry, k)
+    jset_entry_for_each_key(entry, k)
         darray_for_each(filter, id)
             if (entry->btree_id == *id)
                 return true;


@@ -311,7 +311,7 @@ static void link_data(struct bch_fs *c, struct bch_inode_unpacked *dst,
     while (length) {
         struct bkey_i_extent *e;
-        __BKEY_PADDED(k, BKEY_EXTENT_VAL_U64s_MAX) k;
+        BKEY_PADDED_ONSTACK(k, BKEY_EXTENT_VAL_U64s_MAX) k;
         u64 b = sector_to_bucket(ca, physical);
         struct disk_reservation res;
         unsigned sectors;


@@ -3,7 +3,6 @@
 #define _LINUX_CLOSURE_H
 
 #include <linux/llist.h>
-#include <linux/rcupdate.h>
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <linux/workqueue.h>
@@ -173,6 +172,11 @@ void __closure_wake_up(struct closure_waitlist *list);
 bool closure_wait(struct closure_waitlist *list, struct closure *cl);
 void __closure_sync(struct closure *cl);
 
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+    return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
 /**
  * closure_sync - sleep until a closure a closure has nothing left to wait on
  *
@@ -181,7 +185,7 @@ void __closure_sync(struct closure *cl);
  */
 static inline void closure_sync(struct closure *cl)
 {
-    if ((atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK) != 1)
+    if (closure_nr_remaining(cl) != 1)
         __closure_sync(cl);
 }


@@ -7,4 +7,6 @@
 #define freezable_schedule()            schedule()
 #define freezable_schedule_timeout(_t)  schedule_timeout(_t)
 
+static inline void __refrigerator(bool f) {}
+
 #endif /* __TOOLS_LINUX_FREEZER_H */


@@ -1123,4 +1123,7 @@ static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
     return dev < c->sb.nr_devices && c->devs[dev];
 }
 
+#define BKEY_PADDED_ONSTACK(key, pad)           \
+    struct { struct bkey_i key; __u64 key ## _pad[pad]; }
+
 #endif /* _BCACHEFS_H */


@@ -290,16 +290,8 @@ enum bch_bkey_fields {
 struct bkey_i {
     __u64           _data[0];
 
-    union {
-    struct {
-        /* Size of combined key and value, in u64s */
-        __u8        u64s;
-    };
-    struct {
     struct bkey     k;
     struct bch_val  v;
-    };
-    };
 };
 
 #define KEY(_inode, _offset, _size) \
@@ -318,7 +310,7 @@ static inline void bkey_init(struct bkey *k)
 #define bkey_bytes(_k)      ((_k)->u64s * sizeof(__u64))
 
 #define __BKEY_PADDED(key, pad) \
-    struct { struct bkey_i key; __u64 key ## _pad[pad]; }
+    struct bkey_i key; __u64 key ## _pad[pad]
 
 /*
  * - DELETED keys are used internally to mark keys that should be ignored but


@@ -34,7 +34,12 @@ struct bkey_s {
     };
 };
 
-#define bkey_next(_k)       vstruct_next(_k)
+#define bkey_p_next(_k)     vstruct_next(_k)
+
+static inline struct bkey_i *bkey_next(struct bkey_i *k)
+{
+    return (struct bkey_i *) (k->_data + k->k.u64s);
+}
 
 #define bkey_val_u64s(_k)   ((_k)->u64s - BKEY_U64s)


@@ -46,7 +46,7 @@ static inline void sort_iter_advance(struct sort_iter *iter, sort_cmp_fn cmp)
     BUG_ON(!iter->used);
 
-    i->k = bkey_next(i->k);
+    i->k = bkey_p_next(i->k);
 
     BUG_ON(i->k > i->end);
@@ -108,7 +108,7 @@ bch2_key_sort_fix_overlapping(struct bch_fs *c, struct bset *dst,
         !should_drop_next_key(iter)) {
             bkey_copy(out, k);
             btree_keys_account_key_add(&nr, 0, out);
-            out = bkey_next(out);
+            out = bkey_p_next(out);
         }
 
         sort_iter_advance(iter, key_sort_fix_overlapping_cmp);
@@ -147,7 +147,7 @@ bch2_sort_repack(struct bset *dst, struct btree *src,
         out->needs_whiteout = false;
 
         btree_keys_account_key_add(&nr, 0, out);
-        out = bkey_next(out);
+        out = bkey_p_next(out);
     }
 
     dst->u64s = cpu_to_le16((u64 *) out - dst->_data);
@@ -194,7 +194,7 @@ unsigned bch2_sort_keys(struct bkey_packed *dst,
             bkey_copy(out, in);
         }
         out->needs_whiteout |= needs_whiteout;
-        out = bkey_next(out);
+        out = bkey_p_next(out);
     }
 
     return (u64 *) out - (u64 *) dst;


@@ -69,7 +69,7 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
     for (_k = i->start;
          _k < vstruct_last(i);
          _k = _n) {
-        _n = bkey_next(_k);
+        _n = bkey_p_next(_k);
 
         k = bkey_disassemble(b, _k, &uk);
@@ -542,7 +542,7 @@ start:
                 rw_aux_tree(b, t)[j - 1].offset);
         }
 
-        k = bkey_next(k);
+        k = bkey_p_next(k);
         BUG_ON(k >= btree_bkey_last(b, t));
     }
 }
@@ -733,7 +733,7 @@ retry:
     /* First we figure out where the first key in each cacheline is */
     eytzinger1_for_each(j, t->size - 1) {
         while (bkey_to_cacheline(b, t, k) < cacheline)
-            prev = k, k = bkey_next(k);
+            prev = k, k = bkey_p_next(k);
 
         if (k >= btree_bkey_last(b, t)) {
             /* XXX: this path sucks */
@@ -750,7 +750,7 @@ retry:
     }
 
     while (k != btree_bkey_last(b, t))
-        prev = k, k = bkey_next(k);
+        prev = k, k = bkey_p_next(k);
 
     if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
         bkey_init(&min_key.k);
@@ -888,7 +888,7 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
     struct bkey_packed *p, *i, *ret = NULL, *orig_k = k;
 
     while ((p = __bkey_prev(b, t, k)) && !ret) {
-        for (i = p; i != k; i = bkey_next(i))
+        for (i = p; i != k; i = bkey_p_next(i))
             if (i->type >= min_key_type)
                 ret = i;
@@ -899,10 +899,10 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,
         BUG_ON(ret >= orig_k);
 
         for (i = ret
-            ? bkey_next(ret)
+            ? bkey_p_next(ret)
             : btree_bkey_first(b, t);
              i != orig_k;
-             i = bkey_next(i))
+             i = bkey_p_next(i))
             BUG_ON(i->type >= min_key_type);
     }
@@ -974,7 +974,7 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
         struct bkey_packed *k = start;
 
         while (1) {
-            k = bkey_next(k);
+            k = bkey_p_next(k);
             if (k == end)
                 break;
@@ -1208,12 +1208,12 @@ struct bkey_packed *bch2_bset_search_linear(struct btree *b,
         while (m != btree_bkey_last(b, t) &&
                bkey_iter_cmp_p_or_unp(b, m,
                     lossy_packed_search, search) < 0)
-            m = bkey_next(m);
+            m = bkey_p_next(m);
 
     if (!packed_search)
         while (m != btree_bkey_last(b, t) &&
                bkey_iter_pos_cmp(b, m, search) < 0)
-            m = bkey_next(m);
+            m = bkey_p_next(m);
 
     if (bch2_expensive_debug_checks) {
         struct bkey_packed *prev = bch2_bkey_prev_all(b, t, m);


@@ -211,7 +211,7 @@ static inline size_t btree_aux_data_u64s(const struct btree *b)
 #define bset_tree_for_each_key(_b, _t, _k)          \
     for (_k = btree_bkey_first(_b, _t);             \
          _k != btree_bkey_last(_b, _t);             \
-         _k = bkey_next(_k))
+         _k = bkey_p_next(_k))
 
 static inline bool bset_has_ro_aux_tree(struct bset_tree *t)
 {


@@ -77,9 +77,9 @@ static void verify_no_dups(struct btree *b,
     if (start == end)
         return;
 
-    for (p = start, k = bkey_next(start);
+    for (p = start, k = bkey_p_next(start);
          k != end;
-         p = k, k = bkey_next(k)) {
+         p = k, k = bkey_p_next(k)) {
         struct bkey l = bkey_unpack_key(b, p);
         struct bkey r = bkey_unpack_key(b, k);
@@ -92,7 +92,7 @@ static void set_needs_whiteout(struct bset *i, int v)
 {
     struct bkey_packed *k;
 
-    for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+    for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
         k->needs_whiteout = v;
 }
@@ -175,7 +175,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
     for (k = unwritten_whiteouts_start(c, b);
          k != unwritten_whiteouts_end(c, b);
-         k = bkey_next(k))
+         k = bkey_p_next(k))
         *--ptrs = k;
 
     sort_bkey_ptrs(b, ptrs, ptrs_end - ptrs);
@@ -184,7 +184,7 @@ static void bch2_sort_whiteouts(struct bch_fs *c, struct btree *b)
     while (ptrs != ptrs_end) {
         bkey_copy(k, *ptrs);
-        k = bkey_next(k);
+        k = bkey_p_next(k);
         ptrs++;
     }
@@ -256,11 +256,11 @@ static bool bch2_drop_whiteouts(struct btree *b, enum compact_mode mode)
         out = i->start;
 
         for (k = start; k != end; k = n) {
-            n = bkey_next(k);
+            n = bkey_p_next(k);
 
             if (!bkey_deleted(k)) {
                 bkey_copy(out, k);
-                out = bkey_next(out);
+                out = bkey_p_next(out);
             } else {
                 BUG_ON(k->needs_whiteout);
             }
@@ -652,7 +652,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
         struct bset *i = bset(b, t);
         struct bkey_packed *k;
 
-        for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+        for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
             if (bkey_cmp_left_packed(b, k, &b->data->min_key) >= 0)
                 break;
@@ -665,7 +665,7 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
             set_btree_bset_end(b, t);
         }
 
-        for (k = i->start; k != vstruct_last(i); k = bkey_next(k))
+        for (k = i->start; k != vstruct_last(i); k = bkey_p_next(k))
             if (bkey_cmp_left_packed(b, k, &b->data->max_key) > 0)
                 break;
@@ -843,7 +843,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
         struct bkey_s u;
         struct bkey tmp;
 
-        if (btree_err_on(bkey_next(k) > vstruct_last(i),
+        if (btree_err_on(bkey_p_next(k) > vstruct_last(i),
                  BTREE_ERR_FIXABLE, c, NULL, b, i,
                  "key extends past end of bset")) {
             i->u64s = cpu_to_le16((u64 *) k - i->_data);
@@ -854,7 +854,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
                  BTREE_ERR_FIXABLE, c, NULL, b, i,
                  "invalid bkey format %u", k->format)) {
             i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-            memmove_u64s_down(k, bkey_next(k),
+            memmove_u64s_down(k, bkey_p_next(k),
                       (u64 *) vstruct_end(i) - (u64 *) k);
             continue;
         }
@@ -878,7 +878,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
             btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf);
 
             i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-            memmove_u64s_down(k, bkey_next(k),
+            memmove_u64s_down(k, bkey_p_next(k),
                       (u64 *) vstruct_end(i) - (u64 *) k);
             continue;
         }
@@ -901,14 +901,14 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
             if (btree_err(BTREE_ERR_FIXABLE, c, NULL, b, i, "%s", buf.buf)) {
                 i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-                memmove_u64s_down(k, bkey_next(k),
+                memmove_u64s_down(k, bkey_p_next(k),
                           (u64 *) vstruct_end(i) - (u64 *) k);
                 continue;
             }
         }
 
         prev = k;
-        k = bkey_next(k);
+        k = bkey_p_next(k);
     }
 fsck_err:
     printbuf_exit(&buf);
@@ -1139,7 +1139,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
             btree_keys_account_key_drop(&b->nr, 0, k);
 
             i->u64s = cpu_to_le16(le16_to_cpu(i->u64s) - k->u64s);
-            memmove_u64s_down(k, bkey_next(k),
+            memmove_u64s_down(k, bkey_p_next(k),
                       (u64 *) vstruct_end(i) - (u64 *) k);
             set_btree_bset_end(b, b->set);
             continue;
@@ -1151,7 +1151,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
             bp.v->mem_ptr = 0;
         }
 
-        k = bkey_next(k);
+        k = bkey_p_next(k);
     }
 
     bch2_bset_build_aux_tree(b, b->set, false);
@@ -1847,7 +1847,7 @@ static void btree_write_submit(struct work_struct *work)
 {
     struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
     struct bch_extent_ptr *ptr;
-    __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+    BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
 
     bkey_copy(&tmp.k, &wbio->key);


@@ -777,14 +777,14 @@ bool bch2_btree_insert_key_cached(struct btree_trans *trans,
     struct bkey_cached *ck = (void *) path->l[0].b;
     bool kick_reclaim = false;
 
-    BUG_ON(insert->u64s > ck->u64s);
+    BUG_ON(insert->k.u64s > ck->u64s);
 
     if (likely(!(flags & BTREE_INSERT_JOURNAL_REPLAY))) {
         int difference;
 
-        BUG_ON(jset_u64s(insert->u64s) > trans->journal_preres.u64s);
+        BUG_ON(jset_u64s(insert->k.u64s) > trans->journal_preres.u64s);
 
-        difference = jset_u64s(insert->u64s) - ck->res.u64s;
+        difference = jset_u64s(insert->k.u64s) - ck->res.u64s;
         if (difference > 0) {
             trans->journal_preres.u64s  -= difference;
             ck->res.u64s                += difference;


@@ -242,7 +242,7 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
     struct bch_fs *c = trans->c;
     struct write_point *wp;
     struct btree *b;
-    __BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+    BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
     struct open_buckets ob = { .nr = 0 };
     struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
     unsigned nr_reserve;
@@ -1412,7 +1412,7 @@ static void __btree_split_node(struct btree_update *as,
         out[i]->needs_whiteout = false;
 
         btree_keys_account_key_add(&n[i]->nr, 0, out[i]);
-        out[i] = bkey_next(out[i]);
+        out[i] = bkey_p_next(out[i]);
     }
 
     for (i = 0; i < 2; i++) {
@@ -2444,7 +2444,7 @@ bch2_btree_roots_to_journal_entries(struct bch_fs *c,
                       BCH_JSET_ENTRY_btree_root,
                       i, c->btree_roots[i].level,
                       &c->btree_roots[i].key,
-                      c->btree_roots[i].key.u64s);
+                      c->btree_roots[i].key.k.u64s);
             end = vstruct_next(end);
         }


@@ -274,7 +274,7 @@ next:
         }
         continue;
 nomatch:
-        if (m->ctxt) {
+        if (m->ctxt && m->ctxt->stats) {
             BUG_ON(k.k->p.offset <= iter.pos.offset);
             atomic64_inc(&m->ctxt->stats->keys_raced);
             atomic64_add(k.k->p.offset - iter.pos.offset,


@@ -153,7 +153,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
     BUG_ON(b->nsets != 1);
 
-    for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_next(k))
+    for (k = inmemory->start; k != vstruct_last(inmemory); k = bkey_p_next(k))
         if (k->type == KEY_TYPE_btree_ptr_v2) {
             struct bch_btree_ptr_v2 *v = (void *) bkeyp_val(&b->format, k);
             v->mem_ptr = 0;


@@ -869,12 +869,10 @@ static int ec_stripe_key_update(struct btree_trans *trans,
     for (i = 0; i < new->v.nr_blocks; i++) {
         unsigned v = stripe_blockcount_get(old, i);
 
-        if (!v)
-            continue;
-
-        BUG_ON(old->ptrs[i].dev != new->v.ptrs[i].dev ||
-               old->ptrs[i].gen != new->v.ptrs[i].gen ||
-               old->ptrs[i].offset != new->v.ptrs[i].offset);
+        BUG_ON(v &&
+               (old->ptrs[i].dev != new->v.ptrs[i].dev ||
+                old->ptrs[i].gen != new->v.ptrs[i].gen ||
+                old->ptrs[i].offset != new->v.ptrs[i].offset));
 
         stripe_blockcount_set(&new->v, i, v);
     }
@@ -1594,8 +1592,6 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
     bkey_copy(&h->s->new_stripe.key.k_i, &h->s->existing_stripe.key.k_i);
     h->s->have_existing_stripe = true;
 
-    pr_info("reused %llu", h->s->idx);
-
     return 0;
 }
@@ -1687,9 +1683,9 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
     if (h->s->allocated)
         goto allocated;
 
-    if (h->s->idx)
+    if (h->s->have_existing_stripe)
         goto alloc_existing;
 
-#if 0
     /* First, try to allocate a full stripe: */
     ret = new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
         __bch2_ec_stripe_head_reserve(trans, h);
@@ -1699,24 +1695,17 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
         bch2_err_matches(ret, ENOMEM))
         goto err;
 
-    if (ret == -BCH_ERR_open_buckets_empty) {
-        /* don't want to reuse in this case */
-    }
-#endif
     /*
      * Not enough buckets available for a full stripe: we must reuse an
      * existing stripe:
      */
     while (1) {
         ret = __bch2_ec_stripe_head_reuse(trans, h);
-        if (ret)
-            ret = __bch2_ec_stripe_head_reserve(trans, h);
         if (!ret)
             break;
-        pr_info("err %s", bch2_err_str(ret));
         if (ret == -BCH_ERR_ENOSPC_stripe_reuse && cl)
             ret = -BCH_ERR_stripe_alloc_blocked;
-        if (waiting || !cl)
+        if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
             goto err;
 
         /* XXX freelist_wait? */


@@ -633,7 +633,7 @@ static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr
         memcpy((void *) &k->v + bkey_val_bytes(&k->k),
                &ptr,
                sizeof(ptr));
-        k->u64s++;
+        k->k.u64s++;
         break;
     default:
         BUG();


@@ -734,7 +734,7 @@ static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
         }
 
         if (dst != src)
-            memmove_u64s_down(dst, src, src->u64s);
+            memmove_u64s_down(dst, src, src->k.u64s);
 
         dst = bkey_next(dst);
     }


@@ -357,7 +357,7 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
     struct bkey_i *k;
     bool first = true;
 
-    vstruct_for_each(entry, k) {
+    jset_entry_for_each_key(entry, k) {
         if (!first) {
             prt_newline(out);
             prt_printf(out, "%s: ", bch2_jset_entry_types[entry->type]);


@@ -40,9 +40,14 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
          (entry = __jset_entry_type_next(jset, entry, type));   \
          entry = vstruct_next(entry))
 
-#define for_each_jset_key(k, _n, entry, jset)                   \
-    for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)   \
-        vstruct_for_each_safe(entry, k, _n)
+#define jset_entry_for_each_key(_e, _k)                         \
+    for (_k = (_e)->start;                                      \
+         _k < vstruct_last(_e);                                 \
+         _k = bkey_next(_k))
+
+#define for_each_jset_key(k, entry, jset)                       \
+    for_each_jset_entry_type(entry, jset, BCH_JSET_ENTRY_btree_keys)\
+        jset_entry_for_each_key(entry, k)
 
 int bch2_journal_entry_validate(struct bch_fs *, struct jset *,
                 struct jset_entry *, unsigned, int, int);


@@ -304,12 +304,6 @@ static int bch2_move_extent(struct btree_trans *trans,
     if (ret && ret != -BCH_ERR_unwritten_extent_update)
         goto err_free_pages;
 
-    io->write.ctxt = ctxt;
-    io->write.op.end_io = move_write_done;
-
-    atomic64_inc(&ctxt->stats->keys_moved);
-    atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
-
     if (ret == -BCH_ERR_unwritten_extent_update) {
         bch2_update_unwritten_extent(trans, &io->write);
         move_free(io);
@@ -318,6 +312,14 @@
 
     BUG_ON(ret);
 
+    io->write.ctxt = ctxt;
+    io->write.op.end_io = move_write_done;
+
+    if (ctxt->stats) {
+        atomic64_inc(&ctxt->stats->keys_moved);
+        atomic64_add(k.k->size, &ctxt->stats->sectors_moved);
+    }
+
     this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
     this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
     trace_move_extent_read(k.k);
@@ -469,9 +471,11 @@ static int __bch2_move_data(struct moving_context *ctxt,
     bch2_bkey_buf_init(&sk);
     bch2_trans_init(&trans, c, 0, 0);
 
+    if (ctxt->stats) {
     ctxt->stats->data_type  = BCH_DATA_user;
     ctxt->stats->btree_id   = btree_id;
     ctxt->stats->pos        = start;
+    }
 
     bch2_trans_iter_init(&trans, &iter, btree_id, start,
                  BTREE_ITER_PREFETCH|
@@ -496,6 +500,7 @@
         if (bkey_ge(bkey_start_pos(k.k), end))
             break;
 
+        if (ctxt->stats)
         ctxt->stats->pos = iter.pos;
 
         if (!bkey_extent_is_direct_data(k.k))
@@ -536,6 +541,7 @@
         if (ctxt->rate)
             bch2_ratelimit_increment(ctxt->rate, k.k->size);
 next:
+        if (ctxt->stats)
         atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
 next_nondata:
         bch2_btree_iter_advance(&iter);
@@ -585,7 +591,7 @@ int bch2_move_data(struct bch_fs *c,
     return ret;
 }
 
-static noinline void verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
+void bch2_verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
 {
     struct bch_fs *c = trans->c;
     struct btree_iter iter;
@@ -620,6 +626,9 @@
 failed_to_evacuate:
     bch2_trans_iter_exit(trans, &iter);
 
+    if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
+        return;
+
     prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
     bch2_bkey_val_to_text(&buf, c, k);
@@ -760,6 +769,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
             if (ctxt->rate)
                 bch2_ratelimit_increment(ctxt->rate, k.k->size);
 
+            if (ctxt->stats)
             atomic64_add(k.k->size, &ctxt->stats->sectors_seen);
         } else {
             struct btree *b;
@@ -787,9 +797,11 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
             if (ctxt->rate)
                 bch2_ratelimit_increment(ctxt->rate,
                              c->opts.btree_node_size >> 9);
+            if (ctxt->stats) {
             atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_seen);
             atomic64_add(c->opts.btree_node_size >> 9, &ctxt->stats->sectors_moved);
             }
+            }
 next:
             bp_offset++;
         }
@@ -801,7 +813,7 @@
         move_ctxt_wait_event(ctxt, NULL, list_empty(&ctxt->reads));
         closure_sync(&ctxt->cl);
         if (!ctxt->write_error)
-            verify_bucket_evacuated(trans, bucket, gen);
+            bch2_verify_bucket_evacuated(trans, bucket, gen);
     }
 err:
     bch2_bkey_buf_exit(&sk, c);


@@ -30,6 +30,8 @@ struct moving_context {
     wait_queue_head_t   wait;
 };
 
+void bch2_verify_bucket_evacuated(struct btree_trans *, struct bpos, int);
+
 #define move_ctxt_wait_event(_ctxt, _trans, _cond)      \
 do {                                                    \
     bool cond_finished = false;                         \


@@ -26,6 +26,7 @@
 #include "super-io.h"
 
 #include <trace/events/bcachefs.h>
 
+#include <linux/bsearch.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/math64.h>
@@ -71,62 +72,147 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
     return ret;
 }
 
-static int bch2_copygc_next_bucket(struct btree_trans *trans,
-            struct bpos *bucket, u8 *gen, struct bpos *pos)
-{
-    struct btree_iter iter;
-    struct bkey_s_c k;
-    int ret;
-
-    ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
-            bpos_max(*pos, lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0)),
-            lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
-            0, k, ({
-        *bucket = u64_to_bucket(k.k->p.offset);
-
-        bch2_bucket_is_movable(trans, *bucket, lru_pos_time(k.k->p), gen);
-    }));
-
-    *pos = iter.pos;
-    if (ret < 0)
-        return ret;
-    return ret ? 0 : -ENOENT;
-}
-
-static int bch2_copygc(struct bch_fs *c)
-{
-    struct bch_move_stats move_stats;
-    struct btree_trans trans;
-    struct moving_context ctxt;
+struct copygc_bucket_in_flight {
+    struct bpos             bucket;
+    u8                      gen;
+    struct moving_context   ctxt;
+};
+
+typedef FIFO(struct copygc_bucket_in_flight) copygc_buckets_in_flight;
+
+struct copygc_bucket {
+    struct bpos             bucket;
+    u8                      gen;
+};
+
+typedef DARRAY(struct copygc_bucket) copygc_buckets;
+
+static int copygc_bucket_cmp(const void *_l, const void *_r)
+{
+    const struct copygc_bucket *l = _l;
+    const struct copygc_bucket *r = _r;
+
+    return bpos_cmp(l->bucket, r->bucket) ?: cmp_int(l->gen, r->gen);
+}
+
+static bool bucket_in_flight(copygc_buckets *buckets_sorted, struct copygc_bucket b)
+{
+    return bsearch(&b,
+               buckets_sorted->data,
+               buckets_sorted->nr,
+               sizeof(buckets_sorted->data[0]),
+               copygc_bucket_cmp) != NULL;
+}
+
+static void copygc_buckets_wait(struct btree_trans *trans,
+                copygc_buckets_in_flight *buckets_in_flight,
+                size_t nr, bool verify_evacuated)
+{
+    while (!fifo_empty(buckets_in_flight)) {
+        struct copygc_bucket_in_flight *i = &fifo_peek_front(buckets_in_flight);
+
+        if (fifo_used(buckets_in_flight) <= nr &&
+            closure_nr_remaining(&i->ctxt.cl) != 1)
+            break;
+
+        /*
+         * moving_ctxt_exit calls bch2_write as it flushes pending
+         * reads, which inits another btree_trans; this one must be
+         * unlocked:
+         */
+        bch2_trans_unlock(trans);
+        bch2_moving_ctxt_exit(&i->ctxt);
+        if (verify_evacuated)
+            bch2_verify_bucket_evacuated(trans, i->bucket, i->gen);
+        buckets_in_flight->front++;
+    }
+}
+
+static int bch2_copygc_get_buckets(struct btree_trans *trans,
+            copygc_buckets_in_flight *buckets_in_flight,
+            copygc_buckets *buckets)
+{
+    struct btree_iter iter;
+    copygc_buckets buckets_sorted = { 0 };
+    struct copygc_bucket_in_flight *i;
+    struct bkey_s_c k;
+    size_t fifo_iter;
+    int ret;
+
+    copygc_buckets_wait(trans, buckets_in_flight, buckets_in_flight->size / 2, true);
+
+    fifo_for_each_entry_ptr(i, buckets_in_flight, fifo_iter) {
+        ret = darray_push(&buckets_sorted, ((struct copygc_bucket) {i->bucket, i->gen}));
+        if (ret) {
+            bch_err(trans->c, "error allocating copygc_buckets_sorted");
+            goto err;
+        }
+    }
+
+    sort(buckets_sorted.data,
+         buckets_sorted.nr,
+         sizeof(buckets_sorted.data[0]),
+         copygc_bucket_cmp,
+         NULL);
+
+    ret = for_each_btree_key2_upto(trans, iter, BTREE_ID_lru,
+            lru_pos(BCH_LRU_FRAGMENTATION_START, 0, 0),
+            lru_pos(BCH_LRU_FRAGMENTATION_START, U64_MAX, LRU_TIME_MAX),
+            0, k, ({
+        struct copygc_bucket b = { .bucket = u64_to_bucket(k.k->p.offset) };
+        int ret = 0;
+
+        if (!bucket_in_flight(&buckets_sorted, b) &&
+            bch2_bucket_is_movable(trans, b.bucket, lru_pos_time(k.k->p), &b.gen))
+            ret = darray_push(buckets, b) ?: buckets->nr >= fifo_free(buckets_in_flight);
+        ret;
+    }));
+err:
+    darray_exit(&buckets_sorted);
+
+    return ret < 0 ? ret : 0;
+}
+
+static int bch2_copygc(struct btree_trans *trans,
+               copygc_buckets_in_flight *buckets_in_flight,
+               struct bch_move_stats *stats)
+{
+    struct bch_fs *c = trans->c;
     struct data_update_opts data_opts = {
         .btree_insert_flags = BTREE_INSERT_USE_RESERVE|JOURNAL_WATERMARK_copygc,
     };
-    struct bpos bucket;
-    struct bpos pos;
-    u8 gen = 0;
-    unsigned nr_evacuated;
+    copygc_buckets buckets = { 0 };
+    struct copygc_bucket_in_flight *f;
+    struct copygc_bucket *i;
     int ret = 0;
 
-    bch2_move_stats_init(&move_stats, "copygc");
-    bch2_moving_ctxt_init(&ctxt, c, NULL, &move_stats,
-                  writepoint_ptr(&c->copygc_write_point),
-                  false);
-    bch2_trans_init(&trans, c, 0, 0);
-
-    ret = bch2_btree_write_buffer_flush(&trans);
-    BUG_ON(ret);
-
-    for (nr_evacuated = 0, pos = POS_MIN;
-         nr_evacuated < 32 && !ret;
-         nr_evacuated++, pos = bpos_nosnap_successor(pos)) {
-        ret = bch2_copygc_next_bucket(&trans, &bucket, &gen, &pos) ?:
-            __bch2_evacuate_bucket(&trans, &ctxt, bucket, gen, data_opts);
-        if (bkey_eq(pos, POS_MAX))
-            break;
+    ret = bch2_btree_write_buffer_flush(trans);
+    if (bch2_fs_fatal_err_on(ret, c, "%s: error %s from bch2_btree_write_buffer_flush()",
+                 __func__, bch2_err_str(ret)))
+        return ret;
+
+    ret = bch2_copygc_get_buckets(trans, buckets_in_flight, &buckets);
+    if (ret)
+        goto err;
+
+    darray_for_each(buckets, i) {
+        if (unlikely(freezing(current)))
+            break;
+
+        f = fifo_push_ref(buckets_in_flight);
+        f->bucket   = i->bucket;
+        f->gen      = i->gen;
+        bch2_moving_ctxt_init(&f->ctxt, c, NULL, NULL, //stats,
+                  writepoint_ptr(&c->copygc_write_point),
+                  false);
+
+        ret = __bch2_evacuate_bucket(trans, &f->ctxt, f->bucket, f->gen, data_opts);
+        if (ret)
+            goto err;
     }
-
-    bch2_trans_exit(&trans);
-    bch2_moving_ctxt_exit(&ctxt);
+err:
+    darray_exit(&buckets);
 
     /* no entries in LRU btree found, or got to end: */
     if (ret == -ENOENT)
@@ -135,7 +221,7 @@ static int bch2_copygc(struct bch_fs *c)
     if (ret < 0 && !bch2_err_matches(ret, EROFS))
         bch_err(c, "error from bch2_move_data() in copygc: %s", bch2_err_str(ret));
 
-    trace_and_count(c, copygc, c, atomic64_read(&move_stats.sectors_moved), 0, 0, 0);
+    trace_and_count(c, copygc, c, atomic64_read(&stats->sectors_moved), 0, 0, 0);
     return ret;
 }
@@ -162,7 +248,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
     for_each_rw_member(ca, c, dev_idx) {
         struct bch_dev_usage usage = bch2_dev_usage_read(ca);
 
-        fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_none) *
+        fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
                        ca->mi.bucket_size) >> 1);
         fragmented = usage.d[BCH_DATA_user].fragmented;
@@ -187,17 +273,36 @@ void bch2_copygc_wait_to_text(struct printbuf *out, struct bch_fs *c)
 static int bch2_copygc_thread(void *arg)
 {
     struct bch_fs *c = arg;
+    struct btree_trans trans;
+    struct bch_move_stats move_stats;
     struct io_clock *clock = &c->io_clock[WRITE];
+    copygc_buckets_in_flight copygc_buckets;
     u64 last, wait;
     int ret = 0;
 
+    if (!init_fifo(&copygc_buckets, 1 << 14, GFP_KERNEL)) {
+        bch_err(c, "error allocating copygc buckets in flight");
+        return -ENOMEM;
+    }
+
     set_freezable();
+    bch2_move_stats_init(&move_stats, "copygc");
+    bch2_trans_init(&trans, c, 0, 0);
 
     while (!ret && !kthread_should_stop()) {
+        bch2_trans_unlock(&trans);
+        try_to_freeze();
         cond_resched();
 
-        if (kthread_wait_freezable(c->copy_gc_enabled))
-            break;
+        kthread_wait(freezing(current) || c->copy_gc_enabled);
+
+        if (unlikely(freezing(current))) {
+            copygc_buckets_wait(&trans, &copygc_buckets, 0, true);
+            bch2_trans_unlock(&trans);
+            __refrigerator(false);
+            continue;
+        }
 
         last = atomic64_read(&clock->now);
         wait = bch2_copygc_wait_amount(c);
@@ -213,12 +318,16 @@ static int bch2_copygc_thread(void *arg)
         c->copygc_wait = 0;
 
         c->copygc_running = true;
-        ret = bch2_copygc(c);
+        ret = bch2_copygc(&trans, &copygc_buckets, &move_stats);
         c->copygc_running = false;
 
         wake_up(&c->copygc_running_wq);
     }
 
+    copygc_buckets_wait(&trans, &copygc_buckets, 0, !ret);
+    free_fifo(&copygc_buckets);
+    bch2_trans_exit(&trans);
+
     return 0;
 }


@@ -481,7 +481,7 @@ static int journal_keys_sort(struct bch_fs *c)
     struct genradix_iter iter;
     struct journal_replay *i, **_i;
     struct jset_entry *entry;
-    struct bkey_i *k, *_n;
+    struct bkey_i *k;
     struct journal_keys *keys = &c->journal_keys;
     struct journal_key *src, *dst;
     size_t nr_keys = 0;
@@ -492,7 +492,7 @@ static int journal_keys_sort(struct bch_fs *c)
         if (!i || i->ignore)
             continue;
 
-        for_each_jset_key(k, _n, entry, &i->j)
+        for_each_jset_key(k, entry, &i->j)
             nr_keys++;
     }
@@ -511,7 +511,7 @@ static int journal_keys_sort(struct bch_fs *c)
         if (!i || i->ignore)
             continue;
 
-        for_each_jset_key(k, _n, entry, &i->j)
+        for_each_jset_key(k, entry, &i->j)
             keys->d[keys->nr++] = (struct journal_key) {
                 .btree_id   = entry->btree_id,
                 .level      = entry->level,
@@ -871,7 +871,7 @@ static int verify_superblock_clean(struct bch_fs *c,
                 IS_ERR(k1) ||
                 IS_ERR(k2) ||
                 k1->k.u64s != k2->k.u64s ||
-                memcmp(k1, k2, bkey_bytes(k1)) ||
+                memcmp(k1, k2, bkey_bytes(&k1->k)) ||
                 l1 != l2, c,
             "superblock btree root %u doesn't match journal after clean shutdown\n"
             "sb: l=%u %s\n"


@@ -543,9 +543,10 @@
     submit_bio(bio);                                    \
 } while (0)
 
-#define kthread_wait_freezable(cond)                    \
+#define kthread_wait(cond)                              \
 ({                                                      \
     int _ret = 0;                                       \
+                                                        \
     while (1) {                                         \
         set_current_state(TASK_INTERRUPTIBLE);          \
         if (kthread_should_stop()) {                    \
@@ -557,7 +558,27 @@
             break;                                      \
                                                         \
         schedule();                                     \
-        try_to_freeze();                                \
+    }                                                   \
+    set_current_state(TASK_RUNNING);                    \
+    _ret;                                               \
+})
+
+#define kthread_wait_freezable(cond)                    \
+({                                                      \
+    int _ret = 0;                                       \
+    bool frozen;                                        \
+                                                        \
+    while (1) {                                         \
+        set_current_state(TASK_INTERRUPTIBLE);          \
+        if (kthread_freezable_should_stop(&frozen)) {   \
+            _ret = -1;                                  \
+            break;                                      \
+        }                                               \
+                                                        \
+        if (cond)                                       \
+            break;                                      \
+                                                        \
+        schedule();                                     \
     }                                                   \
     set_current_state(TASK_RUNNING);                    \
     _ret;                                               \


@@ -9,6 +9,7 @@
 #include <linux/closure.h>
 #include <linux/debugfs.h>
 #include <linux/export.h>
+#include <linux/rcupdate.h>
 #include <linux/seq_file.h>
 #include <linux/sched/debug.h>


@@ -99,6 +99,11 @@ bool kthread_should_stop(void)
     return test_bit(KTHREAD_SHOULD_STOP, &current->kthread_flags);
 }
 
+bool kthread_freezable_should_stop(bool *was_frozen)
+{
+    return test_bit(KTHREAD_SHOULD_STOP, &current->kthread_flags);
+}
+
 /**
  * kthread_stop - stop a thread created by kthread_create().
  * @k: thread created by kthread_create().