mirror of https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-23 00:00:02 +03:00

Update bcachefs sources to 8e1519ccb6 bcachefs: Add tracepoint & counter for btree split race

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

This commit is contained in:
parent 46a6b9210c
commit da6a356895
@@ -1 +1 @@
-1b149940290c0ef39070b4afaadab84a65bba034
+8e1519ccb62b76736d5b9ca97e58b41ed9a11274
@@ -70,4 +70,19 @@ static inline void list_splice_init(struct list_head *list,
 #define hlist_head			cds_hlist_head
 #define hlist_node			cds_hlist_node
 
+#define hlist_add_head(n, h)		cds_hlist_add_head(n, h)
+#define hlist_del(n)			cds_hlist_del(n)
+
+#define hlist_entry(ptr, type, member)	container_of(ptr,type,member)
+
+#define hlist_entry_safe(ptr, type, member) \
+	({ typeof(ptr) ____ptr = (ptr); \
+	   ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
+	})
+
+#define hlist_for_each_entry(pos, head, member)				\
+	for (pos = hlist_entry_safe((head)->next, typeof(*(pos)), member);\
+	     pos;							\
+	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
+
 #endif /* _LIST_LIST_H */
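Context for the hunk above: these shims map the kernel hlist API onto liburcu's cds_hlist types, so kernel code that uses hash lists (notably the new ec_stripes_new table later in this commit) builds in userspace unchanged. A minimal usage sketch of the iteration macro, assuming the shims above (and container_of) are in scope; the struct, table, and hash here are hypothetical, not part of the commit:

#include <stdint.h>
#include <stddef.h>

struct cached_stripe {
	uint64_t		idx;
	struct hlist_node	hash;	/* embedded list linkage */
};

static struct hlist_head table[32];	/* toy fixed-size hash table */

/* Walk one bucket looking for a matching index. */
static struct cached_stripe *lookup(uint64_t idx)
{
	struct cached_stripe *s;

	hlist_for_each_entry(s, &table[idx & 31], hash)
		if (s->idx == idx)
			return s;
	return NULL;
}

hlist_entry_safe() is what makes the empty-bucket case clean: a NULL node never reaches container_of(), so the loop simply terminates.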
@@ -837,6 +837,12 @@ DEFINE_EVENT(transaction_event, trans_restart_injected,
 	TP_ARGS(trans, caller_ip)
 );
 
+DEFINE_EVENT(transaction_event, trans_restart_split_race,
+	TP_PROTO(struct btree_trans *trans,
+		 unsigned long caller_ip),
+	TP_ARGS(trans, caller_ip)
+);
+
 DEFINE_EVENT(transaction_event, trans_blocked_journal_reclaim,
 	TP_PROTO(struct btree_trans *trans,
 		 unsigned long caller_ip),
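The new event reuses the existing transaction_event class, so it records the same fields (filesystem, caller IP) as the other restart events. The trace_and_count() call site added further down pairs this tracepoint with the matching persistent counter from bcachefs_format.h; roughly, as an illustration only (not the macro's actual definition):

/* Illustrative sketch - the real macro lives in bcachefs and may differ: */
#define trace_and_count(_c, _name, ...)					\
do {									\
	this_cpu_add((_c)->counters[BCH_COUNTER_##_name], 1);		\
	trace_##_name(__VA_ARGS__);					\
} while (0)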
@@ -1785,6 +1785,9 @@ static int invalidate_one_bucket(struct btree_trans *trans,
 		goto err;
 	}
 
+	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
+		return 0;
+
 	a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
 	ret = PTR_ERR_OR_ZERO(a);
 	if (ret)
@@ -713,7 +713,7 @@ static void add_new_bucket(struct bch_fs *c,
 	ob_push(c, ptrs, ob);
 }
 
-static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
+int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 		      struct open_buckets *ptrs,
 		      struct dev_stripe_state *stripe,
 		      struct bch_devs_mask *devs_may_alloc,
@@ -779,24 +779,6 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 	return ret;
 }
 
-int bch2_bucket_alloc_set(struct bch_fs *c,
-		      struct open_buckets *ptrs,
-		      struct dev_stripe_state *stripe,
-		      struct bch_devs_mask *devs_may_alloc,
-		      unsigned nr_replicas,
-		      unsigned *nr_effective,
-		      bool *have_cache,
-		      enum alloc_reserve reserve,
-		      unsigned flags,
-		      struct closure *cl)
-{
-	return bch2_trans_do(c, NULL, NULL, 0,
-		      bch2_bucket_alloc_set_trans(&trans, ptrs, stripe,
-						      devs_may_alloc, nr_replicas,
-						      nr_effective, have_cache, reserve,
-						      flags, cl));
-}
-
 /* Allocate from stripes: */
 
 /*
@@ -805,7 +787,7 @@ int bch2_bucket_alloc_set(struct bch_fs *c,
  * it's to a device we don't want:
  */
 
-static int bucket_alloc_from_stripe(struct bch_fs *c,
+static int bucket_alloc_from_stripe(struct btree_trans *trans,
 			 struct open_buckets *ptrs,
 			 struct write_point *wp,
 			 struct bch_devs_mask *devs_may_alloc,
@@ -817,6 +799,7 @@ static int bucket_alloc_from_stripe(struct bch_fs *c,
 			 unsigned flags,
 			 struct closure *cl)
 {
+	struct bch_fs *c = trans->c;
 	struct dev_alloc_list devs_sorted;
 	struct ec_stripe_head *h;
 	struct open_bucket *ob;
@@ -832,11 +815,11 @@ static int bucket_alloc_from_stripe(struct bch_fs *c,
 	if (ec_open_bucket(c, ptrs))
 		return 0;
 
-	h = bch2_ec_stripe_head_get(c, target, 0, nr_replicas - 1,
+	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1,
 				    wp == &c->copygc_write_point,
 				    cl);
 	if (IS_ERR(h))
-		return -PTR_ERR(h);
+		return PTR_ERR(h);
 	if (!h)
 		return 0;
 
@@ -942,7 +925,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 	}
 
 	if (!ec_open_bucket(c, ptrs)) {
-		ret = bucket_alloc_from_stripe(c, ptrs, wp, &devs,
+		ret = bucket_alloc_from_stripe(trans, ptrs, wp, &devs,
 					       target, erasure_code,
 					       nr_replicas, nr_effective,
 					       have_cache, flags, _cl);
@@ -1090,7 +1073,7 @@ static bool try_decrease_writepoints(struct bch_fs *c,
 	return true;
 }
 
-static void bch2_trans_mutex_lock(struct btree_trans *trans,
+static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
 				  struct mutex *lock)
 {
 	if (!mutex_trylock(lock)) {
@@ -1108,7 +1091,7 @@ static struct write_point *writepoint_find(struct btree_trans *trans,
 
 	if (!(write_point & 1UL)) {
 		wp = (struct write_point *) write_point;
-		bch2_trans_mutex_lock(trans, &wp->lock);
+		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
 		return wp;
 	}
 
@@ -1117,7 +1100,7 @@ restart_find:
 	wp = __writepoint_find(head, write_point);
 	if (wp) {
 lock_wp:
-		bch2_trans_mutex_lock(trans, &wp->lock);
+		bch2_trans_mutex_lock_norelock(trans, &wp->lock);
 		if (wp->write_point == write_point)
 			goto out;
 		mutex_unlock(&wp->lock);
@@ -1130,8 +1113,8 @@ restart_find_oldest:
 		if (!oldest || time_before64(wp->last_used, oldest->last_used))
 			oldest = wp;
 
-	bch2_trans_mutex_lock(trans, &oldest->lock);
-	bch2_trans_mutex_lock(trans, &c->write_points_hash_lock);
+	bch2_trans_mutex_lock_norelock(trans, &oldest->lock);
+	bch2_trans_mutex_lock_norelock(trans, &c->write_points_hash_lock);
 	if (oldest >= c->write_points + c->write_points_nr ||
 	    try_increase_writepoints(c)) {
 		mutex_unlock(&c->write_points_hash_lock);
@@ -150,7 +150,7 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
 	return ret;
 }
 
-int bch2_bucket_alloc_set(struct bch_fs *, struct open_buckets *,
+int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
 		      struct dev_stripe_state *, struct bch_devs_mask *,
 		      unsigned, unsigned *, bool *, enum alloc_reserve,
 		      unsigned, struct closure *);
@@ -940,8 +940,11 @@ struct bch_fs {
 	GENRADIX(struct stripe) stripes;
 	GENRADIX(struct gc_stripe) gc_stripes;
 
+	struct hlist_head	ec_stripes_new[32];
+	spinlock_t		ec_stripes_new_lock;
+
 	ec_stripes_heap		ec_stripes_heap;
-	spinlock_t		ec_stripes_heap_lock;
+	struct mutex		ec_stripes_heap_lock;
 
 	/* ERASURE CODING */
 	struct list_head	ec_stripe_head_list;
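The new ec_stripes_new table has 32 heads, and the ec.c code below indexes it with hash_64() over ilog2(ARRAY_SIZE()) bits. A minimal sketch of that idiom, assuming hash_64()/ilog2()/ARRAY_SIZE() from the kernel headers (the helper name is hypothetical):

/* hash_64(v, bits) returns a 'bits'-wide hash, and
 * ilog2(ARRAY_SIZE(c->ec_stripes_new)) == 5, so this yields 0..31: */
static inline unsigned ec_stripes_new_bucket(struct bch_fs *c, u64 idx)
{
	return hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
}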
@@ -1474,7 +1474,8 @@ struct bch_sb_field_disk_groups {
 	x(transaction_commit,					72)	\
 	x(write_super,						73)	\
 	x(trans_restart_would_deadlock_recursion_limit,		74)	\
-	x(trans_restart_write_buffer_flush,			75)
+	x(trans_restart_write_buffer_flush,			75)	\
+	x(trans_restart_split_race,				76)
 
 enum bch_persistent_counters {
 #define x(t, n, ...) BCH_COUNTER_##t,
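The counter list is an x-macro: one list, expanded once into enum bch_persistent_counters and again elsewhere into a name table, which keeps the enum, on-disk numbering, and printable names in sync from a single definition. A self-contained toy model of the expansion (all names hypothetical):

#define TOY_COUNTERS()					\
	x(trans_restart_write_buffer_flush,	75)	\
	x(trans_restart_split_race,		76)

enum toy_counters {
#define x(t, n) TOY_COUNTER_##t,
	TOY_COUNTERS()
#undef x
	TOY_COUNTER_NR
};

static const char * const toy_counter_names[] = {
#define x(t, n) #t,
	TOY_COUNTERS()
#undef x
};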
@@ -661,7 +661,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 				bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
 			do_update = true;
 
-		if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
+		if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
 				"pointer does not match stripe %llu\n"
 				"while marking %s",
 				(u64) p.ec.idx,
@@ -976,6 +976,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
 	trans->in_traverse_all = true;
 retry_all:
 	trans->restarted = 0;
+	trans->last_restarted_ip = 0;
 
 	trans_for_each_path(trans, path)
 		path->should_be_locked = false;
@@ -1360,7 +1361,7 @@ void bch2_trans_restart_error(struct btree_trans *trans, u32 restart_count)
 {
 	panic("trans->restart_count %u, should be %u, last restarted by %pS\n",
 	      trans->restart_count, restart_count,
-	      (void *) trans->last_restarted_ip);
+	      (void *) trans->last_begin_ip);
 }
 
 void bch2_trans_in_restart_error(struct btree_trans *trans)
@@ -2865,7 +2866,7 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 	if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
 		bch2_trans_reset_srcu_lock(trans);
 
-	trans->last_restarted_ip = _RET_IP_;
+	trans->last_begin_ip = _RET_IP_;
 	if (trans->restarted) {
 		bch2_btree_path_traverse_all(trans);
 		trans->notrace_relock_fail = false;
@@ -2957,6 +2958,15 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
 
 	mutex_lock(&c->btree_trans_lock);
 	list_for_each_entry(pos, &c->btree_trans_list, list) {
+		/*
+		 * We'd much prefer to be stricter here and completely
+		 * disallow multiple btree_trans in the same thread -
+		 * but the data move path calls bch2_write when we
+		 * already have a btree_trans initialized.
+		 */
+		BUG_ON(trans->locking_wait.task->pid == pos->locking_wait.task->pid &&
+		       bch2_trans_locked(pos));
+
 		if (trans->locking_wait.task->pid < pos->locking_wait.task->pid) {
 			list_add_tail(&trans->list, &pos->list);
 			goto list_add_done;
@@ -198,6 +198,15 @@ struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
 
 void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
 
+int __bch2_trans_mutex_lock(struct btree_trans *, struct mutex *);
+
+static inline int bch2_trans_mutex_lock(struct btree_trans *trans, struct mutex *lock)
+{
+	return mutex_trylock(lock)
+		? 0
+		: __bch2_trans_mutex_lock(trans, lock);
+}
+
 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_trans_verify_paths(struct btree_trans *);
 void bch2_assert_pos_locked(struct btree_trans *, enum btree_id,
@@ -252,6 +261,7 @@ static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int er
 	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
 
 	trans->restarted = err;
+	trans->last_restarted_ip = _THIS_IP_;
 	return -err;
 }
 
@@ -698,6 +698,19 @@ bool bch2_trans_locked(struct btree_trans *trans)
 	return false;
 }
 
+int __bch2_trans_mutex_lock(struct btree_trans *trans,
+			    struct mutex *lock)
+{
+	int ret;
+
+	bch2_trans_unlock(trans);
+	mutex_lock(lock);
+	ret = bch2_trans_relock(trans);
+	if (ret)
+		mutex_unlock(lock);
+	return ret;
+}
+
 /* Debug */
 
 #ifdef CONFIG_BCACHEFS_DEBUG
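The slow path drops all btree node locks before blocking on the mutex, then relocks; if relocking fails it returns a transaction-restart error with the mutex already released, so there is no cleanup for the caller to do. A hedged sketch of how a caller would consume this (the function is hypothetical, not from the commit):

/* A nonzero return from bch2_trans_mutex_lock() is a transaction-restart
 * error and the mutex is NOT held - just bubble it up and let the outer
 * bch2_trans_begin() loop retry: */
static int with_lock_held(struct btree_trans *trans, struct mutex *lock)
{
	int ret = bch2_trans_mutex_lock(trans, lock);

	if (ret)
		return ret;

	/* ...critical section, with btree locks successfully retaken... */

	mutex_unlock(lock);
	return 0;
}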
@@ -442,6 +442,7 @@ struct btree_trans {
 	bool			notrace_relock_fail:1;
 	enum bch_errcode	restarted:16;
 	u32			restart_count;
+	unsigned long		last_begin_ip;
 	unsigned long		last_restarted_ip;
 	unsigned long		srcu_lock_time;
 
@@ -1716,8 +1716,10 @@ split:
 	 * We could attempt to avoid the transaction restart, by calling
 	 * bch2_btree_path_upgrade() and allocating more nodes:
 	 */
-	if (b->c.level >= as->update_level)
+	if (b->c.level >= as->update_level) {
+		trace_and_count(c, trans_restart_split_race, trans, _THIS_IP_);
 		return btree_trans_restart(trans, BCH_ERR_transaction_restart_split_race);
+	}
 
 	return btree_split(as, trans, path, b, keys, flags);
 }
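This hunk is the core of the commit: when a split races past the level the update reserved nodes for, the transaction restarts, and that event is now both counted persistently and visible to tracing. For context, a sketch of the retry loop that ultimately absorbs such a restart — the inner helper is hypothetical, and real callers typically go through commit_do()/lockrestart_do():

/* BCH_ERR_transaction_restart_split_race unwinds out of the update path;
 * bch2_trans_begin() resets the transaction and the operation is retried. */
static int insert_with_retries(struct btree_trans *trans)
{
	int ret;

	do {
		bch2_trans_begin(trans);
		ret = do_insert_that_may_split(trans);	/* hypothetical */
	} while (bch2_err_matches(ret, BCH_ERR_transaction_restart));

	return ret;
}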
@@ -2401,20 +2403,15 @@ bool bch2_btree_interior_updates_flush(struct bch_fs *c)
 	return ret;
 }
 
-void bch2_journal_entries_to_btree_roots(struct bch_fs *c, struct jset *jset)
+void bch2_journal_entry_to_btree_root(struct bch_fs *c, struct jset_entry *entry)
 {
-	struct btree_root *r;
-	struct jset_entry *entry;
+	struct btree_root *r = &c->btree_roots[entry->btree_id];
 
 	mutex_lock(&c->btree_root_lock);
 
-	vstruct_for_each(jset, entry)
-		if (entry->type == BCH_JSET_ENTRY_btree_root) {
-			r = &c->btree_roots[entry->btree_id];
-			r->level = entry->level;
-			r->alive = true;
-			bkey_copy(&r->key, &entry->start[0]);
-		}
+	r->level = entry->level;
+	r->alive = true;
+	bkey_copy(&r->key, &entry->start[0]);
 
 	mutex_unlock(&c->btree_root_lock);
 }
@@ -314,7 +314,7 @@ void bch2_btree_updates_to_text(struct printbuf *, struct bch_fs *);
 
 bool bch2_btree_interior_updates_flush(struct bch_fs *);
 
-void bch2_journal_entries_to_btree_roots(struct bch_fs *, struct jset *);
+void bch2_journal_entry_to_btree_root(struct bch_fs *, struct jset_entry *);
 struct jset_entry *bch2_btree_roots_to_journal_entries(struct bch_fs *,
 					struct jset_entry *, struct jset_entry *);
 
@@ -1749,6 +1749,7 @@ int __bch2_btree_insert(struct btree_trans *trans, enum btree_id id,
 	int ret;
 
 	bch2_trans_iter_init(trans, &iter, id, bkey_start_pos(&k->k),
+			     BTREE_ITER_CACHED|
 			     BTREE_ITER_INTENT);
 	ret   = bch2_btree_iter_traverse(&iter) ?:
 		bch2_trans_update(trans, &iter, k, flags);
@@ -907,10 +907,10 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 			return -ENOMEM;
 	}
 
-	spin_lock(&c->ec_stripes_heap_lock);
+	mutex_lock(&c->ec_stripes_heap_lock);
 
 	if (!m || !m->alive) {
-		spin_unlock(&c->ec_stripes_heap_lock);
+		mutex_unlock(&c->ec_stripes_heap_lock);
 		bch_err_ratelimited(c, "pointer to nonexistent stripe %llu",
 				    (u64) p.idx);
 		bch2_inconsistent_error(c);
@@ -920,7 +920,7 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 	m->block_sectors[p.block] += sectors;
 
 	r = m->r;
-	spin_unlock(&c->ec_stripes_heap_lock);
+	mutex_unlock(&c->ec_stripes_heap_lock);
 
 	r.e.data_type = data_type;
 	update_replicas(c, k, &r.e, sectors, trans->journal_res.seq, true);
@@ -1031,7 +1031,7 @@ int bch2_mark_stripe(struct btree_trans *trans,
 	if (!gc) {
 		struct stripe *m = genradix_ptr(&c->stripes, idx);
 
-		if (!m || (old_s && !m->alive)) {
+		if (!m) {
 			struct printbuf buf1 = PRINTBUF;
 			struct printbuf buf2 = PRINTBUF;
 
@@ -1047,13 +1047,10 @@ int bch2_mark_stripe(struct btree_trans *trans,
 		}
 
 		if (!new_s) {
-			spin_lock(&c->ec_stripes_heap_lock);
 			bch2_stripes_heap_del(c, m, idx);
-			spin_unlock(&c->ec_stripes_heap_lock);
 
 			memset(m, 0, sizeof(*m));
 		} else {
-			m->alive	= true;
 			m->sectors	= le16_to_cpu(new_s->sectors);
 			m->algorithm	= new_s->algorithm;
 			m->nr_blocks	= new_s->nr_blocks;
@@ -1063,9 +1060,10 @@ int bch2_mark_stripe(struct btree_trans *trans,
 			for (i = 0; i < new_s->nr_blocks; i++)
 				m->blocks_nonempty += !!stripe_blockcount_get(new_s, i);
 
-			spin_lock(&c->ec_stripes_heap_lock);
-			bch2_stripes_heap_update(c, m, idx);
-			spin_unlock(&c->ec_stripes_heap_lock);
+			if (!old_s)
+				bch2_stripes_heap_insert(c, m, idx);
+			else
+				bch2_stripes_heap_update(c, m, idx);
 		}
 	} else {
 		struct gc_stripe *m =
libbcachefs/ec.c
@@ -549,13 +549,13 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
 		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
 			return -ENOMEM;
 
-		spin_lock(&c->ec_stripes_heap_lock);
+		mutex_lock(&c->ec_stripes_heap_lock);
 		if (n.size > h->size) {
 			memcpy(n.data, h->data, h->used * sizeof(h->data[0]));
 			n.used = h->used;
 			swap(*h, n);
 		}
-		spin_unlock(&c->ec_stripes_heap_lock);
+		mutex_unlock(&c->ec_stripes_heap_lock);
 
 		free_heap(&n);
 	}
@@ -584,12 +584,79 @@ static int ec_stripe_mem_alloc(struct btree_trans *trans,
 		bch2_trans_relock(trans);
 }
 
-static ssize_t stripe_idx_to_delete(struct bch_fs *c)
+/*
+ * Hash table of open stripes:
+ * Stripes that are being created or modified are kept in a hash table, so that
+ * stripe deletion can skip them.
+ */
+
+static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
+{
+	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
+	struct ec_stripe_new *s;
+
+	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
+		if (s->idx == idx)
+			return true;
+	return false;
+}
+
+static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
+{
+	bool ret = false;
+
+	spin_lock(&c->ec_stripes_new_lock);
+	ret = __bch2_stripe_is_open(c, idx);
+	spin_unlock(&c->ec_stripes_new_lock);
+
+	return ret;
+}
+
+static bool bch2_try_open_stripe(struct bch_fs *c,
+				 struct ec_stripe_new *s,
+				 u64 idx)
+{
+	bool ret;
+
+	spin_lock(&c->ec_stripes_new_lock);
+	ret = !__bch2_stripe_is_open(c, idx);
+	if (ret) {
+		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
+
+		s->idx = idx;
+		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
+	}
+	spin_unlock(&c->ec_stripes_new_lock);
+
+	return ret;
+}
+
+static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
+{
+	BUG_ON(!s->idx);
+
+	spin_lock(&c->ec_stripes_new_lock);
+	hlist_del(&s->hash);
+	spin_unlock(&c->ec_stripes_new_lock);
+
+	s->idx = 0;
+}
+
+/* Heap of all existing stripes, ordered by blocks_nonempty */
+
+static u64 stripe_idx_to_delete(struct bch_fs *c)
 {
 	ec_stripes_heap *h = &c->ec_stripes_heap;
+	size_t heap_idx;
+
+	lockdep_assert_held(&c->ec_stripes_heap_lock);
 
-	return h->used && h->data[0].blocks_nonempty == 0
-		? h->data[0].idx : -1;
+	for (heap_idx = 0; heap_idx < h->used; heap_idx++)
+		if (h->data[heap_idx].blocks_nonempty == 0 &&
+		    !bch2_stripe_is_open(c, h->data[heap_idx].idx))
+			return h->data[heap_idx].idx;
+
+	return 0;
 }
 
 static inline int ec_stripes_heap_cmp(ec_stripes_heap *h,
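The comment above states the invariant: a stripe in ec_stripes_new is invisible to the delete path. A hedged sketch of the lifecycle a creator follows, using the helpers added above (the caller function itself is hypothetical and error handling is simplified):

/* Claim the index before writing the stripe, release it when done, so
 * stripe_idx_to_delete() never picks an in-flight stripe: */
static int create_stripe_at(struct bch_fs *c, struct ec_stripe_new *s, u64 idx)
{
	if (!bch2_try_open_stripe(c, s, idx))
		return -EEXIST;		/* another thread owns idx */

	/* ...allocate buckets, write data/parity, insert the stripe key... */

	bch2_stripe_close(c, s);
	return 0;
}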
@@ -613,7 +680,6 @@ static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
 	ec_stripes_heap *h = &c->ec_stripes_heap;
 	struct stripe *m = genradix_ptr(&c->stripes, idx);
 
-	BUG_ON(!m->alive);
 	BUG_ON(m->heap_idx >= h->used);
 	BUG_ON(h->data[m->heap_idx].idx != idx);
 }
@@ -621,28 +687,21 @@ static void heap_verify_backpointer(struct bch_fs *c, size_t idx)
 void bch2_stripes_heap_del(struct bch_fs *c,
 			   struct stripe *m, size_t idx)
 {
-	if (!m->on_heap)
-		return;
-
-	m->on_heap = false;
-
+	mutex_lock(&c->ec_stripes_heap_lock);
 	heap_verify_backpointer(c, idx);
 
 	heap_del(&c->ec_stripes_heap, m->heap_idx,
 		 ec_stripes_heap_cmp,
 		 ec_stripes_heap_set_backpointer);
+	mutex_unlock(&c->ec_stripes_heap_lock);
 }
 
 void bch2_stripes_heap_insert(struct bch_fs *c,
 			      struct stripe *m, size_t idx)
 {
-	if (m->on_heap)
-		return;
-
+	mutex_lock(&c->ec_stripes_heap_lock);
 	BUG_ON(heap_full(&c->ec_stripes_heap));
 
-	m->on_heap = true;
-
 	heap_add(&c->ec_stripes_heap, ((struct ec_stripe_heap_entry) {
 			.idx = idx,
 			.blocks_nonempty = m->blocks_nonempty,
@@ -651,17 +710,17 @@ void bch2_stripes_heap_insert(struct bch_fs *c,
 		 ec_stripes_heap_set_backpointer);
 
 	heap_verify_backpointer(c, idx);
+	mutex_unlock(&c->ec_stripes_heap_lock);
 }
 
 void bch2_stripes_heap_update(struct bch_fs *c,
 			      struct stripe *m, size_t idx)
 {
 	ec_stripes_heap *h = &c->ec_stripes_heap;
+	bool do_deletes;
 	size_t i;
 
-	if (!m->on_heap)
-		return;
-
+	mutex_lock(&c->ec_stripes_heap_lock);
 	heap_verify_backpointer(c, idx);
 
 	h->data[m->heap_idx].blocks_nonempty = m->blocks_nonempty;
@@ -674,41 +733,82 @@ void bch2_stripes_heap_update(struct bch_fs *c,
 
 	heap_verify_backpointer(c, idx);
 
-	if (stripe_idx_to_delete(c) >= 0)
-		bch2_do_stripe_deletes(c);
+	do_deletes = stripe_idx_to_delete(c) != 0;
+	mutex_unlock(&c->ec_stripes_heap_lock);
+
+	if (do_deletes)
+		bch2_do_stripe_deletes(c);
 }
 
 /* stripe deletion */
 
-static int ec_stripe_delete(struct bch_fs *c, size_t idx)
+static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
 {
-	return bch2_btree_delete_range(c, BTREE_ID_stripes,
-				       POS(0, idx),
-				       POS(0, idx),
-				       0, NULL);
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	struct bkey_s_c_stripe s;
+	int ret;
+
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes, POS(0, idx),
+			     BTREE_ITER_INTENT);
+	k = bch2_btree_iter_peek_slot(&iter);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
+
+	if (k.k->type != KEY_TYPE_stripe) {
+		bch2_fs_inconsistent(c, "attempting to delete nonexistent stripe %llu", idx);
+		ret = -EINVAL;
+		goto err;
+	}
+
+	s = bkey_s_c_to_stripe(k);
+	for (unsigned i = 0; i < s.v->nr_blocks; i++)
+		if (stripe_blockcount_get(s.v, i)) {
+			struct printbuf buf = PRINTBUF;
+
+			bch2_bkey_val_to_text(&buf, c, k);
+			bch2_fs_inconsistent(c, "attempting to delete nonempty stripe %s", buf.buf);
+			printbuf_exit(&buf);
+			ret = -EINVAL;
+			goto err;
+		}
+
+	ret = bch2_btree_delete_at(trans, &iter, 0);
+err:
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
 }
 
 static void ec_stripe_delete_work(struct work_struct *work)
 {
 	struct bch_fs *c =
 		container_of(work, struct bch_fs, ec_stripe_delete_work);
-	ssize_t idx;
+	struct btree_trans trans;
+	int ret;
+	u64 idx;
+
+	bch2_trans_init(&trans, c, 0, 0);
 
 	while (1) {
-		spin_lock(&c->ec_stripes_heap_lock);
+		mutex_lock(&c->ec_stripes_heap_lock);
 		idx = stripe_idx_to_delete(c);
-		if (idx < 0) {
-			spin_unlock(&c->ec_stripes_heap_lock);
-			break;
-		}
+		mutex_unlock(&c->ec_stripes_heap_lock);
 
-		bch2_stripes_heap_del(c, genradix_ptr(&c->stripes, idx), idx);
-		spin_unlock(&c->ec_stripes_heap_lock);
+		if (!idx)
+			break;
 
-		if (ec_stripe_delete(c, idx))
+		ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
+				ec_stripe_delete(&trans, idx));
+		if (ret) {
+			bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
 			break;
+		}
 	}
+
+	bch2_trans_exit(&trans);
+
+	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
 }
@@ -721,60 +821,13 @@ void bch2_do_stripe_deletes(struct bch_fs *c)
 
 /* stripe creation: */
 
-static int ec_stripe_bkey_insert(struct btree_trans *trans,
-				 struct bkey_i_stripe *stripe,
-				 struct disk_reservation *res)
+static int ec_stripe_key_update(struct btree_trans *trans,
+				struct bkey_i_stripe *new,
+				bool create)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	struct bpos min_pos = POS(0, 1);
-	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
 	int ret;
 
-	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
-			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
-		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
-			if (start_pos.offset) {
-				start_pos = min_pos;
-				bch2_btree_iter_set_pos(&iter, start_pos);
-				continue;
-			}
-
-			ret = -BCH_ERR_ENOSPC_stripe_create;
-			break;
-		}
-
-		if (bkey_deleted(k.k))
-			break;
-	}
-
-	c->ec_stripe_hint = iter.pos.offset;
-
-	if (ret)
-		goto err;
-
-	ret = ec_stripe_mem_alloc(trans, &iter);
-	if (ret)
-		goto err;
-
-	stripe->k.p = iter.pos;
-
-	ret = bch2_trans_update(trans, &iter, &stripe->k_i, 0);
-err:
-	bch2_trans_iter_exit(trans, &iter);
-
-	return ret;
-}
-
-static int ec_stripe_bkey_update(struct btree_trans *trans,
-				 struct bkey_i_stripe *new,
-				 struct disk_reservation *res)
-{
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	const struct bch_stripe *existing;
-	unsigned i;
-	int ret;
-
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_stripes,
@@ -784,23 +837,27 @@ static int ec_stripe_bkey_update(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	if (!k.k || k.k->type != KEY_TYPE_stripe) {
-		bch_err(trans->c, "error updating stripe: not found");
-		ret = -ENOENT;
+	if (k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe)) {
+		bch2_fs_inconsistent(c, "error %s stripe: got existing key type %s",
+				     create ? "creating" : "updating",
+				     bch2_bkey_types[k.k->type]);
+		ret = -EINVAL;
 		goto err;
 	}
 
-	existing = bkey_s_c_to_stripe(k).v;
+	if (k.k->type == KEY_TYPE_stripe) {
+		const struct bch_stripe *old = bkey_s_c_to_stripe(k).v;
+		unsigned i;
 
-	if (existing->nr_blocks != new->v.nr_blocks) {
-		bch_err(trans->c, "error updating stripe: nr_blocks does not match");
-		ret = -EINVAL;
-		goto err;
-	}
+		if (old->nr_blocks != new->v.nr_blocks) {
+			bch_err(c, "error updating stripe: nr_blocks does not match");
+			ret = -EINVAL;
+			goto err;
+		}
 
-	for (i = 0; i < new->v.nr_blocks; i++)
-		stripe_blockcount_set(&new->v, i,
-				      stripe_blockcount_get(existing, i));
+		for (i = 0; i < new->v.nr_blocks; i++)
+			stripe_blockcount_set(&new->v, i, stripe_blockcount_get(old, i));
+	}
 
 	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
 err:
@@ -828,32 +885,58 @@ static void extent_stripe_ptr_add(struct bkey_s_extent e,
 }
 
 static int ec_stripe_update_extent(struct btree_trans *trans,
-				   struct btree_iter *iter,
-				   struct bkey_s_c k,
-				   struct ec_stripe_buf *s)
+				   struct bpos bucket, u8 gen,
+				   struct ec_stripe_buf *s,
+				   u64 *bp_offset)
 {
 	struct bch_fs *c = trans->c;
+	struct bch_backpointer bp;
+	struct btree_iter iter;
+	struct bkey_s_c k;
 	const struct bch_extent_ptr *ptr_c;
 	struct bch_extent_ptr *ptr, *ec_ptr = NULL;
 	struct bkey_i *n;
 	int ret, dev, block;
 
-	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
+	ret = bch2_get_next_backpointer(trans, bucket, gen,
+				bp_offset, &bp, BTREE_ITER_CACHED);
+	if (ret)
+		return ret;
+	if (*bp_offset == U64_MAX)
 		return 0;
 
+	if (bch2_fs_inconsistent_on(bp.level, c, "found btree node in erasure coded bucket!?"))
+		return -EIO;
+
+	k = bch2_backpointer_get_key(trans, &iter, bucket, *bp_offset, bp);
+	ret = bkey_err(k);
+	if (ret)
+		return ret;
+	if (!k.k) {
+		/*
+		 * extent no longer exists - we could flush the btree
+		 * write buffer and retry to verify, but no need:
+		 */
+		return 0;
+	}
+
+	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
+		goto out;
+
 	ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
 	/*
 	 * It doesn't generally make sense to erasure code cached ptrs:
 	 * XXX: should we be incrementing a counter?
 	 */
 	if (!ptr_c || ptr_c->cached)
-		return 0;
+		goto out;
 
 	dev = s->key.v.ptrs[block].dev;
 
 	n = bch2_bkey_make_mut(trans, k);
 	ret = PTR_ERR_OR_ZERO(n);
 	if (ret)
-		return ret;
+		goto out;
 
 	bch2_bkey_drop_ptrs(bkey_i_to_s(n), ptr, ptr->dev != dev);
 	ec_ptr = (void *) bch2_bkey_has_device(bkey_i_to_s_c(n), dev);
@@ -861,7 +944,10 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
 
 	extent_stripe_ptr_add(bkey_i_to_s_extent(n), s, ec_ptr, block);
 
-	return bch2_trans_update(trans, iter, n, 0);
+	ret = bch2_trans_update(trans, &iter, n, 0);
+out:
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
 }
 
 static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
@@ -870,46 +956,22 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
 	struct bch_fs *c = trans->c;
 	struct bch_extent_ptr bucket = s->key.v.ptrs[block];
 	struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
-	struct bch_backpointer bp;
-	struct btree_iter iter;
-	struct bkey_s_c k;
 	u64 bp_offset = 0;
 	int ret = 0;
-retry:
-	while (1) {
-		bch2_trans_begin(trans);
-
-		ret = bch2_get_next_backpointer(trans, bucket_pos, bucket.gen,
-						&bp_offset, &bp,
-						BTREE_ITER_CACHED);
+	while (1) {
+		ret = commit_do(trans, NULL, NULL,
+				BTREE_INSERT_NOFAIL,
+				ec_stripe_update_extent(trans, bucket_pos, bucket.gen,
+							s, &bp_offset));
 		if (ret)
 			break;
 		if (bp_offset == U64_MAX)
 			break;
 
-		if (bch2_fs_inconsistent_on(bp.level, c, "found btree node in erasure coded bucket!?")) {
-			ret = -EIO;
-			break;
-		}
-
-		k = bch2_backpointer_get_key(trans, &iter, bucket_pos, bp_offset, bp);
-		ret = bkey_err(k);
-		if (ret)
-			break;
-		if (!k.k)
-			continue;
-
-		ret = ec_stripe_update_extent(trans, &iter, k, s);
-		bch2_trans_iter_exit(trans, &iter);
-		if (ret)
-			break;
-
 		bp_offset++;
 	}
 
-	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-		goto retry;
-
 	return ret;
 }
 
@@ -944,7 +1006,6 @@ static void ec_stripe_create(struct ec_stripe_new *s)
 {
 	struct bch_fs *c = s->c;
 	struct open_bucket *ob;
-	struct stripe *m;
 	struct bch_stripe *v = &s->new_stripe.key.v;
 	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
 	int ret;
@@ -977,9 +1038,6 @@ static void ec_stripe_create(struct ec_stripe_new *s)
 
 	BUG_ON(!s->allocated);
 
-	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_create))
-		goto err;
-
 	ec_generate_ec(&s->new_stripe);
 
 	ec_generate_checksums(&s->new_stripe);
@@ -991,31 +1049,25 @@ static void ec_stripe_create(struct ec_stripe_new *s)
 
 	if (ec_nr_failed(&s->new_stripe)) {
 		bch_err(c, "error creating stripe: error writing redundancy buckets");
-		goto err_put_writes;
+		goto err;
 	}
 
 	ret = bch2_trans_do(c, &s->res, NULL, BTREE_INSERT_NOFAIL,
-			    s->have_existing_stripe
-			    ? ec_stripe_bkey_update(&trans, &s->new_stripe.key, &s->res)
-			    : ec_stripe_bkey_insert(&trans, &s->new_stripe.key, &s->res));
+			    ec_stripe_key_update(&trans, &s->new_stripe.key,
+						 !s->have_existing_stripe));
 	if (ret) {
 		bch_err(c, "error creating stripe: error creating stripe key");
-		goto err_put_writes;
+		goto err;
 	}
 
 	ret = ec_stripe_update_extents(c, &s->new_stripe);
-	if (ret)
+	if (ret) {
 		bch_err(c, "error creating stripe: error updating pointers: %s",
 			bch2_err_str(ret));
+		goto err;
+	}
 
-	spin_lock(&c->ec_stripes_heap_lock);
-	m = genradix_ptr(&c->stripes, s->new_stripe.key.k.p.offset);
-
-	BUG_ON(m->on_heap);
-	bch2_stripes_heap_insert(c, m, s->new_stripe.key.k.p.offset);
-	spin_unlock(&c->ec_stripes_heap_lock);
-err_put_writes:
-	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
+	bch2_stripe_close(c, s);
 err:
 	bch2_disk_reservation_put(c, &s->res);
 
@@ -1037,31 +1089,49 @@ err:
 	kfree(s);
 }
 
+static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
+{
+	struct ec_stripe_new *s;
+
+	mutex_lock(&c->ec_stripe_new_lock);
+	list_for_each_entry(s, &c->ec_stripe_new_list, list)
+		if (!atomic_read(&s->pin)) {
+			list_del(&s->list);
+			goto out;
+		}
+	s = NULL;
+out:
+	mutex_unlock(&c->ec_stripe_new_lock);
+
+	return s;
+}
+
 static void ec_stripe_create_work(struct work_struct *work)
 {
 	struct bch_fs *c = container_of(work,
 		struct bch_fs, ec_stripe_create_work);
-	struct ec_stripe_new *s, *n;
-restart:
-	mutex_lock(&c->ec_stripe_new_lock);
-	list_for_each_entry_safe(s, n, &c->ec_stripe_new_list, list)
-		if (!atomic_read(&s->pin)) {
-			list_del(&s->list);
-			mutex_unlock(&c->ec_stripe_new_lock);
-			ec_stripe_create(s);
-			goto restart;
-		}
-	mutex_unlock(&c->ec_stripe_new_lock);
+	struct ec_stripe_new *s;
+
+	while ((s = get_pending_stripe(c)))
+		ec_stripe_create(s);
+
+	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
 }
 
 void bch2_ec_do_stripe_creates(struct bch_fs *c)
 {
+	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
+
 	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
 		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
 }
 
 static void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s)
 {
 	BUG_ON(atomic_read(&s->pin) <= 0);
 
-	if (atomic_dec_and_test(&s->pin)) {
-		BUG_ON(!s->pending);
-		queue_work(system_long_wq, &c->ec_stripe_create_work);
-	}
+	if (atomic_dec_and_test(&s->pin))
+		bch2_ec_do_stripe_creates(c);
 }
 
 static void ec_stripe_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
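The refactor above isolates the "pop one completed stripe under the lock, create it outside the lock" step in get_pending_stripe(), so ec_stripe_new_lock is never held across the slow create path. A self-contained sketch of the same general pattern in plain C (the types and pthread locking here are illustrative, not from the commit):

#include <stddef.h>
#include <pthread.h>

struct item { struct item *next; int ready; };

static struct item *pending;
static pthread_mutex_t pending_lock = PTHREAD_MUTEX_INITIALIZER;

/* Detach one ready item while holding the lock. */
static struct item *pop_ready(void)
{
	struct item **p, *it = NULL;

	pthread_mutex_lock(&pending_lock);
	for (p = &pending; *p; p = &(*p)->next)
		if ((*p)->ready) {
			it = *p;
			*p = it->next;	/* unlink */
			break;
		}
	pthread_mutex_unlock(&pending_lock);
	return it;
}

static void worker(void (*create)(struct item *))
{
	struct item *it;

	while ((it = pop_ready()))
		create(it);	/* slow work runs with the lock dropped */
}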
@@ -1226,7 +1296,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
 		return NULL;
 
 	mutex_init(&h->lock);
-	mutex_lock(&h->lock);
+	BUG_ON(!mutex_trylock(&h->lock));
 
 	h->target	= target;
 	h->algo		= algo;
@@ -1262,24 +1332,31 @@ void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
 	mutex_unlock(&h->lock);
 }
 
-struct ec_stripe_head *__bch2_ec_stripe_head_get(struct bch_fs *c,
+struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
 						 unsigned target,
 						 unsigned algo,
 						 unsigned redundancy,
 						 bool copygc)
 {
+	struct bch_fs *c = trans->c;
 	struct ec_stripe_head *h;
+	int ret;
 
 	if (!redundancy)
 		return NULL;
 
-	mutex_lock(&c->ec_stripe_head_lock);
+	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
+	if (ret)
+		return ERR_PTR(ret);
+
 	list_for_each_entry(h, &c->ec_stripe_head_list, list)
 		if (h->target		== target &&
 		    h->algo		== algo &&
 		    h->redundancy	== redundancy &&
 		    h->copygc		== copygc) {
-			mutex_lock(&h->lock);
+			ret = bch2_trans_mutex_lock(trans, &h->lock);
+			if (ret)
+				h = ERR_PTR(ret);
 			goto found;
 		}
 
@@ -1289,9 +1366,10 @@ found:
 	return h;
 }
 
-static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
+static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
 				    struct closure *cl)
 {
+	struct bch_fs *c = trans->c;
 	struct bch_devs_mask devs = h->devs;
 	struct open_bucket *ob;
 	struct open_buckets buckets;
@@ -1314,7 +1392,7 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
 
 	buckets.nr = 0;
 	if (nr_have_parity < h->s->nr_parity) {
-		ret = bch2_bucket_alloc_set(c, &buckets,
+		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
 					    &h->parity_stripe,
 					    &devs,
 					    h->s->nr_parity,
@@ -1343,7 +1421,7 @@ static int new_stripe_alloc_buckets(struct bch_fs *c, struct ec_stripe_head *h,
 
 	buckets.nr = 0;
 	if (nr_have_data < h->s->nr_data) {
-		ret = bch2_bucket_alloc_set(c, &buckets,
+		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
 					    &h->block_stripe,
 					    &devs,
 					    h->s->nr_data,
@@ -1385,30 +1463,30 @@ static s64 get_existing_stripe(struct bch_fs *c,
 	if (may_create_new_stripe(c))
 		return -1;
 
-	spin_lock(&c->ec_stripes_heap_lock);
+	mutex_lock(&c->ec_stripes_heap_lock);
 	for (heap_idx = 0; heap_idx < h->used; heap_idx++) {
 		/* No blocks worth reusing, stripe will just be deleted: */
 		if (!h->data[heap_idx].blocks_nonempty)
 			continue;
 
 		stripe_idx = h->data[heap_idx].idx;
 
 		m = genradix_ptr(&c->stripes, stripe_idx);
 
 		if (m->algorithm	== head->algo &&
 		    m->nr_redundant	== head->redundancy &&
 		    m->sectors		== head->blocksize &&
-		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant) {
-			bch2_stripes_heap_del(c, m, stripe_idx);
+		    m->blocks_nonempty	< m->nr_blocks - m->nr_redundant &&
+		    bch2_try_open_stripe(c, head->s, stripe_idx)) {
 			ret = stripe_idx;
 			break;
 		}
 	}
-	spin_unlock(&c->ec_stripes_heap_lock);
+	mutex_unlock(&c->ec_stripes_heap_lock);
 	return ret;
 }
 
-static int __bch2_ec_stripe_head_reuse(struct bch_fs *c,
-				       struct ec_stripe_head *h)
+static int __bch2_ec_stripe_head_reuse(struct bch_fs *c, struct ec_stripe_head *h)
 {
 	unsigned i;
 	s64 idx;
@@ -1451,30 +1529,78 @@ static int __bch2_ec_stripe_head_reuse(struct bch_fs *c,
 	return 0;
 }
 
-static int __bch2_ec_stripe_head_reserve(struct bch_fs *c,
-					 struct ec_stripe_head *h)
+static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h)
 {
-	return bch2_disk_reservation_get(c, &h->s->res,
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter;
+	struct bkey_s_c k;
+	struct bpos min_pos = POS(0, 1);
+	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
+	int ret;
+
+	BUG_ON(h->s->res.sectors);
+
+	ret = bch2_disk_reservation_get(c, &h->s->res,
 			h->blocksize,
 			h->s->nr_parity, 0);
+	if (ret)
+		return ret;
+
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
+			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
+		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
+			if (start_pos.offset) {
+				start_pos = min_pos;
+				bch2_btree_iter_set_pos(&iter, start_pos);
+				continue;
+			}
+
+			ret = -BCH_ERR_ENOSPC_stripe_create;
+			break;
+		}
+
+		if (bkey_deleted(k.k) &&
+		    bch2_try_open_stripe(c, h->s, k.k->p.offset))
+			break;
+	}
+
+	c->ec_stripe_hint = iter.pos.offset;
+
+	if (ret)
+		goto err;
+
+	ret = ec_stripe_mem_alloc(trans, &iter);
+	if (ret) {
+		bch2_stripe_close(c, h->s);
+		goto err;
+	}
+
+	h->s->new_stripe.key.k.p = iter.pos;
+out:
+	bch2_trans_iter_exit(trans, &iter);
+	return ret;
+err:
+	bch2_disk_reservation_put(c, &h->s->res);
+	goto out;
 }
 
-struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
+struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 					       unsigned target,
 					       unsigned algo,
 					       unsigned redundancy,
 					       bool copygc,
 					       struct closure *cl)
 {
+	struct bch_fs *c = trans->c;
 	struct ec_stripe_head *h;
 	int ret;
 	bool needs_stripe_new;
 
-	h = __bch2_ec_stripe_head_get(c, target, algo, redundancy, copygc);
-	if (!h) {
+	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, copygc);
+	if (!h)
 		bch_err(c, "no stripe head");
-		return NULL;
-	}
+	if (IS_ERR_OR_NULL(h))
+		return h;
 
 	needs_stripe_new = !h->s;
 	if (needs_stripe_new) {
@@ -1495,7 +1621,10 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
 	 */
 	ret = 0;
 	if (!h->s->allocated && !h->s->res.sectors && !h->s->have_existing_stripe)
-		ret = __bch2_ec_stripe_head_reserve(c, h);
+		ret = __bch2_ec_stripe_head_reserve(trans, h);
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		goto err;
+
 	if (ret && needs_stripe_new)
 		ret = __bch2_ec_stripe_head_reuse(c, h);
 	if (ret) {
@@ -1504,15 +1633,15 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *c,
 	}
 
 	if (!h->s->allocated) {
-		ret = new_stripe_alloc_buckets(c, h, cl);
+		ret = new_stripe_alloc_buckets(trans, h, cl);
 		if (ret)
 			goto err;
 
 		h->s->allocated = true;
 	}
 
+	BUG_ON(trans->restarted);
 	return h;
 
 err:
 	bch2_ec_stripe_head_put(c, h);
 	return ERR_PTR(ret);
@@ -1549,16 +1678,6 @@ unlock:
 	mutex_unlock(&c->ec_stripe_head_lock);
 }
 
-void bch2_stripes_heap_start(struct bch_fs *c)
-{
-	struct genradix_iter iter;
-	struct stripe *m;
-
-	genradix_for_each(&c->stripes, iter, m)
-		if (m->alive)
-			bch2_stripes_heap_insert(c, m, iter.pos);
-}
-
 int bch2_stripes_read(struct bch_fs *c)
 {
 	struct btree_trans trans;
@@ -1583,7 +1702,6 @@ int bch2_stripes_read(struct bch_fs *c)
 		s = bkey_s_c_to_stripe(k).v;
 
 		m = genradix_ptr(&c->stripes, k.k->p.offset);
-		m->alive	= true;
 		m->sectors	= le16_to_cpu(s->sectors);
 		m->algorithm	= s->algorithm;
 		m->nr_blocks	= s->nr_blocks;
@@ -1593,9 +1711,7 @@ int bch2_stripes_read(struct bch_fs *c)
 		for (i = 0; i < s->nr_blocks; i++)
 			m->blocks_nonempty += !!stripe_blockcount_get(s, i);
 
-		spin_lock(&c->ec_stripes_heap_lock);
-		bch2_stripes_heap_update(c, m, k.k->p.offset);
-		spin_unlock(&c->ec_stripes_heap_lock);
+		bch2_stripes_heap_insert(c, m, k.k->p.offset);
 	}
 	bch2_trans_iter_exit(&trans, &iter);
 
@@ -1613,7 +1729,7 @@ void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
 	struct stripe *m;
 	size_t i;
 
-	spin_lock(&c->ec_stripes_heap_lock);
+	mutex_lock(&c->ec_stripes_heap_lock);
 	for (i = 0; i < min_t(size_t, h->used, 20); i++) {
 		m = genradix_ptr(&c->stripes, h->data[i].idx);
 
@@ -1622,7 +1738,7 @@ void bch2_stripes_heap_to_text(struct printbuf *out, struct bch_fs *c)
 			 m->nr_blocks - m->nr_redundant,
 			 m->nr_redundant);
 	}
-	spin_unlock(&c->ec_stripes_heap_lock);
+	mutex_unlock(&c->ec_stripes_heap_lock);
 }
 
 void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
@@ -148,6 +148,10 @@ struct ec_stripe_new {
 	struct ec_stripe_head	*h;
 	struct mutex		lock;
 	struct list_head	list;
+
+	struct hlist_node	hash;
+	u64			idx;
+
 	struct closure		iodone;
 
 	/* counts in flight writes, stripe is created when pin == 0 */
@@ -200,7 +204,7 @@ void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);
 int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
 
 void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
-struct ec_stripe_head *bch2_ec_stripe_head_get(struct bch_fs *,
+struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
 			unsigned, unsigned, unsigned, bool, struct closure *);
 
 void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
@@ -213,8 +217,6 @@ void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
 
 void bch2_ec_flush_new_stripes(struct bch_fs *);
 
-void bch2_stripes_heap_start(struct bch_fs *);
-
 int bch2_stripes_read(struct bch_fs *);
 
 void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
@@ -11,15 +11,10 @@ struct bch_replicas_padded {
 
 struct stripe {
 	size_t			heap_idx;
-
 	u16			sectors;
 	u8			algorithm;
-
 	u8			nr_blocks;
 	u8			nr_redundant;
-
-	unsigned		alive:1; /* does a corresponding key exist in stripes btree? */
-	unsigned		on_heap:1;
 	u8			blocks_nonempty;
 };
 
@@ -609,6 +609,9 @@ static int ref_visible2(struct bch_fs *c,
 			u32 src, struct snapshots_seen *src_seen,
 			u32 dst, struct snapshots_seen *dst_seen)
 {
+	src = bch2_snapshot_equiv(c, src);
+	dst = bch2_snapshot_equiv(c, dst);
+
 	if (dst > src) {
 		swap(dst, src);
 		swap(dst_seen, src_seen);
@@ -1622,6 +1622,52 @@ static void do_journal_write(struct closure *cl)
 	return;
 }
 
+static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset)
+{
+	struct jset_entry *i, *next, *prev = NULL;
+
+	/*
+	 * Simple compaction, dropping empty jset_entries (from journal
+	 * reservations that weren't fully used) and merging jset_entries that
+	 * can be.
+	 *
+	 * If we wanted to be really fancy here, we could sort all the keys in
+	 * the jset and drop keys that were overwritten - probably not worth it:
+	 */
+	vstruct_for_each_safe(jset, i, next) {
+		unsigned u64s = le16_to_cpu(i->u64s);
+
+		/* Empty entry: */
+		if (!u64s)
+			continue;
+
+		if (i->type == BCH_JSET_ENTRY_btree_root)
+			bch2_journal_entry_to_btree_root(c, i);
+
+		/* Can we merge with previous entry? */
+		if (prev &&
+		    i->btree_id	== prev->btree_id &&
+		    i->level	== prev->level &&
+		    i->type	== prev->type &&
+		    i->type	== BCH_JSET_ENTRY_btree_keys &&
+		    le16_to_cpu(prev->u64s) + u64s <= U16_MAX) {
+			memmove_u64s_down(vstruct_next(prev),
+					  i->_data,
+					  u64s);
+			le16_add_cpu(&prev->u64s, u64s);
+			continue;
+		}
+
+		/* Couldn't merge, move i into new position (after prev): */
+		prev = prev ? vstruct_next(prev) : jset->start;
+		if (i != prev)
+			memmove_u64s_down(prev, i, jset_u64s(u64s));
+	}
+
+	prev = prev ? vstruct_next(prev) : jset->start;
+	jset->u64s = cpu_to_le32((u64 *) prev - jset->_data);
+}
+
 void bch2_journal_write(struct closure *cl)
 {
 	struct journal *j = container_of(cl, struct journal, io);
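The comment in the new function explains the idea: drop empty entries, merge mergeable neighbors, slide survivors down in place. A self-contained toy model of that in-place compaction — the record layout here is invented for illustration and ignores the merge step and the real jset_entry format:

#include <stdint.h>
#include <string.h>

/* Variable-length records packed in a u64 array, each with a one-u64
 * header giving its payload length: */
struct rec {
	uint16_t u64s;		/* payload length, in u64s */
	uint16_t type;
	uint32_t pad;
	uint64_t data[];
};

static size_t compact(uint64_t *buf, size_t nr)
{
	size_t in = 0, out = 0;

	while (in < nr) {
		struct rec *r = (struct rec *) (buf + in);
		size_t len = 1 + r->u64s;	/* header u64 + payload */

		if (r->u64s) {			/* keep non-empty records */
			memmove(buf + out, buf + in, len * sizeof(uint64_t));
			out += len;
		}
		in += len;
	}
	return out;			/* new length, in u64s */
}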
@@ -1693,7 +1739,7 @@ void bch2_journal_write(struct closure *cl)
 	 * entry:
 	 */
 
-	bch2_journal_entries_to_btree_roots(c, jset);
+	bch2_journal_entries_postprocess(c, jset);
 
 	start = end = vstruct_last(jset);
 
@@ -1260,8 +1260,6 @@ use_clean:
 		goto err;
 	bch_verbose(c, "stripes_read done");
 
-	bch2_stripes_heap_start(c);
-
 	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
 		err = "error creating root snapshot node";
 		ret = bch2_fs_initialize_subvolumes(c);
@@ -706,7 +706,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	INIT_LIST_HEAD(&c->data_progress_list);
 	mutex_init(&c->data_progress_lock);
 
-	spin_lock_init(&c->ec_stripes_heap_lock);
+	mutex_init(&c->ec_stripes_heap_lock);
 
 	seqcount_init(&c->gc_pos_lock);
 
@@ -1922,7 +1922,8 @@ out:
 	kfree(sb);
 	printbuf_exit(&errbuf);
 	module_put(THIS_MODULE);
-	pr_verbose_init(opts, "ret %i", PTR_ERR_OR_ZERO(c));
+	pr_verbose_init(opts, "ret %s (%i)", bch2_err_str(PTR_ERR_OR_ZERO(c)),
+			PTR_ERR_OR_ZERO(c));
 	return c;
 err_print:
 	pr_err("bch_fs_open err opening %s: %s",