mirror of https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-23 00:00:02 +03:00

Update bcachefs sources to 1210f6c925 bcachefs: BCH_SB_FEATURES_ALL

This commit is contained in:
parent ac0d08877a
commit dbb99e492d
@@ -1 +1 @@
-9017d858547faedabdef6ca21317e317791526bd
+1210f6c925974abcbd07b6cb7209a24482170d8c
@@ -608,6 +608,7 @@ struct bch_fs {
 	mempool_t		btree_interior_update_pool;
 	struct list_head	btree_interior_update_list;
+	struct list_head	btree_interior_updates_unwritten;
 	struct mutex		btree_interior_update_lock;
 	struct closure_waitlist	btree_interior_update_wait;
@@ -1313,6 +1313,11 @@ LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
 	x(incompressible,	10)	\
 	x(btree_ptr_v2,		11)
 
+#define BCH_SB_FEATURES_ALL				\
+	((1ULL << BCH_FEATURE_new_siphash)|		\
+	 (1ULL << BCH_FEATURE_new_extent_overwrite)|	\
+	 (1ULL << BCH_FEATURE_btree_ptr_v2))
+
 enum bch_sb_feature {
 #define x(f, n) BCH_FEATURE_##f,
 	BCH_SB_FEATURES()
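Note: the new BCH_SB_FEATURES_ALL mask is maintained by hand next to the x-macro list that generates the feature enum. A small self-contained sketch of that x-macro pattern (made-up feature names and bit numbers, not the real bcachefs list) shows the technique, including how the same list could also expand into the combined mask automatically:

#include <stdio.h>

/* Minimal sketch of the x-macro pattern behind BCH_SB_FEATURES():
 * one list expands once into enum constants and a second time into a
 * combined bitmask, so a feature added to the list is picked up in
 * both places. Names here are illustrative only. */
#define FEATURES()			\
	x(new_siphash,		0)	\
	x(new_extent_overwrite,	1)	\
	x(btree_ptr_v2,		2)

enum feature {
#define x(f, n) FEATURE_##f,	/* values track list position */
	FEATURES()
#undef x
	FEATURE_NR
};

/* Expand the list again to OR every feature bit together: */
#define x(f, n) |(1ULL << FEATURE_##f)
static const unsigned long long FEATURES_ALL = 0 FEATURES();
#undef x

int main(void)
{
	printf("%d features, mask 0x%llx\n", FEATURE_NR, FEATURES_ALL);
	return 0;
}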
@@ -595,12 +595,13 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b;
 
+	BUG_ON(level + 1 >= BTREE_MAX_DEPTH);
 	/*
 	 * Parent node must be locked, else we could read in a btree node that's
 	 * been freed:
 	 */
-	BUG_ON(!btree_node_locked(iter, level + 1));
-	BUG_ON(level >= BTREE_MAX_DEPTH);
+	if (!bch2_btree_node_relock(iter, level + 1))
+		return ERR_PTR(-EINTR);
 
 	b = bch2_btree_node_mem_alloc(c);
 	if (IS_ERR(b))
@@ -623,13 +624,9 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
 	}
 
 	/*
-	 * If the btree node wasn't cached, we can't drop our lock on
-	 * the parent until after it's added to the cache - because
-	 * otherwise we could race with a btree_split() freeing the node
-	 * we're trying to lock.
+	 * Unlock before doing IO:
 	 *
-	 * But the deadlock described below doesn't exist in this case,
-	 * so it's safe to not drop the parent lock until here:
+	 * XXX: ideally should be dropping all btree node locks here
 	 */
 	if (btree_node_read_locked(iter, level + 1))
 		btree_node_unlock(iter, level + 1);
@@ -666,16 +663,11 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 	struct btree *b;
 	struct bset_tree *t;
 
-	/*
-	 * XXX: locking optimization
-	 *
-	 * we can make the locking looser here - caller can drop lock on parent
-	 * node before locking child node (and potentially blocking): we just
-	 * have to have bch2_btree_node_fill() call relock on the parent and
-	 * return -EINTR if that fails
-	 */
 	EBUG_ON(!btree_node_locked(iter, level + 1));
 	EBUG_ON(level >= BTREE_MAX_DEPTH);
+
+	b = btree_node_mem_ptr(k);
+	if (b)
+		goto lock_node;
 retry:
 	b = btree_cache_find(bc, k);
 	if (unlikely(!b)) {
@@ -693,6 +685,7 @@ retry:
 		if (IS_ERR(b))
 			return b;
 	} else {
+lock_node:
 		/*
 		 * There's a potential deadlock with splits and insertions into
 		 * interior nodes we have to avoid:
@@ -739,6 +732,7 @@ retry:
 		}
 	}
 
+	/* XXX: waiting on IO with btree locks held: */
 	wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
 		       TASK_UNINTERRUPTIBLE);
 
@@ -753,7 +747,7 @@ retry:
 	}
 
 	/* avoid atomic set bit if it's not needed: */
-	if (btree_node_accessed(b))
+	if (!btree_node_accessed(b))
 		set_btree_node_accessed(b);
 
 	if (unlikely(btree_node_read_error(b))) {
@@ -47,6 +47,13 @@ static inline u64 btree_ptr_hash_val(const struct bkey_i *k)
 	}
 }
 
+static inline struct btree *btree_node_mem_ptr(const struct bkey_i *k)
+{
+	return k->k.type == KEY_TYPE_btree_ptr_v2
+		? (void *)(unsigned long)bkey_i_to_btree_ptr_v2_c(k)->v.mem_ptr
+		: NULL;
+}
+
 /* is btree node in hash table? */
 static inline bool btree_node_hashed(struct btree *b)
 {
@@ -1647,6 +1647,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 
 	b->written += sectors_to_write;
 
+	/* XXX: submitting IO with btree locks held: */
 	bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_BTREE, &k.key);
 	return;
 err:
@@ -912,6 +912,27 @@ static void btree_iter_prefetch(struct btree_iter *iter)
 		btree_node_unlock(iter, iter->level);
 }
 
+static noinline void btree_node_mem_ptr_set(struct btree_iter *iter,
+					    unsigned plevel, struct btree *b)
+{
+	struct btree_iter_level *l = &iter->l[plevel];
+	bool locked = btree_node_locked(iter, plevel);
+	struct bkey_packed *k;
+	struct bch_btree_ptr_v2 *bp;
+
+	if (!bch2_btree_node_relock(iter, plevel))
+		return;
+
+	k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
+	BUG_ON(k->type != KEY_TYPE_btree_ptr_v2);
+
+	bp = (void *) bkeyp_val(&l->b->format, k);
+	bp->mem_ptr = (unsigned long)b;
+
+	if (!locked)
+		btree_node_unlock(iter, plevel);
+}
+
 static __always_inline int btree_iter_down(struct btree_iter *iter)
 {
 	struct bch_fs *c = iter->trans->c;
@@ -933,6 +954,10 @@ static __always_inline int btree_iter_down(struct btree_iter *iter)
 	mark_btree_node_locked(iter, level, lock_type);
 	btree_iter_node_set(iter, b);
 
+	if (tmp.k.k.type == KEY_TYPE_btree_ptr_v2 &&
+	    unlikely(b != btree_node_mem_ptr(&tmp.k)))
+		btree_node_mem_ptr_set(iter, level + 1, b);
+
 	if (iter->flags & BTREE_ITER_PREFETCH)
 		btree_iter_prefetch(iter);
 
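Together with btree_node_mem_ptr() from btree_cache.h above, this is pointer memoization: after descending to a child node, its in-memory address is stashed in the parent's btree_ptr_v2 key, so the next descent can skip the btree cache hash table. A toy standalone sketch of the idea, with simplified stand-in types rather than the real bcachefs structures:

#include <stdio.h>
#include <stdint.h>

/* Toy sketch of the btree_ptr_v2 mem_ptr shortcut: the pointer key
 * carries a scratch field holding the address of the cached in-memory
 * node, so a repeat lookup can skip the hash table. Stand-in types. */

struct btree { int level; };

struct btree_ptr_v2 {
	uint64_t mem_ptr;	/* in-memory only; never meaningful on disk */
};

static struct btree *node_mem_ptr(const struct btree_ptr_v2 *p)
{
	/* NULL means "not cached here, fall back to the hash table" */
	return (struct btree *)(unsigned long) p->mem_ptr;
}

static void node_mem_ptr_set(struct btree_ptr_v2 *p, struct btree *b)
{
	p->mem_ptr = (unsigned long) b;
}

int main(void)
{
	struct btree node = { .level = 1 };
	struct btree_ptr_v2 key = { .mem_ptr = 0 };

	if (!node_mem_ptr(&key))		/* first lookup: miss */
		node_mem_ptr_set(&key, &node);	/* stash the address */

	printf("cached node level: %d\n", node_mem_ptr(&key)->level);
	return 0;
}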
@@ -1756,6 +1781,8 @@ int bch2_trans_iter_put(struct btree_trans *trans,
 	if (IS_ERR_OR_NULL(iter))
 		return 0;
 
+	BUG_ON(trans->iters + iter->idx != iter);
+
 	ret = btree_iter_err(iter);
 
 	if (!(trans->iters_touched & (1ULL << iter->idx)) &&
@@ -2080,16 +2107,11 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
 
 	bch2_trans_unlink_iters(trans);
 
-	if (flags & TRANS_RESET_ITERS)
-		trans->iters_live = 0;
-
 	trans->iters_touched &= trans->iters_live;
 
 	trans->need_reset	= 0;
 	trans->nr_updates	= 0;
-
-	if (flags & TRANS_RESET_MEM)
-		trans->mem_top	= 0;
+	trans->mem_top		= 0;
 
 	if (trans->fs_usage_deltas) {
 		trans->fs_usage_deltas->used = 0;
@@ -2108,6 +2130,12 @@ void bch2_trans_init(struct btree_trans *trans, struct bch_fs *c,
 {
 	memset(trans, 0, offsetof(struct btree_trans, iters_onstack));
 
+	/*
+	 * reallocating iterators currently completely breaks
+	 * bch2_trans_iter_put():
+	 */
+	expected_nr_iters = BTREE_ITER_MAX;
+
 	trans->c		= c;
 	trans->ip		= _RET_IP_;
 	trans->size		= ARRAY_SIZE(trans->iters_onstack);
@@ -290,15 +290,13 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
 					    enum btree_id, struct bpos,
 					    unsigned, unsigned, unsigned);
 
-#define TRANS_RESET_ITERS	(1 << 0)
-#define TRANS_RESET_MEM		(1 << 1)
-#define TRANS_RESET_NOTRAVERSE	(1 << 2)
+#define TRANS_RESET_NOTRAVERSE	(1 << 0)
 
 void bch2_trans_reset(struct btree_trans *, unsigned);
 
 static inline void bch2_trans_begin(struct btree_trans *trans)
 {
-	return bch2_trans_reset(trans, TRANS_RESET_ITERS|TRANS_RESET_MEM);
+	return bch2_trans_reset(trans, 0);
 }
 
 void *bch2_trans_kmalloc(struct btree_trans *, size_t);
@@ -59,6 +59,7 @@ enum btree_insert_flags {
 
 int bch2_btree_delete_at(struct btree_trans *, struct btree_iter *, unsigned);
 
+int __bch2_btree_insert(struct btree_trans *, enum btree_id, struct bkey_i *);
 int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
 		      struct disk_reservation *, u64 *, int flags);
 
@@ -98,17 +99,17 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
 	return __bch2_trans_commit(trans);
 }
 
-#define __bch2_trans_do(_trans, _disk_res, _journal_seq,		\
-			_flags, _reset_flags, _do)			\
+#define __bch2_trans_do(_trans, _disk_res, _journal_seq, _flags, _do)	\
 ({									\
 	int _ret;							\
 									\
-	do {								\
-		bch2_trans_reset(_trans, _reset_flags);			\
-									\
+	while (1) {							\
 		_ret = (_do) ?: bch2_trans_commit(_trans, (_disk_res),	\
 					(_journal_seq), (_flags));	\
-	} while (_ret == -EINTR);					\
+		if (_ret != -EINTR)					\
+			break;						\
+		bch2_trans_reset(_trans, 0);				\
 	}								\
 									\
 	_ret;								\
 })
@@ -120,7 +121,7 @@ static inline int bch2_trans_commit(struct btree_trans *trans,
 									\
 	bch2_trans_init(&trans, (_c), 0, 0);				\
 	_ret = __bch2_trans_do(&trans, _disk_res, _journal_seq, _flags,	\
-			       TRANS_RESET_MEM|TRANS_RESET_ITERS, _do);	\
+			       _do);					\
 	_ret2 = bch2_trans_exit(&trans);				\
 									\
 	_ret ?: _ret2;							\
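The reworked macro commits inside the loop and only resets the transaction when the commit asks for a restart. A standalone sketch of that retry-on--EINTR shape (the trans_* names are simplified stand-ins, not the bcachefs API):

#include <stdio.h>
#include <errno.h>

/* Sketch of the retry shape __bch2_trans_do() now has: stage updates,
 * try to commit, and restart from a clean transaction whenever the
 * commit returns -EINTR (which bcachefs uses for "lock restart", not
 * signals). Stand-in types and helpers. */

struct trans { int attempts; };

static void trans_reset(struct trans *t)
{
	(void) t;	/* would drop per-attempt iterators and memory */
}

static int stage_updates(struct trans *t)
{
	(void) t;	/* stand-in for the _do expression; may fail too */
	return 0;
}

static int trans_commit(struct trans *t)
{
	/* pretend the first attempt races with a btree split: */
	return ++t->attempts < 2 ? -EINTR : 0;
}

static int trans_do(struct trans *t)
{
	int ret;

	while (1) {
		ret = stage_updates(t) ?: trans_commit(t);
		if (ret != -EINTR)
			break;
		trans_reset(t);
	}
	return ret;
}

int main(void)
{
	struct trans t = { 0 };
	int ret = trans_do(&t);

	printf("ret %d after %d attempt(s)\n", ret, t.attempts);
	return 0;
}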
@@ -370,6 +370,9 @@ static struct btree *bch2_btree_node_alloc(struct btree_update *as, unsigned lev
 	set_btree_node_need_write(b);
 
 	bch2_bset_init_first(b, &b->data->keys);
+	b->level	= level;
+	b->btree_id	= as->btree_id;
+
 	memset(&b->nr, 0, sizeof(b->nr));
 	b->data->magic = cpu_to_le64(bset_magic(c));
 	b->data->flags = 0;
@@ -666,9 +669,15 @@ static void btree_update_nodes_written(struct closure *cl)
 	 * to child nodes that weren't written yet: now, the child nodes have
 	 * been written so we can write out the update to the interior node.
 	 */
-retry:
 	mutex_lock(&c->btree_interior_update_lock);
 	as->nodes_written = true;
+retry:
+	as = list_first_entry_or_null(&c->btree_interior_updates_unwritten,
+				      struct btree_update, unwritten_list);
+	if (!as || !as->nodes_written) {
+		mutex_unlock(&c->btree_interior_update_lock);
+		return;
+	}
 
 	switch (as->mode) {
 	case BTREE_INTERIOR_NO_UPDATE:
@@ -681,11 +690,12 @@ retry:
 			mutex_unlock(&c->btree_interior_update_lock);
 			btree_node_lock_type(c, b, SIX_LOCK_read);
 			six_unlock_read(&b->lock);
+			mutex_lock(&c->btree_interior_update_lock);
 			goto retry;
 		}
 
 		BUG_ON(!btree_node_dirty(b));
-		closure_wait(&btree_current_write(b)->wait, cl);
+		closure_wait(&btree_current_write(b)->wait, &as->cl);
 
 		list_del(&as->write_blocked_list);
 
@@ -694,6 +704,8 @@ retry:
 		 * nodes to be writeable:
 		 */
 		closure_wake_up(&c->btree_interior_update_wait);
+
+		list_del(&as->unwritten_list);
 		mutex_unlock(&c->btree_interior_update_lock);
 
 		/*
@@ -702,6 +714,7 @@ retry:
 		 */
 		bch2_btree_node_write_cond(c, b, true);
 		six_unlock_read(&b->lock);
+		continue_at(&as->cl, btree_update_nodes_reachable, system_wq);
 		break;
 
 	case BTREE_INTERIOR_UPDATING_AS:
@@ -716,8 +729,12 @@ retry:
 		/*
 		 * and then we have to wait on that btree_update to finish:
 		 */
-		closure_wait(&as->parent_as->wait, cl);
+		closure_wait(&as->parent_as->wait, &as->cl);
+
+		list_del(&as->unwritten_list);
 		mutex_unlock(&c->btree_interior_update_lock);
+
+		continue_at(&as->cl, btree_update_nodes_reachable, system_wq);
 		break;
 
 	case BTREE_INTERIOR_UPDATING_ROOT:
@@ -728,6 +745,7 @@ retry:
 			mutex_unlock(&c->btree_interior_update_lock);
 			btree_node_lock_type(c, b, SIX_LOCK_read);
 			six_unlock_read(&b->lock);
+			mutex_lock(&c->btree_interior_update_lock);
 			goto retry;
 		}
 
@@ -744,6 +762,8 @@ retry:
 		 * can reuse the old nodes it'll have to do a journal commit:
 		 */
 		six_unlock_read(&b->lock);
+
+		list_del(&as->unwritten_list);
 		mutex_unlock(&c->btree_interior_update_lock);
 
 		/*
@@ -762,11 +782,12 @@ retry:
 
 	as->journal_seq = bch2_journal_last_unwritten_seq(&c->journal);
 
-	btree_update_wait_on_journal(cl);
-	return;
+	btree_update_wait_on_journal(&as->cl);
+	break;
 	}
 
-	continue_at(cl, btree_update_nodes_reachable, system_wq);
+	mutex_lock(&c->btree_interior_update_lock);
+	goto retry;
 }
 
 /*
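The control-flow change here is that completion no longer handles only the update whose closure fired: it marks that update written, then drains the head of the shared unwritten list for as long as the head has finished writing, which keeps interior updates completing in order. A toy sketch of that drain loop (stand-in types; the real code runs under btree_interior_update_lock and dispatches on as->mode):

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy model of the reworked btree_update_nodes_written(): updates
 * complete strictly in list order, so finishing one only makes
 * progress once everything ahead of it has also finished writing. */

struct update {
	const char	*name;
	bool		nodes_written;
	struct update	*next;	/* c->btree_interior_updates_unwritten */
};

static struct update *unwritten_head;

static void nodes_written(struct update *done)
{
	struct update *as;

	/* mutex_lock(&c->btree_interior_update_lock); */
	done->nodes_written = true;
retry:
	as = unwritten_head;
	if (!as || !as->nodes_written)
		return;	/* head still in flight: nothing to do yet */

	unwritten_head = as->next;	/* list_del(&as->unwritten_list) */
	/* mutex_unlock(...); complete the update, write the parent, ... */
	printf("completed %s\n", as->name);
	/* mutex_lock(...); */
	goto retry;
}

int main(void)
{
	struct update b = { "b", false, NULL };
	struct update a = { "a", false, &b };

	unwritten_head = &a;
	nodes_written(&b);	/* b is done, but head a isn't: no output */
	nodes_written(&a);	/* completes a, then b, in order */
	return 0;
}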
@@ -778,6 +799,7 @@ static void btree_update_updated_node(struct btree_update *as, struct btree *b)
 	struct bch_fs *c = as->c;
 
 	mutex_lock(&c->btree_interior_update_lock);
+	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
 
 	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
 	BUG_ON(!btree_node_dirty(b));
@@ -858,6 +880,7 @@ static void btree_update_updated_root(struct btree_update *as)
 	struct btree_root *r = &c->btree_roots[as->btree_id];
 
 	mutex_lock(&c->btree_interior_update_lock);
+	list_add_tail(&as->unwritten_list, &c->btree_interior_updates_unwritten);
 
 	BUG_ON(as->mode != BTREE_INTERIOR_NO_UPDATE);
 
@@ -55,6 +55,7 @@ struct btree_update {
 	struct bch_fs			*c;
 
 	struct list_head		list;
+	struct list_head		unwritten_list;
 
 	/* What kind of update are we doing? */
 	enum {
@@ -758,7 +758,7 @@ out:
 	if (likely(!(trans->flags & BTREE_INSERT_NOCHECK_RW)))
 		percpu_ref_put(&trans->c->writes);
 out_noupdates:
-	bch2_trans_reset(trans, TRANS_RESET_MEM|TRANS_RESET_NOTRAVERSE);
+	bch2_trans_reset(trans, !ret ? TRANS_RESET_NOTRAVERSE : 0);
 
 	return ret;
 err:
@@ -839,18 +839,21 @@ int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
 	return 0;
 }
 
-static int __bch2_btree_insert(struct btree_trans *trans,
-			       enum btree_id id, struct bkey_i *k)
+int __bch2_btree_insert(struct btree_trans *trans,
+			enum btree_id id, struct bkey_i *k)
 {
 	struct btree_iter *iter;
+	int ret;
 
 	iter = bch2_trans_get_iter(trans, id, bkey_start_pos(&k->k),
 				   BTREE_ITER_INTENT);
 	if (IS_ERR(iter))
 		return PTR_ERR(iter);
 
-	bch2_trans_update(trans, iter, k, 0);
-	return 0;
+	ret   = bch2_btree_iter_traverse(iter) ?:
+		bch2_trans_update(trans, iter, k, 0);
+	bch2_trans_iter_put(trans, iter);
+	return ret;
 }
 
 /**
@@ -882,7 +885,7 @@ retry:
 	       bkey_cmp(iter->pos, end) < 0) {
 		struct bkey_i delete;
 
-		bch2_trans_reset(trans, TRANS_RESET_MEM);
+		bch2_trans_begin(trans);
 
 		bkey_init(&delete.k);
 
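The new __bch2_btree_insert() chains its two steps with the GNU "elvis" operator, the same idiom used in the __bch2_trans_do() macro. A tiny standalone sketch of how that chaining behaves (step1/step2 are hypothetical stand-ins for bch2_btree_iter_traverse()/bch2_trans_update()):

#include <stdio.h>

/* "a ?: b" (GNU C extension, supported by gcc and clang) evaluates b
 * only when a is zero, so a chain of int-returning steps stops at the
 * first nonzero error code. */

static int step1(int fail)
{
	return fail ? -5 : 0;	/* -5 plays the role of an errno value */
}

static int step2(void)
{
	puts("step2 ran");
	return 0;
}

static int chain(int fail)
{
	return step1(fail) ?: step2();
}

int main(void)
{
	printf("ok: %d\n", chain(0));	/* step2 runs, chain returns 0 */
	printf("err: %d\n", chain(1));	/* short-circuits, returns -5 */
	return 0;
}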
@@ -169,12 +169,12 @@ int bch2_dirent_rename(struct btree_trans *trans,
 			const struct qstr *dst_name, u64 *dst_inum,
 			enum bch_rename_mode mode)
 {
-	struct btree_iter *src_iter, *dst_iter;
+	struct btree_iter *src_iter = NULL, *dst_iter = NULL;
 	struct bkey_s_c old_src, old_dst;
 	struct bkey_i_dirent *new_src = NULL, *new_dst = NULL;
 	struct bpos dst_pos =
 		POS(dst_dir, bch2_dirent_hash(dst_hash, dst_name));
-	int ret;
+	int ret = 0;
 
 	*src_inum = *dst_inum = 0;
 
@@ -191,8 +191,10 @@ int bch2_dirent_rename(struct btree_trans *trans,
 		: bch2_hash_lookup(trans, bch2_dirent_hash_desc,
 				   dst_hash, dst_dir, dst_name,
 				   BTREE_ITER_INTENT);
-	if (IS_ERR(dst_iter))
-		return PTR_ERR(dst_iter);
+	ret = PTR_ERR_OR_ZERO(dst_iter);
+	if (ret)
+		goto out;
 
 	old_dst = bch2_btree_iter_peek_slot(dst_iter);
 
 	if (mode != BCH_RENAME)
@@ -202,15 +204,18 @@ int bch2_dirent_rename(struct btree_trans *trans,
 	src_iter = bch2_hash_lookup(trans, bch2_dirent_hash_desc,
 				    src_hash, src_dir, src_name,
 				    BTREE_ITER_INTENT);
-	if (IS_ERR(src_iter))
-		return PTR_ERR(src_iter);
+	ret = PTR_ERR_OR_ZERO(src_iter);
+	if (ret)
+		goto out;
 
 	old_src = bch2_btree_iter_peek_slot(src_iter);
 	*src_inum = le64_to_cpu(bkey_s_c_to_dirent(old_src).v->d_inum);
 
 	/* Create new dst key: */
 	new_dst = dirent_create_key(trans, 0, dst_name, 0);
-	if (IS_ERR(new_dst))
-		return PTR_ERR(new_dst);
+	ret = PTR_ERR_OR_ZERO(new_dst);
+	if (ret)
+		goto out;
 
 	dirent_copy_target(new_dst, bkey_s_c_to_dirent(old_src));
 	new_dst->k.p = dst_iter->pos;
@@ -218,15 +223,18 @@ int bch2_dirent_rename(struct btree_trans *trans,
 	/* Create new src key: */
 	if (mode == BCH_RENAME_EXCHANGE) {
 		new_src = dirent_create_key(trans, 0, src_name, 0);
-		if (IS_ERR(new_src))
-			return PTR_ERR(new_src);
+		ret = PTR_ERR_OR_ZERO(new_src);
+		if (ret)
+			goto out;
 
 		dirent_copy_target(new_src, bkey_s_c_to_dirent(old_dst));
 		new_src->k.p = src_iter->pos;
 	} else {
 		new_src = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
-		if (IS_ERR(new_src))
-			return PTR_ERR(new_src);
+		ret = PTR_ERR_OR_ZERO(new_src);
+		if (ret)
+			goto out;
 
 		bkey_init(&new_src->k);
 		new_src->k.p = src_iter->pos;
@@ -247,7 +255,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
 			new_dst->k.p = src_iter->pos;
 			bch2_trans_update(trans, src_iter,
 					  &new_dst->k_i, 0);
-			return 0;
+			goto out;
 		} else {
 			/* If we're overwriting, we can't insert new_dst
 			 * at a different slot because it has to
@@ -261,7 +269,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
 		ret = bch2_hash_needs_whiteout(trans, bch2_dirent_hash_desc,
 					       src_hash, src_iter);
 		if (ret < 0)
-			return ret;
+			goto out;
 
 		if (ret)
 			new_src->k.type = KEY_TYPE_whiteout;
@@ -270,7 +278,10 @@ int bch2_dirent_rename(struct btree_trans *trans,
 
 	bch2_trans_update(trans, src_iter, &new_src->k_i, 0);
 	bch2_trans_update(trans, dst_iter, &new_dst->k_i, 0);
-	return 0;
+out:
+	bch2_trans_iter_put(trans, src_iter);
+	bch2_trans_iter_put(trans, dst_iter);
+	return ret;
 }
 
 int bch2_dirent_delete_at(struct btree_trans *trans,
@@ -331,9 +342,7 @@ int bch2_empty_dir_trans(struct btree_trans *trans, u64 dir_inum)
 			break;
 		}
 	}
 
-	if (!IS_ERR(iter))
-		bch2_trans_iter_put(trans, iter);
+	bch2_trans_iter_put(trans, iter);
 
 	return ret;
 }
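The rename path now funnels every failure through a single out: label that puts both iterators, which is what lets the early returns disappear. A minimal standalone sketch of that init-to-NULL / goto-out cleanup idiom (get_iter/put_iter are illustrative stand-ins, not the bcachefs calls):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Initialize every resource to NULL, turn early returns into
 * "ret = ...; goto out", and release everything on one exit path. */

struct iter { int id; };

static struct iter *get_iter(int id, int fail)
{
	struct iter *it = fail ? NULL : malloc(sizeof(*it));

	if (it)
		it->id = id;
	return it;
}

static void put_iter(struct iter *it)
{
	free(it);	/* like bch2_trans_iter_put(), a no-op on NULL */
}

static int rename_like(int fail_second)
{
	struct iter *src = NULL, *dst = NULL;
	int ret = 0;

	dst = get_iter(1, 0);
	if (!dst) {
		ret = -ENOMEM;
		goto out;
	}

	src = get_iter(2, fail_second);
	if (!src) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... stage the actual updates here ... */
out:
	put_iter(src);	/* NULL-safe: runs even when src was never set */
	put_iter(dst);
	return ret;
}

int main(void)
{
	printf("ok path: %d, failing path: %d\n",
	       rename_like(0), rename_like(1));
	return 0;
}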
@@ -741,6 +741,8 @@ found_slot:
 	ret = bch2_trans_commit(&trans, NULL, NULL,
 				BTREE_INSERT_NOFAIL);
+err:
+	bch2_trans_iter_put(&trans, iter);
 
 	if (ret == -EINTR)
 		goto retry;
 
@@ -1201,8 +1203,7 @@ static int __bch2_stripe_write_key(struct btree_trans *trans,
 				   struct btree_iter *iter,
 				   struct stripe *m,
 				   size_t idx,
-				   struct bkey_i_stripe *new_key,
-				   unsigned flags)
+				   struct bkey_i_stripe *new_key)
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_s_c k;
@@ -1231,9 +1232,7 @@ static int __bch2_stripe_write_key(struct btree_trans *trans,
 	spin_unlock(&c->ec_stripes_heap_lock);
 
 	bch2_trans_update(trans, iter, &new_key->k_i, 0);
-
-	return bch2_trans_commit(trans, NULL, NULL,
-				 BTREE_INSERT_NOFAIL|flags);
+	return 0;
 }
 
 int bch2_stripes_write(struct bch_fs *c, unsigned flags, bool *wrote)
@@ -1257,12 +1256,10 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags, bool *wrote)
 		if (!m->dirty)
 			continue;
 
-		do {
-			bch2_trans_reset(&trans, TRANS_RESET_MEM);
-
-			ret = __bch2_stripe_write_key(&trans, iter, m,
-					giter.pos, new_key, flags);
-		} while (ret == -EINTR);
+		ret = __bch2_trans_do(&trans, NULL, NULL,
+				      BTREE_INSERT_NOFAIL|flags,
+			__bch2_stripe_write_key(&trans, iter, m,
+					giter.pos, new_key));
 
 		if (ret)
 			break;
@@ -2648,7 +2648,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
 		struct bkey_i_reservation reservation;
 		struct bkey_s_c k;
 
-		bch2_trans_reset(&trans, TRANS_RESET_MEM);
+		bch2_trans_begin(&trans);
 
 		k = bch2_btree_iter_peek_slot(iter);
 		if ((ret = bkey_err(k)))
@@ -81,7 +81,6 @@ static int remove_dirent(struct btree_trans *trans,
 	return __bch2_trans_do(trans, NULL, NULL,
 			       BTREE_INSERT_NOFAIL|
 			       BTREE_INSERT_LAZY_RW,
-			       TRANS_RESET_MEM,
 			       __remove_dirent(trans, dirent));
 }
 
@@ -182,8 +181,6 @@ static int hash_redo_key(const struct bch_hash_desc desc,
 	struct bkey_i delete;
 	struct bkey_i *tmp;
 
-	bch2_trans_reset(trans, TRANS_RESET_MEM);
-
 	tmp = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
 	if (IS_ERR(tmp))
 		return PTR_ERR(tmp);
@@ -194,11 +191,8 @@ static int hash_redo_key(const struct bch_hash_desc desc,
 	delete.k.p = k_iter->pos;
 	bch2_trans_update(trans, k_iter, &delete, 0);
 
-	return bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode,
-			     tmp, BCH_HASH_SET_MUST_CREATE) ?:
-		bch2_trans_commit(trans, NULL, NULL,
-				  BTREE_INSERT_NOFAIL|
-				  BTREE_INSERT_LAZY_RW);
+	return bch2_hash_set(trans, desc, &h->info, k_iter->pos.inode,
+			     tmp, BCH_HASH_SET_MUST_CREATE);
 }
 
 static int fsck_hash_delete_at(struct btree_trans *trans,
@@ -320,10 +314,9 @@ static int hash_check_key(struct btree_trans *trans,
 			    desc.btree_id, k.k->p.offset,
 			    hashed, h->chain->pos.offset,
 			    (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf))) {
-		do {
-			ret = hash_redo_key(desc, trans, h, k_iter, k, hashed);
-		} while (ret == -EINTR);
-
+		ret = __bch2_trans_do(trans, NULL, NULL,
+				      BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+			hash_redo_key(desc, trans, h, k_iter, k, hashed));
 		if (ret) {
 			bch_err(c, "hash_redo_key err %i", ret);
 			return ret;
@@ -387,7 +380,6 @@ static int check_dirent_hash(struct btree_trans *trans, struct hash_check *h,
 	ret = __bch2_trans_do(trans, NULL, NULL,
 			      BTREE_INSERT_NOFAIL|
 			      BTREE_INSERT_LAZY_RW,
-			      TRANS_RESET_MEM,
 			      (bch2_trans_update(trans, iter, &d->k_i, 0), 0));
 	if (ret)
 		goto err;
@@ -410,11 +402,10 @@ err_redo:
 			    k->k->p.offset, hash, h->chain->pos.offset,
 			    (bch2_bkey_val_to_text(&PBUF(buf), c,
 						   *k), buf))) {
-		do {
-			ret = hash_redo_key(bch2_dirent_hash_desc, trans,
-					    h, iter, *k, hash);
-		} while (ret == -EINTR);
-
+		ret = __bch2_trans_do(trans, NULL, NULL,
+				      BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+			hash_redo_key(bch2_dirent_hash_desc, trans,
+				      h, iter, *k, hash));
 		if (ret)
 			bch_err(c, "hash_redo_key err %i", ret);
 		else
@@ -660,7 +651,6 @@ retry:
 	ret = __bch2_trans_do(&trans, NULL, NULL,
 			      BTREE_INSERT_NOFAIL|
 			      BTREE_INSERT_LAZY_RW,
-			      TRANS_RESET_MEM,
 			      (bch2_trans_update(&trans, iter, &n->k_i, 0), 0));
 	kfree(n);
 	if (ret)
@@ -1275,7 +1265,6 @@ static int check_inode(struct btree_trans *trans,
 	ret = __bch2_trans_do(trans, NULL, NULL,
 			      BTREE_INSERT_NOFAIL|
 			      BTREE_INSERT_LAZY_RW,
-			      TRANS_RESET_MEM,
 			      (bch2_trans_update(trans, iter, &p.inode.k_i, 0), 0));
 	if (ret)
 		bch_err(c, "error in fsck: error %i "
@@ -325,7 +325,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
 			bch2_disk_reservation_init(c, 0);
 		struct bkey_i delete;
 
-		bch2_trans_reset(trans, TRANS_RESET_MEM);
+		bch2_trans_begin(trans);
 
 		ret = bkey_err(k);
 		if (ret)
@@ -399,7 +399,7 @@ int bch2_write_index_default(struct bch_write_op *op)
 			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
 	do {
-		bch2_trans_reset(&trans, TRANS_RESET_MEM);
+		bch2_trans_begin(&trans);
 
 		k = bch2_keylist_front(keys);
 
@@ -1043,9 +1043,16 @@ void bch2_journal_write(struct closure *cl)
 	bytes = vstruct_bytes(jset);
 	memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
 
+retry_alloc:
 	spin_lock(&j->lock);
 	ret = journal_write_alloc(j, w, sectors);
 
+	if (ret && j->can_discard) {
+		spin_unlock(&j->lock);
+		bch2_journal_do_discards(j);
+		goto retry_alloc;
+	}
+
 	/*
 	 * write is allocated, no longer need to account for it in
 	 * bch2_journal_space_available():
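When allocation fails and there are buckets waiting to be discarded, the journal now discards them and tries again; the loop terminates because a retry only happens when discards made new space available. A toy model of that retry_alloc shape (the journal struct and helpers are simplified stand-ins):

#include <stdio.h>
#include <errno.h>

/* Toy model of the retry_alloc loop added to bch2_journal_write(). */

struct journal {
	int free_sectors;
	int discardable_sectors;	/* reclaimable once discarded */
};

static int write_alloc(struct journal *j, int sectors)
{
	if (j->free_sectors < sectors)
		return -ENOSPC;
	j->free_sectors -= sectors;
	return 0;
}

static void do_discards(struct journal *j)
{
	j->free_sectors += j->discardable_sectors;
	j->discardable_sectors = 0;
}

static int alloc_with_retry(struct journal *j, int sectors)
{
	int ret;
retry_alloc:
	/* spin_lock(&j->lock) here in the real code */
	ret = write_alloc(j, sectors);

	if (ret && j->discardable_sectors) {
		/* spin_unlock(&j->lock); */
		do_discards(j);
		goto retry_alloc;
	}
	return ret;
}

int main(void)
{
	struct journal j = { .free_sectors = 4, .discardable_sectors = 8 };

	printf("alloc 8 sectors: %d\n", alloc_with_retry(&j, 8));
	return 0;
}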
@@ -1008,9 +1008,7 @@ int bch2_fs_recovery(struct bch_fs *c)
 		c->disk_sb.sb->version_min =
 			le16_to_cpu(bcachefs_metadata_version_min);
 		c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
-		c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_new_siphash;
-		c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_new_extent_overwrite;
-		c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_btree_ptr_v2;
+		c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
 		write_sb = true;
 	}
 
@@ -1129,8 +1127,7 @@ int bch2_fs_initialize(struct bch_fs *c)
 	c->disk_sb.sb->version = c->disk_sb.sb->version_min =
 		le16_to_cpu(bcachefs_metadata_version_current);
 	c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_atomic_nlink;
-	c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_new_siphash;
-	c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_new_extent_overwrite;
+	c->disk_sb.sb->features[0] |= BCH_SB_FEATURES_ALL;
 
 	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
 	SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
@@ -184,7 +184,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 			       BTREE_ITER_INTENT);
 
 	while (1) {
-		bch2_trans_reset(&trans, TRANS_RESET_MEM);
+		bch2_trans_begin(&trans);
 
 		trans.mem_top = 0;
 
@@ -163,6 +163,7 @@ bch2_hash_lookup(struct btree_trans *trans,
 			break;
 		}
 	}
+	bch2_trans_iter_put(trans, iter);
 
 	return ERR_PTR(ret ?: -ENOENT);
 }
@@ -187,6 +188,9 @@ bch2_hash_hole(struct btree_trans *trans,
 			return iter;
 	}
 
+	iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
+	bch2_trans_iter_put(trans, iter);
+
 	return ERR_PTR(ret ?: -ENOSPC);
 }
 
@@ -674,6 +674,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	INIT_LIST_HEAD(&c->list);
 
 	INIT_LIST_HEAD(&c->btree_interior_update_list);
+	INIT_LIST_HEAD(&c->btree_interior_updates_unwritten);
 	mutex_init(&c->btree_reserve_cache_lock);
 	mutex_init(&c->btree_interior_update_lock);
 
@@ -43,8 +43,8 @@ static void test_delete(struct bch_fs *c, u64 nr)
 	ret = bch2_btree_iter_traverse(iter);
 	BUG_ON(ret);
 
-	bch2_trans_update(&trans, iter, &k.k_i, 0);
-	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
+	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+		bch2_trans_update(&trans, iter, &k.k_i, 0));
 	BUG_ON(ret);
 
 	pr_info("deleting once");
@@ -75,8 +75,8 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
 	ret = bch2_btree_iter_traverse(iter);
 	BUG_ON(ret);
 
-	bch2_trans_update(&trans, iter, &k.k_i, 0);
-	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
+	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+		bch2_trans_update(&trans, iter, &k.k_i, 0));
 	BUG_ON(ret);
 
 	bch2_journal_flush_all_pins(&c->journal);
@@ -409,18 +409,24 @@ static u64 test_rand(void)
 
 static void rand_insert(struct bch_fs *c, u64 nr)
 {
+	struct btree_trans trans;
 	struct bkey_i_cookie k;
 	int ret;
 	u64 i;
 
+	bch2_trans_init(&trans, c, 0, 0);
+
 	for (i = 0; i < nr; i++) {
 		bkey_cookie_init(&k.k_i);
 		k.k.p.offset = test_rand();
 
-		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
-					NULL, NULL, 0);
+		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			__bch2_btree_insert(&trans, BTREE_ID_DIRENTS, &k.k_i));
 
 		BUG_ON(ret);
 	}
+
+	bch2_trans_exit(&trans);
 }
 
 static void rand_lookup(struct bch_fs *c, u64 nr)
@@ -465,8 +471,9 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
 		bkey_cookie_init(&k.k_i);
 		k.k.p = iter->pos;
 
-		bch2_trans_update(&trans, iter, &k.k_i, 0);
-		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
+		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			bch2_trans_update(&trans, iter, &k.k_i, 0));
 
 		BUG_ON(ret);
 	}
@@ -476,20 +483,50 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
 	bch2_trans_exit(&trans);
 }
 
+static int __do_delete(struct btree_trans *trans, struct bpos pos)
+{
+	struct btree_iter *iter;
+	struct bkey_i delete;
+	struct bkey_s_c k;
+	int ret = 0;
+
+	iter = bch2_trans_get_iter(trans, BTREE_ID_DIRENTS, pos,
+				   BTREE_ITER_INTENT);
+	ret = PTR_ERR_OR_ZERO(iter);
+	if (ret)
+		goto err;
+
+	k = bch2_btree_iter_peek(iter);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
+
+	bkey_init(&delete.k);
+	delete.k.p = k.k->p;
+
+	bch2_trans_update(trans, iter, &delete, 0);
+err:
+	bch2_trans_iter_put(trans, iter);
+	return ret;
+}
+
 static void rand_delete(struct bch_fs *c, u64 nr)
 {
-	struct bkey_i k;
+	struct btree_trans trans;
 	int ret;
 	u64 i;
 
-	for (i = 0; i < nr; i++) {
-		bkey_init(&k.k);
-		k.k.p.offset = test_rand();
+	bch2_trans_init(&trans, c, 0, 0);
 
-		ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
-					NULL, NULL, 0);
+	for (i = 0; i < nr; i++) {
+		struct bpos pos = POS(0, test_rand());
+
+		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			__do_delete(&trans, pos));
 		BUG_ON(ret);
 	}
+
+	bch2_trans_exit(&trans);
 }
 
 static void seq_insert(struct bch_fs *c, u64 nr)
@@ -509,8 +546,9 @@ static void seq_insert(struct bch_fs *c, u64 nr)
 			      BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
 		insert.k.p = iter->pos;
 
-		bch2_trans_update(&trans, iter, &insert.k_i, 0);
-		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
+		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			bch2_trans_update(&trans, iter, &insert.k_i, 0));
+
 		BUG_ON(ret);
 
 		if (++i == nr)
@@ -548,8 +586,9 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)
 
 		bkey_reassemble(&u.k_i, k);
 
-		bch2_trans_update(&trans, iter, &u.k_i, 0);
-		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
+		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			bch2_trans_update(&trans, iter, &u.k_i, 0));
+
 		BUG_ON(ret);
 	}
 	bch2_trans_exit(&trans);