mirror of https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-22 00:00:03 +03:00

Update bcachefs sources to 380885b0b8 bcachefs: Fix counting iterators for reflink pointers

This commit is contained in:
parent 98b8f8d0c0
commit 7f69c4161c
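Reviewer's note: most of the churn in this sync is mechanical — bch2_trans_update() loses the BTREE_INSERT_ENTRY() wrapper and takes the iterator and key directly, and it now pins the iterator with BTREE_ITER_KEEP_UNTIL_COMMIT. A minimal sketch of the post-sync call pattern, assuming an already-opened filesystem `c` and a caller-prepared key `new_key` (both names illustrative, error handling abbreviated):

/* Sketch only: the new update sequence; not code from this commit. */
static int example_update(struct bch_fs *c, struct bkey_i *new_key)
{
	struct btree_trans trans;
	struct btree_iter *iter;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(&new_key->k),
				   BTREE_ITER_INTENT);
	ret = PTR_ERR_OR_ZERO(iter);
	if (ret)
		goto out;

	/* No BTREE_INSERT_ENTRY() wrapper anymore: */
	bch2_trans_update(&trans, iter, new_key);

	ret = bch2_trans_commit(&trans, NULL, NULL, BTREE_INSERT_NOFAIL);
out:
	bch2_trans_exit(&trans);
	return ret;
}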
@@ -1 +1 @@
-5a3a4087af27aa10da5f23cb174a439946153584
+380885b0b8c38dc770c48602325de77171acc419

@@ -375,7 +375,7 @@ int bch2_acl_chmod(struct btree_trans *trans,
 	}
 
 	new->k.p = iter->pos;
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new->k_i));
+	bch2_trans_update(trans, iter, &new->k_i);
 	*new_acl = acl;
 	acl = NULL;
 err:

@@ -311,7 +311,7 @@ retry:
 	a->k.p = iter->pos;
 	bch2_alloc_pack(a, new_u);
 
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+	bch2_trans_update(trans, iter, &a->k_i);
 	ret = bch2_trans_commit(trans, NULL, NULL,
 				BTREE_INSERT_ATOMIC|
 				BTREE_INSERT_NOFAIL|

@@ -899,7 +899,7 @@ retry:
 		a->k.p = iter->pos;
 		bch2_alloc_pack(a, u);
 
-		bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &a->k_i));
+		bch2_trans_update(trans, iter, &a->k_i);
 
 		/*
 		 * XXX:

@@ -473,7 +473,7 @@ static void __bch2_btree_iter_verify(struct btree_iter *iter,
 	}
 
 	BUG_ON(iter->uptodate == BTREE_ITER_UPTODATE &&
-	       (iter->flags & BTREE_ITER_TYPE) == BTREE_ITER_KEYS &&
+	       btree_iter_type(iter) == BTREE_ITER_KEYS &&
	       !bkey_whiteout(&iter->k) &&
	       bch2_btree_node_iter_end(&l->iter));
 }

@@ -1152,6 +1152,7 @@ static inline void bch2_btree_iter_checks(struct btree_iter *iter,
 	EBUG_ON(!!(iter->flags & BTREE_ITER_IS_EXTENTS) !=
 		(btree_node_type_is_extents(iter->btree_id) &&
 		 type != BTREE_ITER_NODES));
+	EBUG_ON(btree_iter_type(iter) != type);
 
 	bch2_btree_trans_verify_locks(iter->trans);
 }

@@ -1661,7 +1662,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 {
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS);
+	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
 
 	if (iter->uptodate == BTREE_ITER_UPTODATE)
 		return btree_iter_peek_uptodate(iter);

@@ -1675,7 +1676,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 
 struct bkey_s_c bch2_btree_iter_next_slot(struct btree_iter *iter)
 {
-	bch2_btree_iter_checks(iter, BTREE_ITER_SLOTS);
+	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
 
 	iter->pos = btree_type_successor(iter->btree_id, iter->k.p);
 

@@ -1729,15 +1730,6 @@ static inline void bch2_btree_iter_init(struct btree_trans *trans,
 
 /* new transactional stuff: */
 
-int bch2_trans_iter_put(struct btree_trans *trans,
-			struct btree_iter *iter)
-{
-	int ret = btree_iter_err(iter);
-
-	trans->iters_live &= ~(1ULL << iter->idx);
-	return ret;
-}
-
 static inline void __bch2_trans_iter_free(struct btree_trans *trans,
 					  unsigned idx)
 {

@@ -1745,26 +1737,27 @@ static inline void __bch2_trans_iter_free(struct btree_trans *trans,
 	trans->iters_linked	&= ~(1ULL << idx);
 	trans->iters_live	&= ~(1ULL << idx);
 	trans->iters_touched	&= ~(1ULL << idx);
-	trans->iters_unlink_on_restart	&= ~(1ULL << idx);
-	trans->iters_unlink_on_commit	&= ~(1ULL << idx);
 }
 
+int bch2_trans_iter_put(struct btree_trans *trans,
+			struct btree_iter *iter)
+{
+	int ret = btree_iter_err(iter);
+
+	if (!(trans->iters_touched & (1ULL << iter->idx)) &&
+	    !(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT))
+		__bch2_trans_iter_free(trans, iter->idx);
+
+	trans->iters_live &= ~(1ULL << iter->idx);
+	return ret;
+}
+
 int bch2_trans_iter_free(struct btree_trans *trans,
 			 struct btree_iter *iter)
 {
 	int ret = btree_iter_err(iter);
+	trans->iters_touched &= ~(1ULL << iter->idx);
 
 	__bch2_trans_iter_free(trans, iter->idx);
 	return ret;
 }
 
 int bch2_trans_iter_free_on_commit(struct btree_trans *trans,
 				   struct btree_iter *iter)
 {
 	int ret = btree_iter_err(iter);
 
-	trans->iters_unlink_on_commit |= 1ULL << iter->idx;
-	return ret;
+	return bch2_trans_iter_put(trans, iter);
 }
 
 static int bch2_trans_realloc_iters(struct btree_trans *trans,

@@ -1830,7 +1823,7 @@ success:
 	return 0;
 }
 
-static int btree_trans_iter_alloc(struct btree_trans *trans)
+static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
 {
 	unsigned idx = __ffs64(~trans->iters_linked);
 

@@ -1838,9 +1831,27 @@ static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
 		goto got_slot;
 
 	if (trans->nr_iters == trans->size) {
-		int ret = bch2_trans_realloc_iters(trans, trans->size * 2);
+		int ret;
+
+		if (trans->nr_iters >= BTREE_ITER_MAX) {
+			struct btree_iter *iter;
+
+			trans_for_each_iter(trans, iter) {
+				pr_err("iter: btree %s pos %llu:%llu%s%s%s",
+				       bch2_btree_ids[iter->btree_id],
+				       iter->pos.inode,
+				       iter->pos.offset,
+				       (trans->iters_live & (1ULL << iter->idx)) ? " live" : "",
+				       (trans->iters_touched & (1ULL << iter->idx)) ? " touched" : "",
+				       iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT ? " keep" : "");
+			}
+
+			panic("trans iter oveflow\n");
+		}
+
+		ret = bch2_trans_realloc_iters(trans, trans->size * 2);
 		if (ret)
-			return ret;
+			return ERR_PTR(ret);
 	}
 
 	idx = trans->nr_iters++;

@@ -1850,71 +1861,97 @@ static struct btree_iter *btree_trans_iter_alloc(struct btree_trans *trans)
 got_slot:
 	BUG_ON(trans->iters_linked & (1ULL << idx));
 	trans->iters_linked |= 1ULL << idx;
-	return idx;
+	return &trans->iters[idx];
 }
 
+static inline void btree_iter_copy(struct btree_iter *dst,
+				   struct btree_iter *src)
+{
+	unsigned i, idx = dst->idx;
+
+	*dst = *src;
+	dst->idx = idx;
+
+	for (i = 0; i < BTREE_MAX_DEPTH; i++)
+		if (btree_node_locked(dst, i))
+			six_lock_increment(&dst->l[i].b->lock,
+					   __btree_lock_want(dst, i));
+}
+
+static inline struct bpos bpos_diff(struct bpos l, struct bpos r)
+{
+	if (bkey_cmp(l, r) > 0)
+		swap(l, r);
+
+	return POS(r.inode - l.inode, r.offset - l.offset);
+}
+
 static struct btree_iter *__btree_trans_get_iter(struct btree_trans *trans,
 						 unsigned btree_id, struct bpos pos,
-						 unsigned flags, u64 iter_id)
+						 unsigned flags)
 {
-	struct btree_iter *iter;
-	int idx;
+	struct btree_iter *iter, *best = NULL;
 
 	BUG_ON(trans->nr_iters > BTREE_ITER_MAX);
 
-	for (idx = 0; idx < trans->nr_iters; idx++) {
-		if (!(trans->iters_linked & (1ULL << idx)))
+	trans_for_each_iter(trans, iter) {
+		if (btree_iter_type(iter) != (flags & BTREE_ITER_TYPE))
 			continue;
 
-		iter = &trans->iters[idx];
-		if (iter_id
-		    ? iter->id == iter_id
-		    : (iter->btree_id == btree_id &&
-		       !bkey_cmp(iter->pos, pos)))
-			goto found;
-	}
-	idx = -1;
-found:
-	if (idx < 0) {
-		idx = btree_trans_iter_alloc(trans);
-		if (idx < 0)
-			return ERR_PTR(idx);
+		if (iter->btree_id != btree_id)
+			continue;
 
-		iter = &trans->iters[idx];
-		iter->id = iter_id;
+		if (best &&
+		    bkey_cmp(bpos_diff(best->pos, pos),
+			     bpos_diff(iter->pos, pos)) < 0)
+			continue;
+
+		best = iter;
+	}
+
+	if (!best) {
+		iter = btree_trans_iter_alloc(trans);
+		if (IS_ERR(iter))
+			return iter;
 
 		bch2_btree_iter_init(trans, iter, btree_id, pos, flags);
-	} else {
-		iter = &trans->iters[idx];
+	} else if ((trans->iters_live & (1ULL << best->idx)) ||
+		   (best->flags & BTREE_ITER_KEEP_UNTIL_COMMIT)) {
+		iter = btree_trans_iter_alloc(trans);
+		if (IS_ERR(iter))
+			return iter;
 
-		iter->flags &= ~(BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
-		iter->flags |= flags & (BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
-
-		if ((iter->flags & BTREE_ITER_INTENT) &&
-		    !bch2_btree_iter_upgrade(iter, 1)) {
-			trace_trans_restart_upgrade(trans->ip);
-			return ERR_PTR(-EINTR);
-		}
+		btree_iter_copy(iter, best);
+	} else {
+		iter = best;
 	}
 
-	BUG_ON(iter->btree_id != btree_id);
-	BUG_ON(trans->iters_live & (1ULL << idx));
-	trans->iters_live	|= 1ULL << idx;
-	trans->iters_touched	|= 1ULL << idx;
+	iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+	iter->flags &= ~(BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+	iter->flags |= flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT|BTREE_ITER_PREFETCH);
+
+	if (iter->flags & BTREE_ITER_INTENT)
+		bch2_btree_iter_upgrade(iter, 1);
+	else
+		bch2_btree_iter_downgrade(iter);
+
+	BUG_ON(iter->btree_id != btree_id);
+	BUG_ON((iter->flags ^ flags) & BTREE_ITER_TYPE);
+	BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
+	BUG_ON(trans->iters_live & (1ULL << iter->idx));
+
+	trans->iters_live	|= 1ULL << iter->idx;
+	trans->iters_touched	|= 1ULL << iter->idx;
 
 	return iter;
 }
 
-struct btree_iter *__bch2_trans_get_iter(struct btree_trans *trans,
-					 enum btree_id btree_id,
-					 struct bpos pos, unsigned flags,
-					 u64 iter_id)
+struct btree_iter *bch2_trans_get_iter(struct btree_trans *trans,
				       enum btree_id btree_id,
+				       struct bpos pos, unsigned flags)
 {
 	struct btree_iter *iter =
-		__btree_trans_get_iter(trans, btree_id, pos, flags, iter_id);
+		__btree_trans_get_iter(trans, btree_id, pos, flags);
 
 	if (!IS_ERR(iter))
 		bch2_btree_iter_set_pos(iter, pos);
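Note: the rewritten lookup above no longer keys iterators by a caller-supplied id; it scans the transaction's linked iterators for one of matching type on the same btree and keeps whichever sits closest to the requested position, using bpos_diff() as the distance. A standalone sketch of that selection logic, with plain u64 positions standing in for struct bpos (names pick_closest/slot are illustrative):

/* Illustrative model of the "closest position wins" scan. */
#include <stddef.h>
#include <stdint.h>

struct slot { int in_use; int btree_id; uint64_t pos; };

static uint64_t dist(uint64_t a, uint64_t b)
{
	return a > b ? a - b : b - a;	/* bpos_diff() analogue */
}

static struct slot *pick_closest(struct slot *s, size_t n,
				 int btree_id, uint64_t pos)
{
	struct slot *best = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (!s[i].in_use || s[i].btree_id != btree_id)
			continue;
		if (best && dist(best->pos, pos) < dist(s[i].pos, pos))
			continue;	/* current best is already closer */
		best = &s[i];
	}
	return best;	/* NULL -> caller allocates a fresh iterator */
}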
@@ -1930,7 +1967,7 @@ struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *trans,
 {
 	struct btree_iter *iter =
 		__btree_trans_get_iter(trans, btree_id, pos,
-				       flags|BTREE_ITER_NODES, 0);
+				       flags|BTREE_ITER_NODES);
 	unsigned i;
 
 	BUG_ON(IS_ERR(iter));

@@ -1950,28 +1987,22 @@ struct btree_iter *bch2_trans_copy_iter(struct btree_trans *trans,
 					struct btree_iter *src)
 {
 	struct btree_iter *iter;
-	int i, idx;
 
-	idx = btree_trans_iter_alloc(trans);
-	if (idx < 0)
-		return ERR_PTR(idx);
+	iter = btree_trans_iter_alloc(trans);
+	if (IS_ERR(iter))
+		return iter;
 
-	trans->iters_live		|= 1ULL << idx;
-	trans->iters_touched		|= 1ULL << idx;
-	trans->iters_unlink_on_restart	|= 1ULL << idx;
+	btree_iter_copy(iter, src);
 
-	iter = &trans->iters[idx];
+	trans->iters_live |= 1ULL << iter->idx;
+	/*
+	 * Don't mark it as touched, we don't need to preserve this iter since
+	 * it's cheap to copy it again:
+	 */
+	trans->iters_touched &= ~(1ULL << iter->idx);
+	iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
 
-	memcpy(&iter->trans,
-	       &src->trans,
-	       (void *) &iter[1] - (void *) &iter->trans);
-
-	for (i = 0; i < BTREE_MAX_DEPTH; i++)
-		if (btree_node_locked(iter, i))
-			six_lock_increment(&iter->l[i].b->lock,
-					   __btree_lock_want(iter, i));
-
-	return &trans->iters[idx];
+	return iter;
 }

@@ -2010,10 +2041,11 @@ void *bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 	return p;
 }
 
-inline void bch2_trans_unlink_iters(struct btree_trans *trans, u64 iters)
+inline void bch2_trans_unlink_iters(struct btree_trans *trans)
 {
-	iters &= trans->iters_linked;
-	iters &= ~trans->iters_live;
+	u64 iters = trans->iters_linked &
+		~trans->iters_touched &
+		~trans->iters_live;
 
 	while (iters) {
 		unsigned idx = __ffs64(iters);

@@ -2023,33 +2055,24 @@ inline void bch2_trans_unlink_iters(struct btree_trans *trans)
 	}
 }
 
-void bch2_trans_begin(struct btree_trans *trans)
+void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
 {
-	u64 iters_to_unlink;
+	struct btree_iter *iter;
 
-	/*
-	 * On transaction restart, the transaction isn't required to allocate
-	 * all the same iterators it on the last iteration:
-	 *
-	 * Unlink any iterators it didn't use this iteration, assuming it got
-	 * further (allocated an iter with a higher idx) than where the iter
-	 * was originally allocated:
-	 */
-	iters_to_unlink = ~trans->iters_live &
-		((1ULL << fls64(trans->iters_live)) - 1);
+	trans_for_each_iter(trans, iter)
+		iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
 
-	iters_to_unlink |= trans->iters_unlink_on_restart;
-	iters_to_unlink |= trans->iters_unlink_on_commit;
+	bch2_trans_unlink_iters(trans);
 
-	trans->iters_live	= 0;
+	if (flags & TRANS_RESET_ITERS)
+		trans->iters_live = 0;
 
-	bch2_trans_unlink_iters(trans, iters_to_unlink);
+	trans->iters_touched &= trans->iters_live;
 
-	trans->iters_touched	= 0;
-	trans->iters_unlink_on_restart	= 0;
-	trans->iters_unlink_on_commit	= 0;
 	trans->nr_updates	= 0;
-	trans->mem_top		= 0;
+
+	if (flags & TRANS_RESET_MEM)
+		trans->mem_top = 0;
 
 	bch2_btree_iter_traverse_all(trans);
 }
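Note: bch2_trans_begin() becomes a thin wrapper over bch2_trans_reset() (the header hunk further down adds the TRANS_RESET_* flags). A hedged sketch of the retry idiom as a caller would use it after this change; do_work() is illustrative:

/* Sketch: restart loop under the new reset API. */
static int do_work(struct btree_trans *trans);	/* illustrative */

static int example_retry(struct btree_trans *trans)
{
	int ret;

	do {
		/* equivalent to bch2_trans_begin(trans): */
		bch2_trans_reset(trans, TRANS_RESET_ITERS|TRANS_RESET_MEM);

		ret = do_work(trans);
	} while (ret == -EINTR);	/* restart on lock/transaction restart */

	return ret;
}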
@@ -246,6 +246,11 @@ static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
 		: bch2_btree_iter_next(iter);
 }
 
+static inline int bkey_err(struct bkey_s_c k)
+{
+	return PTR_ERR_OR_ZERO(k.k);
+}
+
 #define for_each_btree_key(_trans, _iter, _btree_id,			\
 			   _start, _flags, _k, _ret)			\
 	for ((_ret) = PTR_ERR_OR_ZERO((_iter) =				\

@@ -257,57 +262,39 @@ static inline struct bkey_s_c __bch2_btree_iter_next(struct btree_iter *iter,
 	     (_ret) = PTR_ERR_OR_ZERO(((_k) =				\
 			__bch2_btree_iter_next(_iter, _flags)).k))
 
-#define for_each_btree_key_continue(_iter, _flags, _k)			\
+#define for_each_btree_key_continue(_iter, _flags, _k, _ret)		\
 	for ((_k) = __bch2_btree_iter_peek(_iter, _flags);		\
-	     !IS_ERR_OR_NULL((_k).k);					\
+	     !((_ret) = bkey_err(_k)) && (_k).k;			\
 	     (_k) = __bch2_btree_iter_next(_iter, _flags))
 
-static inline int bkey_err(struct bkey_s_c k)
-{
-	return PTR_ERR_OR_ZERO(k.k);
-}
-
 /* new multiple iterator interface: */
 
 int bch2_trans_iter_put(struct btree_trans *, struct btree_iter *);
 int bch2_trans_iter_free(struct btree_trans *, struct btree_iter *);
 int bch2_trans_iter_free_on_commit(struct btree_trans *, struct btree_iter *);
 
-void bch2_trans_unlink_iters(struct btree_trans *, u64);
+void bch2_trans_unlink_iters(struct btree_trans *);
 
-struct btree_iter *__bch2_trans_get_iter(struct btree_trans *, enum btree_id,
-					 struct bpos, unsigned, u64);
+struct btree_iter *bch2_trans_get_iter(struct btree_trans *, enum btree_id,
+				       struct bpos, unsigned);
 struct btree_iter *bch2_trans_copy_iter(struct btree_trans *,
 					struct btree_iter *);
 
-static __always_inline u64 __btree_iter_id(void)
-{
-	u64 ret = 0;
-
-	ret <<= 32;
-	ret |= _RET_IP_ & U32_MAX;
-	ret <<= 32;
-	ret |= _THIS_IP_ & U32_MAX;
-	return ret;
-}
-
-static __always_inline struct btree_iter *
-bch2_trans_get_iter(struct btree_trans *trans, enum btree_id btree_id,
-		    struct bpos pos, unsigned flags)
-{
-	return __bch2_trans_get_iter(trans, btree_id, pos, flags,
-				     __btree_iter_id());
-}
-
 struct btree_iter *bch2_trans_get_node_iter(struct btree_trans *,
 					    enum btree_id, struct bpos,
 					    unsigned, unsigned, unsigned);
 
-void bch2_trans_begin(struct btree_trans *);
+#define TRANS_RESET_ITERS	(1 << 0)
+#define TRANS_RESET_MEM		(1 << 1)
+
+void bch2_trans_reset(struct btree_trans *, unsigned);
+
+static inline void bch2_trans_begin(struct btree_trans *trans)
+{
+	return bch2_trans_reset(trans, TRANS_RESET_ITERS|TRANS_RESET_MEM);
+}
 
 static inline void bch2_trans_begin_updates(struct btree_trans *trans)
 {
-	trans->nr_updates = 0;
+	return bch2_trans_reset(trans, TRANS_RESET_MEM);
 }
 
 void *bch2_trans_kmalloc(struct btree_trans *, size_t);
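Note: the _continue macro now reports errors through an explicit _ret instead of encoding them in the key pointer. A hedged usage sketch mirroring the fsck callers changed later in this commit (function name illustrative):

/* Sketch: error handling with the reworked iteration macro. */
static int example_scan(struct btree_trans *trans)
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	iter = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, POS_MIN, 0);
	if (IS_ERR(iter))
		return PTR_ERR(iter);

	for_each_btree_key_continue(iter, 0, k, ret) {
		/* ... inspect k; break stops the scan early ... */
	}

	/* ret is 0 at end-of-btree, or the error that stopped the loop */
	return bch2_trans_iter_put(trans, iter) ?: ret;
}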
@@ -180,20 +180,21 @@ struct btree_node_iter {
 
 enum btree_iter_type {
 	BTREE_ITER_KEYS,
-	BTREE_ITER_SLOTS,
 	BTREE_ITER_NODES,
 };
 
 #define BTREE_ITER_TYPE			((1 << 2) - 1)
 
-#define BTREE_ITER_INTENT		(1 << 2)
-#define BTREE_ITER_PREFETCH		(1 << 3)
+#define BTREE_ITER_SLOTS		(1 << 2)
+#define BTREE_ITER_INTENT		(1 << 3)
+#define BTREE_ITER_PREFETCH		(1 << 4)
+#define BTREE_ITER_KEEP_UNTIL_COMMIT	(1 << 5)
 /*
  * Used in bch2_btree_iter_traverse(), to indicate whether we're searching for
  * @pos or the first key strictly greater than @pos
  */
-#define BTREE_ITER_IS_EXTENTS		(1 << 4)
-#define BTREE_ITER_ERROR		(1 << 5)
+#define BTREE_ITER_IS_EXTENTS		(1 << 6)
+#define BTREE_ITER_ERROR		(1 << 7)
 
 enum btree_iter_uptodate {
 	BTREE_ITER_UPTODATE	= 0,

@@ -234,33 +235,16 @@ struct btree_iter {
 	 * bch2_btree_iter_next_slot() can correctly advance pos.
 	 */
 	struct bkey		k;
-
-	u64			id;
 };
 
-struct deferred_update {
-	struct journal_preres	res;
-	struct journal_entry_pin journal;
-
-	spinlock_t		lock;
-	unsigned		dirty:1;
-
-	u8			allocated_u64s;
-	enum btree_id		btree_id;
-
-	/* must be last: */
-	struct bkey_i		k;
-};
+static inline enum btree_iter_type btree_iter_type(struct btree_iter *iter)
+{
+	return iter->flags & BTREE_ITER_TYPE;
+}
 
 struct btree_insert_entry {
 	struct bkey_i		*k;
-
-	union {
 	struct btree_iter	*iter;
-	struct deferred_update	*d;
-	};
-
-	bool			deferred;
 };
 
 #define BTREE_ITER_MAX		64

@@ -273,8 +257,6 @@ struct btree_trans {
 	u64			iters_linked;
 	u64			iters_live;
 	u64			iters_touched;
-	u64			iters_unlink_on_restart;
-	u64			iters_unlink_on_commit;
 
 	u8			nr_iters;
 	u8			nr_updates;
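Note: with BTREE_ITER_SLOTS demoted from iterator type to flag, the low two bits of iter->flags encode the type and everything above them is a modifier — which is why peek_slot() now checks for type BTREE_ITER_KEYS. A hedged illustration using the values from the hunk above (helper name illustrative):

/* Low 2 bits: enum btree_iter_type; bits 2..7: modifier flags. */
static inline int example_is_intent_slots_iter(struct btree_iter *iter)
{
	return btree_iter_type(iter) == BTREE_ITER_KEYS &&
	       (iter->flags & (BTREE_ITER_SLOTS|BTREE_ITER_INTENT)) ==
			      (BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
}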
@@ -15,24 +15,6 @@ bool bch2_btree_bset_insert_key(struct btree_iter *, struct btree *,
 void bch2_btree_journal_key(struct btree_trans *, struct btree_iter *,
 			    struct bkey_i *);
 
-void bch2_deferred_update_free(struct bch_fs *,
-			       struct deferred_update *);
-struct deferred_update *
-bch2_deferred_update_alloc(struct bch_fs *, enum btree_id, unsigned);
-
-#define BTREE_INSERT_ENTRY(_iter, _k)					\
-	((struct btree_insert_entry) {					\
-		.iter		= (_iter),				\
-		.k		= (_k),					\
-	})
-
-#define BTREE_INSERT_DEFERRED(_d, _k)					\
-	((struct btree_insert_entry) {					\
-		.k		= (_k),					\
-		.d		= (_d),					\
-		.deferred	= true,					\
-	})
-
 enum {
 	__BTREE_INSERT_ATOMIC,
 	__BTREE_INSERT_NOUNLOCK,

@@ -120,11 +102,16 @@ int bch2_trans_commit(struct btree_trans *,
 		      u64 *, unsigned);
 
 static inline void bch2_trans_update(struct btree_trans *trans,
-				     struct btree_insert_entry entry)
+				     struct btree_iter *iter,
+				     struct bkey_i *k)
 {
 	EBUG_ON(trans->nr_updates >= trans->nr_iters + 4);
 
-	trans->updates[trans->nr_updates++] = entry;
+	iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
+
+	trans->updates[trans->nr_updates++] = (struct btree_insert_entry) {
+		.iter = iter, .k = k
+	};
 }
 
 #define bch2_trans_do(_c, _journal_seq, _flags, _do)			\

@@ -145,23 +132,9 @@ static inline void bch2_trans_update(struct btree_trans *trans,
 	_ret;								\
 })
 
-#define __trans_next_update(_trans, _i, _filter)			\
-({									\
-	while ((_i) < (_trans)->updates + (_trans->nr_updates) && !(_filter))\
-		(_i)++;							\
-									\
-	(_i) < (_trans)->updates + (_trans->nr_updates);		\
-})
-
-#define __trans_for_each_update(_trans, _i, _filter)			\
+#define trans_for_each_update(_trans, _i)				\
 	for ((_i) = (_trans)->updates;					\
-	     __trans_next_update(_trans, _i, _filter);			\
+	     (_i) < (_trans)->updates + (_trans)->nr_updates;		\
 	     (_i)++)
 
-#define trans_for_each_update(trans, i)					\
-	__trans_for_each_update(trans, i, true)
-
-#define trans_for_each_update_iter(trans, i)				\
-	__trans_for_each_update(trans, i, !(i)->deferred)
-
 #endif /* _BCACHEFS_BTREE_UPDATE_H */
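Note: taken together, bch2_trans_update() now pins its iterator with BTREE_ITER_KEEP_UNTIL_COMMIT, bch2_trans_iter_put() refuses to free pinned iterators, and commit/reset clear the flag. A hedged sketch of that lifecycle (function name illustrative):

/*
 * Sketch: why an iterator handed to bch2_trans_update() survives a
 * bch2_trans_iter_put() issued before the commit.
 */
static int example_update_then_put(struct btree_trans *trans,
				   struct btree_iter *iter,
				   struct bkey_i *k)
{
	bch2_trans_update(trans, iter, k);	/* sets KEEP_UNTIL_COMMIT */

	/* Safe: put() sees the flag and leaves the iterator linked: */
	bch2_trans_iter_put(trans, iter);

	/* Commit uses the still-live iterator, then clears the flag: */
	return bch2_trans_commit(trans, NULL, NULL, 0);
}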
@@ -28,8 +28,7 @@ static inline bool same_leaf_as_prev(struct btree_trans *trans,
 		? trans->updates + trans->updates_sorted[sorted_idx - 1]
 		: NULL;
 
-	return !i->deferred &&
-		prev &&
+	return prev &&
 		i->iter->l[0].b == prev->iter->l[0].b;
 }
 

@@ -73,13 +72,6 @@ static void btree_trans_lock_write(struct btree_trans *trans, bool lock)
 	}
 }
 
-static inline int btree_trans_cmp(struct btree_insert_entry l,
-				  struct btree_insert_entry r)
-{
-	return cmp_int(l.deferred, r.deferred) ?:
-		btree_iter_cmp(l.iter, r.iter);
-}
-
 static inline void btree_trans_sort_updates(struct btree_trans *trans)
 {
 	struct btree_insert_entry *l, *r;

@@ -89,7 +81,7 @@ static inline void btree_trans_sort_updates(struct btree_trans *trans)
 	for (pos = 0; pos < nr; pos++) {
 		r = trans->updates + trans->updates_sorted[pos];
 
-		if (btree_trans_cmp(*l, *r) <= 0)
+		if (btree_iter_cmp(l->iter, r->iter) <= 0)
 			break;
 	}
 

@@ -312,143 +304,23 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
 	trace_btree_insert_key(c, b, insert->k);
 }
 
-/* Deferred btree updates: */
-
-static void deferred_update_flush(struct journal *j,
-				  struct journal_entry_pin *pin,
-				  u64 seq)
-{
-	struct bch_fs *c = container_of(j, struct bch_fs, journal);
-	struct deferred_update *d =
-		container_of(pin, struct deferred_update, journal);
-	struct journal_preres res = { 0 };
-	u64 tmp[32];
-	struct bkey_i *k = (void *) tmp;
-	int ret;
-
-	if (d->allocated_u64s > ARRAY_SIZE(tmp)) {
-		k = kmalloc(d->allocated_u64s * sizeof(u64), GFP_NOFS);
-
-		BUG_ON(!k); /* XXX */
-	}
-
-	spin_lock(&d->lock);
-	if (d->dirty) {
-		BUG_ON(jset_u64s(d->k.k.u64s) > d->res.u64s);
-
-		swap(res, d->res);
-
-		BUG_ON(d->k.k.u64s > d->allocated_u64s);
-
-		bkey_copy(k, &d->k);
-		d->dirty = false;
-		spin_unlock(&d->lock);
-
-		ret = bch2_btree_insert(c, d->btree_id, k, NULL, NULL,
-					BTREE_INSERT_NOFAIL|
-					BTREE_INSERT_USE_RESERVE|
-					BTREE_INSERT_JOURNAL_RESERVED);
-		bch2_fs_fatal_err_on(ret && !bch2_journal_error(j),
-				     c, "error flushing deferred btree update: %i", ret);
-
-		spin_lock(&d->lock);
-	}
-
-	if (!d->dirty)
-		bch2_journal_pin_drop(j, &d->journal);
-	spin_unlock(&d->lock);
-
-	bch2_journal_preres_put(j, &res);
-	if (k != (void *) tmp)
-		kfree(k);
-}
-
-static void btree_insert_key_deferred(struct btree_trans *trans,
-				      struct btree_insert_entry *insert)
-{
-	struct bch_fs *c = trans->c;
-	struct journal *j = &c->journal;
-	struct deferred_update *d = insert->d;
-	int difference;
-
-	BUG_ON(trans->flags & BTREE_INSERT_JOURNAL_REPLAY);
-	BUG_ON(insert->k->u64s > d->allocated_u64s);
-
-	__btree_journal_key(trans, d->btree_id, insert->k);
-
-	spin_lock(&d->lock);
-	BUG_ON(jset_u64s(insert->k->u64s) >
-	       trans->journal_preres.u64s);
-
-	difference = jset_u64s(insert->k->u64s) - d->res.u64s;
-	if (difference > 0) {
-		trans->journal_preres.u64s	-= difference;
-		d->res.u64s			+= difference;
-	}
-
-	bkey_copy(&d->k, insert->k);
-	d->dirty = true;
-
-	bch2_journal_pin_update(j, trans->journal_res.seq, &d->journal,
-				deferred_update_flush);
-	spin_unlock(&d->lock);
-}
-
-void bch2_deferred_update_free(struct bch_fs *c,
-			       struct deferred_update *d)
-{
-	deferred_update_flush(&c->journal, &d->journal, 0);
-
-	BUG_ON(journal_pin_active(&d->journal));
-
-	bch2_journal_pin_flush(&c->journal, &d->journal);
-	kfree(d);
-}
-
-struct deferred_update *
-bch2_deferred_update_alloc(struct bch_fs *c,
-			   enum btree_id btree_id,
-			   unsigned u64s)
-{
-	struct deferred_update *d;
-
-	BUG_ON(u64s > U8_MAX);
-
-	d = kmalloc(offsetof(struct deferred_update, k) +
-		    u64s * sizeof(u64), GFP_NOFS);
-	BUG_ON(!d);
-
-	memset(d, 0, offsetof(struct deferred_update, k));
-
-	spin_lock_init(&d->lock);
-	d->allocated_u64s	= u64s;
-	d->btree_id		= btree_id;
-
-	return d;
-}
-
 /* Normal update interface: */
 
 static inline void btree_insert_entry_checks(struct btree_trans *trans,
 					     struct btree_insert_entry *i)
 {
 	struct bch_fs *c = trans->c;
-	enum btree_id btree_id = !i->deferred
-		? i->iter->btree_id
-		: i->d->btree_id;
-
-	if (!i->deferred) {
-		BUG_ON(i->iter->level);
-		BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
-		EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
-			bkey_cmp(i->k->k.p, i->iter->l[0].b->key.k.p) > 0);
-		EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
-			!(trans->flags & BTREE_INSERT_ATOMIC));
-	}
+
+	BUG_ON(i->iter->level);
+	BUG_ON(bkey_cmp(bkey_start_pos(&i->k->k), i->iter->pos));
+	EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
+		bkey_cmp(i->k->k.p, i->iter->l[0].b->key.k.p) > 0);
+	EBUG_ON((i->iter->flags & BTREE_ITER_IS_EXTENTS) &&
+		!(trans->flags & BTREE_INSERT_ATOMIC));
 
 	BUG_ON(debug_check_bkeys(c) &&
 	       !bkey_deleted(&i->k->k) &&
-	       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), btree_id));
+	       bch2_bkey_invalid(c, bkey_i_to_s_c(i->k), i->iter->btree_id));
 }
 
 static int bch2_trans_journal_preres_get(struct btree_trans *trans)

@@ -459,7 +331,7 @@ static int bch2_trans_journal_preres_get(struct btree_trans *trans)
 	int ret;
 
 	trans_for_each_update(trans, i)
-		if (i->deferred)
+		if (0)
 			u64s += jset_u64s(i->k->k.u64s);
 
 	if (!u64s)

@@ -551,10 +423,7 @@ static int btree_trans_check_can_insert(struct btree_trans *trans,
 static inline void do_btree_insert_one(struct btree_trans *trans,
 				       struct btree_insert_entry *insert)
 {
-	if (likely(!insert->deferred))
-		btree_insert_key_leaf(trans, insert);
-	else
-		btree_insert_key_deferred(trans, insert);
+	btree_insert_key_leaf(trans, insert);
 }
 
 static inline bool update_triggers_transactional(struct btree_trans *trans,

@@ -570,7 +439,6 @@ static inline bool update_has_triggers(struct btree_trans *trans,
 				       struct btree_insert_entry *i)
 {
 	return likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
-		!i->deferred &&
 		btree_node_type_needs_gc(i->iter->btree_id);
 }
 

@@ -588,14 +456,14 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 		: 0;
 	int ret;
 
-	trans_for_each_update_iter(trans, i)
+	trans_for_each_update(trans, i)
 		BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
 
 	/*
 	 * note: running triggers will append more updates to the list of
 	 * updates as we're walking it:
 	 */
-	trans_for_each_update_iter(trans, i)
+	trans_for_each_update(trans, i)
 		if (update_has_triggers(trans, i) &&
 		    update_triggers_transactional(trans, i)) {
 			ret = bch2_trans_mark_update(trans, i->iter, i->k);

@@ -633,7 +501,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	if (ret)
 		goto out;
 
-	trans_for_each_update_iter(trans, i) {
+	trans_for_each_update(trans, i) {
 		if (!btree_node_type_needs_gc(i->iter->btree_id))
 			continue;
 

@@ -673,7 +541,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 			i->k->k.version = MAX_VERSION;
 	}
 
-	trans_for_each_update_iter(trans, i)
+	trans_for_each_update(trans, i)
 		if (update_has_triggers(trans, i) &&
 		    !update_triggers_transactional(trans, i))
 			bch2_mark_update(trans, i, fs_usage, mark_flags);

@@ -687,7 +555,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 
 	if (likely(!(trans->flags & BTREE_INSERT_NOMARK)) &&
 	    unlikely(c->gc_pos.phase))
-		trans_for_each_update_iter(trans, i)
+		trans_for_each_update(trans, i)
 			if (gc_visited(c, gc_pos_btree_node(i->iter->l[0].b)))
 				bch2_mark_update(trans, i, NULL,
 						 mark_flags|

@@ -772,7 +640,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 	case BTREE_INSERT_NEED_MARK_REPLICAS:
 		bch2_trans_unlock(trans);
 
-		trans_for_each_update_iter(trans, i) {
+		trans_for_each_update(trans, i) {
 			ret = bch2_mark_bkey_replicas(c, bkey_i_to_s_c(i->k));
 			if (ret)
 				return ret;

@@ -842,7 +710,7 @@ static int __bch2_trans_commit(struct btree_trans *trans,
 	unsigned iter;
 	int ret;
 
-	trans_for_each_update_iter(trans, i) {
+	trans_for_each_update(trans, i) {
 		if (!bch2_btree_iter_upgrade(i->iter, 1)) {
 			trace_trans_restart_upgrade(trans->ip);
 			ret = -EINTR;

@@ -868,7 +736,7 @@ static int __bch2_trans_commit(struct btree_trans *trans,
 
 	trans->nounlock = false;
 
-	trans_for_each_update_iter(trans, i)
+	trans_for_each_update(trans, i)
 		bch2_btree_iter_downgrade(i->iter);
 err:
 	/* make sure we didn't drop or screw up locks: */

@@ -884,6 +752,7 @@ int bch2_trans_commit(struct btree_trans *trans,
 {
 	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i = NULL;
+	struct btree_iter *iter;
 	unsigned orig_nr_updates	= trans->nr_updates;
 	unsigned orig_mem_top		= trans->mem_top;
 	int ret = 0;

@@ -946,9 +815,11 @@ out_noupdates:
 
 	BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
 
+	trans_for_each_iter(trans, iter)
+		iter->flags &= ~BTREE_ITER_KEEP_UNTIL_COMMIT;
+
 	if (!ret) {
-		bch2_trans_unlink_iters(trans, ~trans->iters_touched|
-					trans->iters_unlink_on_commit);
+		bch2_trans_unlink_iters(trans);
 		trans->iters_touched = 0;
 	}
 	trans->nr_updates	= 0;

@@ -995,7 +866,7 @@ retry:
 	iter = bch2_trans_get_iter(&trans, id, bkey_start_pos(&k->k),
 				   BTREE_ITER_INTENT);
 
-	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, k));
+	bch2_trans_update(&trans, iter, k);
 
 	ret = bch2_trans_commit(&trans, disk_res, journal_seq, flags);
 	if (ret == -EINTR)

@@ -1045,7 +916,7 @@ retry:
 			break;
 		}
 
-		bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &delete));
+		bch2_trans_update(trans, iter, &delete);
 		ret = bch2_trans_commit(trans, NULL, journal_seq,
 					BTREE_INSERT_ATOMIC|
 					BTREE_INSERT_NOFAIL);

@@ -1072,7 +943,7 @@ int bch2_btree_delete_at(struct btree_trans *trans,
 	bkey_init(&k.k);
 	k.k.p = iter->pos;
 
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &k));
+	bch2_trans_update(trans, iter, &k);
 	return bch2_trans_commit(trans, NULL, NULL,
 				 BTREE_INSERT_NOFAIL|
 				 BTREE_INSERT_USE_RESERVE|flags);
@@ -1316,7 +1316,7 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans,
 		bch_err(c, "disk usage increased more than %llu sectors reserved",
 			disk_res_sectors);
 
-	trans_for_each_update_iter(trans, i) {
+	trans_for_each_update(trans, i) {
 		struct btree_iter	*iter = i->iter;
 		struct btree		*b = iter->l[0].b;
 		struct btree_node_iter	node_iter = iter->l[0].iter;

@@ -1358,7 +1358,7 @@ static int trans_get_key(struct btree_trans *trans,
 	struct btree_insert_entry *i;
 	int ret;
 
-	trans_for_each_update_iter(trans, i)
+	trans_for_each_update(trans, i)
 		if (i->iter->btree_id == btree_id &&
 		    (btree_node_type_is_extents(btree_id)
 		     ? bkey_cmp(pos, bkey_start_pos(&i->k->k)) >= 0 &&

@@ -1369,13 +1369,11 @@ static int trans_get_key(struct btree_trans *trans,
 			return 1;
 		}
 
-	*iter = __bch2_trans_get_iter(trans, btree_id, pos,
-				      BTREE_ITER_SLOTS|BTREE_ITER_INTENT, 0);
+	*iter = bch2_trans_get_iter(trans, btree_id, pos,
+				    BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 	if (IS_ERR(*iter))
 		return PTR_ERR(*iter);
 
-	bch2_trans_iter_free_on_commit(trans, *iter);
-
 	*k = bch2_btree_iter_peek_slot(*iter);
 	ret = bkey_err(*k);
 	if (ret)

@@ -1397,13 +1395,13 @@ static void *trans_update_key(struct btree_trans *trans,
 	bkey_init(&new_k->k);
 	new_k->k.p = iter->pos;
 
-	trans_for_each_update_iter(trans, i)
+	trans_for_each_update(trans, i)
 		if (i->iter == iter) {
 			i->k = new_k;
 			return new_k;
 		}
 
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, new_k));
+	bch2_trans_update(trans, iter, new_k);
 	return new_k;
 }
 

@@ -255,9 +255,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
 			 * new_dst at the src position:
 			 */
 			new_dst->k.p = src_iter->pos;
-			bch2_trans_update(trans,
-					  BTREE_INSERT_ENTRY(src_iter,
-							     &new_dst->k_i));
+			bch2_trans_update(trans, src_iter,
+					  &new_dst->k_i);
 			return 0;
 		} else {
 			/* If we're overwriting, we can't insert new_dst

@@ -280,8 +279,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
 		}
 	}
 
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(src_iter, &new_src->k_i));
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(dst_iter, &new_dst->k_i));
+	bch2_trans_update(trans, src_iter, &new_src->k_i);
+	bch2_trans_update(trans, dst_iter, &new_dst->k_i);
 	return 0;
 }
 

@@ -738,7 +738,7 @@ found_slot:
 
 	stripe->k.p = iter->pos;
 
-	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &stripe->k_i));
+	bch2_trans_update(&trans, iter, &stripe->k_i);
 
 	ret = bch2_trans_commit(&trans, NULL, NULL,
 				BTREE_INSERT_ATOMIC|

@@ -819,7 +819,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
 
 		extent_stripe_ptr_add(e, s, ptr, idx);
 
-		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.k));
+		bch2_trans_update(&trans, iter, &tmp.k);
 
 		ret = bch2_trans_commit(&trans, NULL, NULL,
 					BTREE_INSERT_ATOMIC|

@@ -1231,7 +1231,7 @@ static int __bch2_stripe_write_key(struct btree_trans *trans,
 
 	spin_unlock(&c->ec_stripes_heap_lock);
 
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &new_key->k_i));
+	bch2_trans_update(trans, iter, &new_key->k_i);
 
 	return bch2_trans_commit(trans, NULL, NULL,
 				 BTREE_INSERT_NOFAIL|flags);

@@ -974,11 +974,11 @@ static int count_iters_for_insert(struct btree_trans *trans,
 		*nr_iters += 1;
 
 		if (overwrite &&
-		    k.k->type == KEY_TYPE_reflink_v) {
-			struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(k);
+		    r_k.k->type == KEY_TYPE_reflink_v) {
+			struct bkey_s_c_reflink_v r = bkey_s_c_to_reflink_v(r_k);
 
 			if (le64_to_cpu(r.v->refcount) == 1)
-				*nr_iters += bch2_bkey_nr_alloc_ptrs(k);
+				*nr_iters += bch2_bkey_nr_alloc_ptrs(r_k);
 		}
 
 		/*
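Note: the hunk above is the fix named in the commit title. When counting how many iterators an extent insert may need, the reflink branch was inspecting the key being inserted (k) instead of the indirect extent being overwritten (r_k), so overwrites of reflink_v keys could under-count. A simplified, self-contained model of the corrected pattern (all names illustrative):

/* Simplified model: count pointers of the key being walked over. */
enum { KEY_TYPE_EXTENT, KEY_TYPE_REFLINK_V };

struct key { int type; unsigned nr_ptrs; };

static void count_iters(const struct key *old, unsigned *nr_iters)
{
	*nr_iters += 1;

	/* The bug: this check ran against the new key instead of `old`. */
	if (old->type == KEY_TYPE_REFLINK_V)
		*nr_iters += old->nr_ptrs;
}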
@@ -274,7 +274,7 @@ static int sum_sector_overwrites(struct btree_trans *trans,
 		old = bch2_btree_iter_next_slot(iter);
 	}
 
-	bch2_trans_iter_free(trans, iter);
+	bch2_trans_iter_put(trans, iter);
 	return 0;
 }
 

@@ -316,69 +316,36 @@ int bch2_extent_update(struct btree_trans *trans,
 	if (!may_allocate && allocating)
 		return -ENOSPC;
 
-	bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, k));
+	bch2_trans_update(trans, extent_iter, k);
 
 	new_i_size = min(k->k.p.offset << 9, new_i_size);
 
 	/* XXX: inode->i_size locking */
 	if (i_sectors_delta ||
 	    new_i_size > inode->ei_inode.bi_size) {
-		if (c->opts.new_inode_updates) {
-			bch2_trans_unlock(trans);
-			mutex_lock(&inode->ei_update_lock);
-
-			if (!bch2_trans_relock(trans)) {
-				mutex_unlock(&inode->ei_update_lock);
-				return -EINTR;
-			}
-
-			inode_locked = true;
-
-			if (!inode->ei_inode_update)
-				inode->ei_inode_update =
-					bch2_deferred_update_alloc(c,
-								BTREE_ID_INODES, 64);
-
-			inode_u = inode->ei_inode;
-			inode_u.bi_sectors += i_sectors_delta;
-
-			/* XXX: this is slightly suspect */
-			if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
-			    new_i_size > inode_u.bi_size) {
-				inode_u.bi_size = new_i_size;
-				extended = true;
-			}
-
-			bch2_inode_pack(&inode_p, &inode_u);
-			bch2_trans_update(trans,
-				BTREE_INSERT_DEFERRED(inode->ei_inode_update,
-						      &inode_p.inode.k_i));
-		} else {
-			inode_iter = bch2_trans_get_iter(trans,
-				BTREE_ID_INODES,
-				POS(k->k.p.inode, 0),
-				BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-			if (IS_ERR(inode_iter))
-				return PTR_ERR(inode_iter);
-
-			ret = bch2_btree_iter_traverse(inode_iter);
-			if (ret)
-				goto err;
-
-			inode_u = inode->ei_inode;
-			inode_u.bi_sectors += i_sectors_delta;
-
-			/* XXX: this is slightly suspect */
-			if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
-			    new_i_size > inode_u.bi_size) {
-				inode_u.bi_size = new_i_size;
-				extended = true;
-			}
-
-			bch2_inode_pack(&inode_p, &inode_u);
-			bch2_trans_update(trans,
-				BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
-		}
+		inode_iter = bch2_trans_get_iter(trans,
+			BTREE_ID_INODES,
+			POS(k->k.p.inode, 0),
+			BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+		if (IS_ERR(inode_iter))
+			return PTR_ERR(inode_iter);
+
+		ret = bch2_btree_iter_traverse(inode_iter);
+		if (ret)
+			goto err;
+
+		inode_u = inode->ei_inode;
+		inode_u.bi_sectors += i_sectors_delta;
+
+		/* XXX: this is slightly suspect */
+		if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+		    new_i_size > inode_u.bi_size) {
+			inode_u.bi_size = new_i_size;
+			extended = true;
+		}
+
+		bch2_inode_pack(&inode_p, &inode_u);
+		bch2_trans_update(trans, inode_iter, &inode_p.inode.k_i);
 	}
 
 	ret = bch2_trans_commit(trans, disk_res,

@@ -439,10 +406,9 @@ static int bchfs_write_index_update(struct bch_write_op *wop)
 
 	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
 
-	iter = bch2_trans_get_iter(&trans,
-				   BTREE_ID_EXTENTS,
-				   bkey_start_pos(&k->k),
-				   BTREE_ITER_INTENT);
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   bkey_start_pos(&k->k),
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 
 	do {
 		BKEY_PADDED(k) tmp;

@@ -749,8 +715,8 @@ static void bch2_set_page_dirty(struct bch_fs *c,
 	struct bch_page_state *s = bch2_page_state(page);
 	unsigned i, dirty_sectors = 0;
 
-	WARN_ON(page_offset(page) + offset + len >
-		round_up(i_size_read(&inode->v), block_bytes(c)));
+	WARN_ON((u64) page_offset(page) + offset + len >
+		round_up((u64) i_size_read(&inode->v), block_bytes(c)));
 
 	for (i = round_down(offset, block_bytes(c)) >> 9;
 	     i < round_up(offset + len, block_bytes(c)) >> 9;

@@ -810,11 +776,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
 		goto out;
 	}
 
-	/* page is wholly or partially inside EOF */
-	if (((page->index + 1) << PAGE_SHIFT) <= isize)
-		len = PAGE_SIZE;
-	else
-		len = offset_in_page(isize);
+	len = min_t(loff_t, PAGE_SIZE, isize - page_offset(page));
 
 	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
 		unlock_page(page);

@@ -1767,14 +1729,6 @@ retry_reservation:
 	if (!copied)
 		goto out;
 
-	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
-	inode->ei_last_dirtied = (unsigned long) current;
-
-	spin_lock(&inode->v.i_lock);
-	if (pos + copied > inode->v.i_size)
-		i_size_write(&inode->v, pos + copied);
-	spin_unlock(&inode->v.i_lock);
-
 	if (copied < len &&
 	    ((offset + copied) & (PAGE_SIZE - 1))) {
 		struct page *page = pages[(offset + copied) >> PAGE_SHIFT];

@@ -1785,6 +1739,11 @@ retry_reservation:
 		}
 	}
 
+	spin_lock(&inode->v.i_lock);
+	if (pos + copied > inode->v.i_size)
+		i_size_write(&inode->v, pos + copied);
+	spin_unlock(&inode->v.i_lock);
+
 	while (set_dirty < copied) {
 		struct page *page = pages[(offset + set_dirty) >> PAGE_SHIFT];
 		unsigned pg_offset = (offset + set_dirty) & (PAGE_SIZE - 1);

@@ -1800,6 +1759,9 @@ retry_reservation:
 
 		set_dirty += pg_len;
 	}
+
+	nr_pages_copied = DIV_ROUND_UP(offset + copied, PAGE_SIZE);
+	inode->ei_last_dirtied = (unsigned long) current;
 out:
 	for (i = nr_pages_copied; i < nr_pages; i++) {
 		unlock_page(pages[i]);

@@ -2808,9 +2770,8 @@ reassemble:
 				   bkey_start_pos(&delete.k));
 		}
 
-		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(dst, &copy.k));
-		bch2_trans_update(&trans,
-				  BTREE_INSERT_ENTRY(del ?: src, &delete));
+		bch2_trans_update(&trans, dst, &copy.k);
+		bch2_trans_update(&trans, del ?: src, &delete);
 
 		if (copy.k.k.size == k.k->size) {
 			/*

@@ -2835,7 +2796,7 @@ reassemble:
 		bch2_disk_reservation_put(c, &disk_res);
 bkey_err:
 		if (del)
-			bch2_trans_iter_free(&trans, del);
+			bch2_trans_iter_put(&trans, del);
 		del = NULL;
 
 		if (!ret)

@@ -81,9 +81,7 @@ void bch2_inode_update_after_write(struct bch_fs *c,
 			      struct bch_inode_unpacked *bi,
 			      unsigned fields)
 {
-	set_nlink(&inode->v, bi->bi_flags & BCH_INODE_UNLINKED
-		  ? 0
-		  : bi->bi_nlink + nlink_bias(inode->v.i_mode));
+	set_nlink(&inode->v, bch2_inode_nlink_get(bi));
 	i_uid_write(&inode->v, bi->bi_uid);
 	i_gid_write(&inode->v, bi->bi_gid);
 	inode->v.i_mode	= bi->bi_mode;

@@ -106,30 +104,22 @@ int __must_check bch2_write_inode_trans(struct btree_trans *trans,
 				  inode_set_fn set,
 				  void *p)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter *iter = NULL;
 	struct bkey_inode_buf *inode_p;
 	int ret;
 
 	lockdep_assert_held(&inode->ei_update_lock);
 
-	if (c->opts.new_inode_updates) {
-		/* XXX: Don't do this with btree locks held */
-		if (!inode->ei_inode_update)
-			inode->ei_inode_update =
-				bch2_deferred_update_alloc(c, BTREE_ID_INODES, 64);
-	} else {
-		iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
-					   POS(inode->v.i_ino, 0),
-					   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
-		if (IS_ERR(iter))
-			return PTR_ERR(iter);
+	iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
+				   POS(inode->v.i_ino, 0),
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+	if (IS_ERR(iter))
+		return PTR_ERR(iter);
 
-		/* The btree node lock is our lock on the inode: */
-		ret = bch2_btree_iter_traverse(iter);
-		if (ret)
-			return ret;
-	}
+	/* The btree node lock is our lock on the inode: */
+	ret = bch2_btree_iter_traverse(iter);
+	if (ret)
+		return ret;
 
 	*inode_u = inode->ei_inode;
 

@@ -144,14 +134,7 @@ int __must_check bch2_write_inode_trans(struct btree_trans *trans,
 		return PTR_ERR(inode_p);
 
 	bch2_inode_pack(inode_p, inode_u);
-
-	if (!inode->ei_inode_update)
-		bch2_trans_update(trans,
-			BTREE_INSERT_ENTRY(iter, &inode_p->inode.k_i));
-	else
-		bch2_trans_update(trans,
-			BTREE_INSERT_DEFERRED(inode->ei_inode_update,
-					      &inode_p->inode.k_i));
+	bch2_trans_update(trans, iter, &inode_p->inode.k_i);
 
 	return 0;
 }

@@ -489,12 +472,7 @@ static int inode_update_for_link_fn(struct bch_inode_info *inode,
 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
 
 	bi->bi_ctime = bch2_current_time(c);
-
-	if (bi->bi_flags & BCH_INODE_UNLINKED)
-		bi->bi_flags &= ~BCH_INODE_UNLINKED;
-	else
-		bi->bi_nlink++;
-
+	bch2_inode_nlink_inc(bi);
 	return 0;
 }
 

@@ -577,11 +555,7 @@ static int inode_update_for_unlink_fn(struct bch_inode_info *inode,
 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
 
 	bi->bi_ctime = bch2_current_time(c);
-	if (bi->bi_nlink)
-		bi->bi_nlink--;
-	else
-		bi->bi_flags |= BCH_INODE_UNLINKED;
-
+	bch2_inode_nlink_dec(bi);
 	return 0;
 }
 

@@ -749,10 +723,7 @@ static int inode_update_for_rename_fn(struct bch_inode_info *inode,
 			BUG_ON(bi->bi_nlink &&
 			       S_ISDIR(info->dst_inode->v.i_mode));
 
-			if (bi->bi_nlink)
-				bi->bi_nlink--;
-			else
-				bi->bi_flags |= BCH_INODE_UNLINKED;
+			bch2_inode_nlink_dec(bi);
 		}
 
 		if (inode == info->src_dir ||

@@ -1412,7 +1383,6 @@ static struct inode *bch2_alloc_inode(struct super_block *sb)
 	inode_init_once(&inode->v);
 	mutex_init(&inode->ei_update_lock);
 	mutex_init(&inode->ei_quota_lock);
-	inode->ei_inode_update = NULL;
 	inode->ei_journal_seq = 0;
 
 	return &inode->v;

@@ -1470,10 +1440,6 @@ static void bch2_evict_inode(struct inode *vinode)
 
 	BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
 
-	if (inode->ei_inode_update)
-		bch2_deferred_update_free(c, inode->ei_inode_update);
-	inode->ei_inode_update = NULL;
-
 	if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
 		bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
 				KEY_TYPE_QUOTA_WARN);

@@ -14,7 +14,6 @@ struct bch_inode_info {
 	struct inode		v;
 
 	struct mutex		ei_update_lock;
-	struct deferred_update	*ei_inode_update;
 	u64			ei_journal_seq;
 	u64			ei_quota_reserved;
 	unsigned long		ei_last_dirtied;

@@ -83,11 +82,6 @@ static inline u8 mode_to_type(umode_t mode)
 	return (mode >> 12) & 15;
 }
 
-static inline unsigned nlink_bias(umode_t mode)
-{
-	return S_ISDIR(mode) ? 2 : 1;
-}
-
 static inline bool inode_attr_changing(struct bch_inode_info *dir,
 				       struct bch_inode_info *inode,
 				       enum inode_opt_id id)

@@ -248,7 +248,7 @@ static int hash_check_duplicates(struct btree_trans *trans,
 	iter = bch2_trans_copy_iter(trans, h->chain);
 	BUG_ON(IS_ERR(iter));
 
-	for_each_btree_key_continue(iter, 0, k2) {
+	for_each_btree_key_continue(iter, 0, k2, ret) {
 		if (bkey_cmp(k2.k->p, k.k->p) >= 0)
 			break;
 

@@ -393,7 +393,7 @@ static int check_dirent_hash(struct btree_trans *trans, struct hash_check *h,
 
 	if (fsck_err(c, "dirent with junk at end, was %s (%zu) now %s (%u)",
 		     buf, strlen(buf), d->v.d_name, len)) {
-		bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &d->k_i));
+		bch2_trans_update(trans, iter, &d->k_i);
 
 		ret = bch2_trans_commit(trans, NULL, NULL,
 					BTREE_INSERT_NOFAIL|

@@ -458,7 +458,7 @@ static int check_extents(struct bch_fs *c)
 	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
 				   POS(BCACHEFS_ROOT_INO, 0), 0);
 retry:
-	for_each_btree_key_continue(iter, 0, k) {
+	for_each_btree_key_continue(iter, 0, k, ret) {
 		ret = walk_inode(&trans, &w, k.k->p.inode);
 		if (ret)
 			break;

@@ -553,7 +553,7 @@ static int check_dirents(struct bch_fs *c)
 	iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
 				   POS(BCACHEFS_ROOT_INO, 0), 0);
 retry:
-	for_each_btree_key_continue(iter, 0, k) {
+	for_each_btree_key_continue(iter, 0, k, ret) {
 		struct bkey_s_c_dirent d;
 		struct bch_inode_unpacked target;
 		bool have_target;

@@ -663,8 +663,7 @@ retry:
 			bkey_reassemble(&n->k_i, d.s_c);
 			n->v.d_type = mode_to_type(target.bi_mode);
 
-			bch2_trans_update(&trans,
-				BTREE_INSERT_ENTRY(iter, &n->k_i));
+			bch2_trans_update(&trans, iter, &n->k_i);
 
 			ret = bch2_trans_commit(&trans, NULL, NULL,
 						BTREE_INSERT_NOFAIL|

@@ -707,7 +706,7 @@ static int check_xattrs(struct bch_fs *c)
 	iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
 				   POS(BCACHEFS_ROOT_INO, 0), 0);
 retry:
-	for_each_btree_key_continue(iter, 0, k) {
+	for_each_btree_key_continue(iter, 0, k, ret) {
 		ret = walk_inode(&trans, &w, k.k->p.inode);
 		if (ret)
 			break;

@@ -995,7 +994,7 @@ up:
 
 	iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS_MIN, 0);
retry:
-	for_each_btree_key_continue(iter, 0, k) {
+	for_each_btree_key_continue(iter, 0, k, ret) {
 		if (k.k->type != KEY_TYPE_inode)
 			continue;
 

@@ -1021,7 +1020,7 @@ retry:
 			had_unreachable = true;
 		}
 	}
-	ret = bch2_trans_iter_free(&trans, iter);
+	bch2_trans_iter_free(&trans, iter);
 	if (ret)
 		goto err;
 

@@ -1116,9 +1115,7 @@ static int check_inode_nlink(struct bch_fs *c,
 			     struct nlink *link,
 			     bool *do_update)
 {
-	u32 i_nlink = u->bi_flags & BCH_INODE_UNLINKED
-		? 0
-		: u->bi_nlink + nlink_bias(u->bi_mode);
+	u32 i_nlink = bch2_inode_nlink_get(u);
 	u32 real_i_nlink =
 		link->count * nlink_bias(u->bi_mode) +
 		link->dir_count;

@@ -1197,14 +1194,7 @@ static int check_inode_nlink(struct bch_fs *c,
 			u->bi_inum, i_nlink, real_i_nlink);
 set_i_nlink:
 	if (i_nlink != real_i_nlink) {
-		if (real_i_nlink) {
-			u->bi_nlink = real_i_nlink - nlink_bias(u->bi_mode);
-			u->bi_flags &= ~BCH_INODE_UNLINKED;
-		} else {
-			u->bi_nlink = 0;
-			u->bi_flags |= BCH_INODE_UNLINKED;
-		}
-
+		bch2_inode_nlink_set(u, real_i_nlink);
 		*do_update = true;
 	}
 fsck_err:

@@ -1302,7 +1292,7 @@ static int check_inode(struct btree_trans *trans,
 		struct bkey_inode_buf p;
 
 		bch2_inode_pack(&p, &u);
-		bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
+		bch2_trans_update(trans, iter, &p.inode.k_i);
 
 		ret = bch2_trans_commit(trans, NULL, NULL,
 					BTREE_INSERT_NOFAIL|

@@ -345,8 +345,7 @@ again:
 			inode_u->bi_generation	= bkey_generation(k);
 
 			bch2_inode_pack(inode_p, inode_u);
-			bch2_trans_update(trans,
-				BTREE_INSERT_ENTRY(iter, &inode_p->inode.k_i));
+			bch2_trans_update(trans, iter, &inode_p->inode.k_i);
 			return 0;
 		}
 	}

@@ -435,8 +434,7 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
 		delete.v.bi_generation = cpu_to_le32(bi_generation);
 	}
 
-	bch2_trans_update(&trans,
-		BTREE_INSERT_ENTRY(iter, &delete.k_i));
+	bch2_trans_update(&trans, iter, &delete.k_i);
 
 	ret = bch2_trans_commit(&trans, NULL, NULL,
 				BTREE_INSERT_ATOMIC|

@@ -103,6 +103,49 @@ static inline u64 bch2_inode_opt_get(struct bch_inode_unpacked *inode,
 	}
 }
 
+/* i_nlink: */
+
+static inline unsigned nlink_bias(umode_t mode)
+{
+	return S_ISDIR(mode) ? 2 : 1;
+}
+
+static inline void bch2_inode_nlink_inc(struct bch_inode_unpacked *bi)
+{
+	if (bi->bi_flags & BCH_INODE_UNLINKED)
+		bi->bi_flags &= ~BCH_INODE_UNLINKED;
+	else
+		bi->bi_nlink++;
+}
+
+static inline void bch2_inode_nlink_dec(struct bch_inode_unpacked *bi)
+{
+	BUG_ON(bi->bi_flags & BCH_INODE_UNLINKED);
+	if (bi->bi_nlink)
+		bi->bi_nlink--;
+	else
+		bi->bi_flags |= BCH_INODE_UNLINKED;
+}
+
+static inline unsigned bch2_inode_nlink_get(struct bch_inode_unpacked *bi)
+{
+	return bi->bi_flags & BCH_INODE_UNLINKED
+		  ? 0
+		  : bi->bi_nlink + nlink_bias(bi->bi_mode);
+}
+
+static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi,
+					unsigned nlink)
+{
+	if (nlink) {
+		bi->bi_nlink = nlink - nlink_bias(bi->bi_mode);
+		bi->bi_flags &= ~BCH_INODE_UNLINKED;
+	} else {
+		bi->bi_nlink = 0;
+		bi->bi_flags |= BCH_INODE_UNLINKED;
+	}
+}
+
 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_inode_pack_test(void);
 #else
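Note: the new helpers above centralize bcachefs' packed nlink convention — on disk, bi_nlink stores the link count minus a mode-dependent bias (2 for directories, 1 otherwise), with BCH_INODE_UNLINKED standing in for zero. A hedged, userspace-style sketch of the round trip (values and function name illustrative):

/* Sketch: VFS-visible link count through the packed representation. */
#include <assert.h>
#include <sys/stat.h>	/* S_IFDIR */

static void example_nlink_round_trip(void)
{
	struct bch_inode_unpacked u = { .bi_mode = S_IFDIR | 0755 };

	bch2_inode_nlink_set(&u, 2);	/* fresh dir: "." plus parent ref */
	/* -> u.bi_nlink == 0, UNLINKED clear (2 - nlink_bias(dir)) */

	bch2_inode_nlink_inc(&u);	/* subdirectory created */
	assert(bch2_inode_nlink_get(&u) == 3);

	bch2_inode_nlink_dec(&u);	/* subdirectory removed */
	assert(bch2_inode_nlink_get(&u) == 2);
}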

@ -274,8 +274,7 @@ retry:
		if (ret)
			break;

		bch2_trans_update(&trans,
				  BTREE_INSERT_ENTRY(iter, &split.k));
		bch2_trans_update(&trans, iter, &split.k);

		ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
					BTREE_INSERT_NOFAIL|

@ -340,6 +339,7 @@ static void __bch2_write_index(struct bch_write_op *op)
		u64 sectors_start = keylist_sectors(keys);
		int ret = op->index_update_fn(op);

		BUG_ON(ret == -EINTR);
		BUG_ON(keylist_sectors(keys) && !ret);

		op->written += sectors_start - keylist_sectors(keys);

@ -1329,6 +1329,8 @@ retry:
		bio_advance_iter(&rbio->bio, &bvec_iter, bytes);
	}

	if (ret == -EINTR)
		goto retry;
	/*
	 * If we get here, it better have been because there was an error
	 * reading a btree node

@ -1408,8 +1410,8 @@ retry:
	bch2_trans_begin(&trans);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, rbio->pos,
				   BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek(iter);
				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(iter);
	if (IS_ERR_OR_NULL(k.k))
		goto out;

@ -1436,7 +1438,7 @@ retry:
	if (!bch2_bkey_narrow_crcs(&new.k, new_crc))
		goto out;

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new.k));
	bch2_trans_update(&trans, iter, &new.k);
	ret = bch2_trans_commit(&trans, NULL, NULL,
				BTREE_INSERT_ATOMIC|
				BTREE_INSERT_NOFAIL|

@ -1602,9 +1604,9 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
	reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k)->v.idx) +
		*offset_into_extent;

	iter = __bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
				     POS(0, reflink_offset),
				     BTREE_ITER_SLOTS, 1);
	iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
				   POS(0, reflink_offset),
				   BTREE_ITER_SLOTS);
	ret = PTR_ERR_OR_ZERO(iter);
	if (ret)
		return ret;

@ -1873,8 +1875,6 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
			BCH_READ_USER_MAPPED;
	int ret;

	bch2_trans_init(&trans, c, 0, 0);

	BUG_ON(rbio->_state);
	BUG_ON(flags & BCH_READ_NODECODE);
	BUG_ON(flags & BCH_READ_IN_RETRY);

@ -1882,10 +1882,13 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
	rbio->c = c;
	rbio->start_time = local_clock();

	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
				   POS(inode, rbio->bio.bi_iter.bi_sector),
				   BTREE_ITER_SLOTS);

	while (1) {
		BKEY_PADDED(k) tmp;
		unsigned bytes, sectors, offset_into_extent;

@ -1940,6 +1943,9 @@ out:
	bch2_trans_exit(&trans);
	return;
err:
	if (ret == -EINTR)
		goto retry;

	bcache_io_error(c, &rbio->bio, "btree IO error: %i", ret);
	bch2_rbio_done(rbio);
	goto out;

@ -72,10 +72,9 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
		 */
		bch2_extent_normalize(c, bkey_i_to_s(&tmp.key));

		/* XXX not sketchy at all */
		iter->pos = bkey_start_pos(&tmp.key.k);
		bch2_btree_iter_set_pos(iter, bkey_start_pos(&tmp.key.k));

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &tmp.key));
		bch2_trans_update(&trans, iter, &tmp.key);

		ret = bch2_trans_commit(&trans, NULL, NULL,
					BTREE_INSERT_ATOMIC|

@ -148,8 +148,7 @@ static int bch2_migrate_index_update(struct bch_write_op *op)
			goto next;
		}

		bch2_trans_update(&trans,
				  BTREE_INSERT_ENTRY(iter, insert));
		bch2_trans_update(&trans, iter, insert);

		ret = bch2_trans_commit(&trans, &op->res,
					op_journal_seq(op),

@ -289,13 +289,7 @@ enum opt_type {
	  OPT_UINT(0, BCH_REPLICAS_MAX),			\
	  NO_SB_OPT, 1,						\
	  "n",	"Data written to this device will be considered\n"\
		"to have already been replicated n times")	\
	x(new_inode_updates,		u8,			\
	  OPT_MOUNT,						\
	  OPT_BOOL(),						\
	  NO_SB_OPT,		false,				\
	  NULL,	"Enable new btree write-cache for inode updates")

	  "to have already been replicated n times")

struct bch_opts {
#define x(_name, _bits, ...)	unsigned _name##_defined:1;

@ -752,7 +752,7 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
	if (qdq->d_fieldmask & QC_INO_HARD)
		new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &new_quota.k_i));
	bch2_trans_update(&trans, iter, &new_quota.k_i);

	ret = bch2_trans_commit(&trans, NULL, NULL, 0);

@ -301,7 +301,7 @@ retry:
		bch2_cut_front(split_iter->pos, split);
		bch2_cut_back(atomic_end, &split->k);

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(split_iter, split));
		bch2_trans_update(&trans, split_iter, split);
		bch2_btree_iter_set_pos(iter, split->k.p);
	} while (bkey_cmp(iter->pos, k->k.p) < 0);

@ -120,7 +120,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
	r_v->v.refcount = 0;
	memcpy(r_v->v.start, e->v.start, bkey_val_bytes(&e->k));

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(reflink_iter, &r_v->k_i));
	bch2_trans_update(trans, reflink_iter, &r_v->k_i);

	r_p = bch2_trans_kmalloc(trans, sizeof(*r_p));
	if (IS_ERR(r_p))

@ -131,7 +131,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
	set_bkey_val_bytes(&r_p->k, sizeof(r_p->v));
	r_p->v.idx = cpu_to_le64(bkey_start_offset(&r_v->k));

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(extent_iter, &r_p->k_i));
	bch2_trans_update(trans, extent_iter, &r_p->k_i);
err:
	if (!IS_ERR(reflink_iter)) {
		c->reflink_hint = reflink_iter->pos.offset;

@ -190,10 +190,10 @@ s64 bch2_remap_range(struct bch_fs *c,

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096);

	src_iter = __bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, src_start,
					 BTREE_ITER_INTENT, 1);
	dst_iter = __bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, dst_start,
					 BTREE_ITER_INTENT, 2);
	src_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, src_start,
				       BTREE_ITER_INTENT);
	dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, dst_start,
				       BTREE_ITER_INTENT);

	while (1) {
		bch2_trans_begin_updates(&trans);
@ -187,6 +187,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
{
	struct btree_iter *iter;
	struct bkey_s_c k;
	int ret;

	iter = bch2_trans_copy_iter(trans, start);
	if (IS_ERR(iter))

@ -194,19 +195,21 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,

	bch2_btree_iter_next_slot(iter);

	for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k) {
	for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
		if (k.k->type != desc.key_type &&
		    k.k->type != KEY_TYPE_whiteout)
			break;

		if (k.k->type == desc.key_type &&
		    desc.hash_bkey(info, k) <= start->pos.offset) {
			bch2_trans_iter_free_on_commit(trans, iter);
			return 1;
			iter->flags |= BTREE_ITER_KEEP_UNTIL_COMMIT;
			ret = 1;
			break;
		}
	}

	return bch2_trans_iter_free(trans, iter);
	bch2_trans_iter_put(trans, iter);
	return ret;
}

static __always_inline
|
||||
goto not_found;
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
ret = -ENOSPC;
|
||||
out:
|
||||
if (slot)
|
||||
bch2_trans_iter_free(trans, slot);
|
||||
bch2_trans_iter_free(trans, iter);
|
||||
bch2_trans_iter_put(trans, slot);
|
||||
bch2_trans_iter_put(trans, iter);
|
||||
|
||||
return ret ?: -ENOSPC;
|
||||
return ret;
|
||||
found:
|
||||
found = true;
|
||||
not_found:
|
||||
@ -259,17 +265,14 @@ not_found:
|
||||
} else if (found && (flags & BCH_HASH_SET_MUST_CREATE)) {
|
||||
ret = -EEXIST;
|
||||
} else {
|
||||
if (!found && slot) {
|
||||
bch2_trans_iter_free(trans, iter);
|
||||
iter = slot;
|
||||
}
|
||||
if (!found && slot)
|
||||
swap(iter, slot);
|
||||
|
||||
insert->k.p = iter->pos;
|
||||
bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, insert));
|
||||
bch2_trans_iter_free_on_commit(trans, iter);
|
||||
bch2_trans_update(trans, iter, insert);
|
||||
}
|
||||
|
||||
return ret;
|
||||
goto out;
|
||||
}
|
||||
|
||||
static __always_inline
|
||||

@ -293,7 +296,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
	delete->k.p = iter->pos;
	delete->k.type = ret ? KEY_TYPE_whiteout : KEY_TYPE_deleted;

	bch2_trans_update(trans, BTREE_INSERT_ENTRY(iter, delete));
	bch2_trans_update(trans, iter, delete);
	return 0;
}

@ -43,7 +43,7 @@ static void test_delete(struct bch_fs *c, u64 nr)
	ret = bch2_btree_iter_traverse(iter);
	BUG_ON(ret);

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
	bch2_trans_update(&trans, iter, &k.k_i);
	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
	BUG_ON(ret);

@ -75,7 +75,7 @@ static void test_delete_written(struct bch_fs *c, u64 nr)
	ret = bch2_btree_iter_traverse(iter);
	BUG_ON(ret);

	bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
	bch2_trans_update(&trans, iter, &k.k_i);
	ret = bch2_trans_commit(&trans, NULL, NULL, 0);
	BUG_ON(ret);

@ -465,7 +465,7 @@ static void rand_mixed(struct bch_fs *c, u64 nr)
		bkey_cookie_init(&k.k_i);
		k.k.p = iter->pos;

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &k.k_i));
		bch2_trans_update(&trans, iter, &k.k_i);
		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
		BUG_ON(ret);
	}

@ -509,7 +509,7 @@ static void seq_insert(struct bch_fs *c, u64 nr)
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		insert.k.p = iter->pos;

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &insert.k_i));
		bch2_trans_update(&trans, iter, &insert.k_i);
		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
		BUG_ON(ret);

@ -548,7 +548,7 @@ static void seq_overwrite(struct bch_fs *c, u64 nr)

		bkey_reassemble(&u.k_i, k);

		bch2_trans_update(&trans, BTREE_INSERT_ENTRY(iter, &u.k_i));
		bch2_trans_update(&trans, iter, &u.k_i);
		ret = bch2_trans_commit(&trans, NULL, NULL, 0);
		BUG_ON(ret);
	}