mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to 787de128a5 bcachefs: Improvements to fsck check_dirents()
commit 86bd5c622c
parent 700d013b52
@@ -1 +1 @@
-400c2f8d960ac55105bd22905a6ea1a40daa7f4f
+787de128a5caf209845e5a8d0f14f24e1a42492c
@@ -105,6 +105,7 @@ struct super_block {
 #define DT_LNK 10
 #define DT_SOCK 12
 #define DT_WHT 14
+#define DT_MAX 16
 #endif

 /*
@@ -130,7 +130,7 @@ static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,

 #define x(_name, _bits)                                         \
     if (fieldnr < a.v->nr_fields) {                             \
-        ret = bch2_varint_decode(in, end, &v);                  \
+        ret = bch2_varint_decode_fast(in, end, &v);             \
         if (ret < 0)                                            \
             return ret;                                         \
         in += ret;                                              \
@@ -166,7 +166,7 @@ static void bch2_alloc_pack_v2(struct bkey_alloc_buf *dst,
     nr_fields++;                                                \
                                                                 \
     if (src._name) {                                            \
-        out += bch2_varint_encode(out, src._name);              \
+        out += bch2_varint_encode_fast(out, src._name);         \
                                                                 \
         last_nonzero_field = out;                               \
         last_nonzero_fieldnr = nr_fields;                       \
@@ -1232,3 +1232,22 @@ void bch2_fs_allocator_background_init(struct bch_fs *c)
 {
     spin_lock_init(&c->freelist_lock);
 }
+
+void bch2_open_buckets_to_text(struct printbuf *out, struct bch_fs *c)
+{
+    struct open_bucket *ob;
+
+    for (ob = c->open_buckets;
+         ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
+         ob++) {
+        spin_lock(&ob->lock);
+        if (ob->valid && !ob->on_partial_list) {
+            pr_buf(out, "%zu ref %u type %s\n",
+                   ob - c->open_buckets,
+                   atomic_read(&ob->pin),
+                   bch2_data_types[ob->type]);
+        }
+        spin_unlock(&ob->lock);
+    }
+}
@@ -132,4 +132,6 @@ int bch2_dev_allocator_start(struct bch_dev *);
 int bch2_alloc_write(struct bch_fs *, unsigned);
 void bch2_fs_allocator_background_init(struct bch_fs *);

+void bch2_open_buckets_to_text(struct printbuf *, struct bch_fs *);
+
 #endif /* _BCACHEFS_ALLOC_BACKGROUND_H */
@@ -1346,6 +1346,7 @@ LE64_BITMASK(BCH_SB_GC_RESERVE_BYTES, struct bch_sb, flags[2], 4, 64);
 LE64_BITMASK(BCH_SB_ERASURE_CODE, struct bch_sb, flags[3], 0, 16);
 LE64_BITMASK(BCH_SB_METADATA_TARGET, struct bch_sb, flags[3], 16, 28);
 LE64_BITMASK(BCH_SB_SHARD_INUMS, struct bch_sb, flags[3], 28, 29);
+LE64_BITMASK(BCH_SB_INODES_USE_KEY_CACHE,struct bch_sb, flags[3], 29, 30);

 /*
  * Features:
@@ -233,7 +233,7 @@ wait_on_io:
     if (bch2_verify_btree_ondisk)
         bch2_btree_node_write(c, b, SIX_LOCK_intent);
     else
-        __bch2_btree_node_write(c, b);
+        __bch2_btree_node_write(c, b, false);

     six_unlock_write(&b->c.lock);
     six_unlock_intent(&b->c.lock);
@@ -691,7 +691,9 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
      * currently fails for iterators that aren't pointed at a valid btree
      * node
      */
-    if (iter && !bch2_trans_relock(iter->trans))
+    if (iter &&
+        (!bch2_trans_relock(iter->trans) ||
+         !bch2_btree_iter_relock(iter, _THIS_IP_)))
         return ERR_PTR(-EINTR);

     if (!six_relock_type(&b->c.lock, lock_type, seq))
@@ -851,7 +853,9 @@ lock_node:
      * currently fails for iterators that aren't pointed at a valid
      * btree node
      */
-    if (iter && !bch2_trans_relock(iter->trans))
+    if (iter &&
+        (!bch2_trans_relock(iter->trans) ||
+         !bch2_btree_iter_relock(iter, _THIS_IP_)))
         return ERR_PTR(-EINTR);

     if (!six_relock_type(&b->c.lock, lock_type, seq))
@@ -1002,7 +1006,7 @@ wait_on_io:
     six_lock_write(&b->c.lock, NULL, NULL);

     if (btree_node_dirty(b)) {
-        __bch2_btree_node_write(c, b);
+        __bch2_btree_node_write(c, b, false);
         six_unlock_write(&b->c.lock);
         six_unlock_intent(&b->c.lock);
         goto wait_on_io;
@@ -1566,9 +1566,47 @@ void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
 static void btree_node_write_done(struct bch_fs *c, struct btree *b)
 {
     struct btree_write *w = btree_prev_write(b);
+    unsigned long old, new, v;

     bch2_btree_complete_write(c, b, w);
-    bch2_btree_node_io_unlock(b);
+
+    v = READ_ONCE(b->flags);
+    do {
+        old = new = v;
+
+        if (old & (1U << BTREE_NODE_need_write))
+            goto do_write;
+
+        new &= ~(1U << BTREE_NODE_write_in_flight);
+    } while ((v = cmpxchg(&b->flags, old, new)) != old);
+
+    wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
+    return;
+
+do_write:
+    six_lock_read(&b->c.lock, NULL, NULL);
+    v = READ_ONCE(b->flags);
+    do {
+        old = new = v;
+
+        if ((old & (1U << BTREE_NODE_dirty)) &&
+            (old & (1U << BTREE_NODE_need_write)) &&
+            !(old & (1U << BTREE_NODE_never_write)) &&
+            btree_node_may_write(b)) {
+            new &= ~(1U << BTREE_NODE_dirty);
+            new &= ~(1U << BTREE_NODE_need_write);
+            new |= (1U << BTREE_NODE_write_in_flight);
+            new |= (1U << BTREE_NODE_just_written);
+            new ^= (1U << BTREE_NODE_write_idx);
+        } else {
+            new &= ~(1U << BTREE_NODE_write_in_flight);
+        }
+    } while ((v = cmpxchg(&b->flags, old, new)) != old);
+
+    if (new & (1U << BTREE_NODE_write_in_flight))
+        __bch2_btree_node_write(c, b, true);
+
+    six_unlock_read(&b->c.lock);
 }

 static void bch2_btree_node_write_error(struct bch_fs *c,
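[Editor's note: btree_node_write_done() above uses the standard lock-free flag-transition pattern — snapshot the flag word, compute the new value from the old one, and retry cmpxchg() until no concurrent modification intervened. Below is a minimal standalone sketch of the same pattern using C11 atomics in place of the kernel's cmpxchg(); the names and flag values are illustrative, not from the commit.]

    #include <stdatomic.h>
    #include <stdbool.h>

    #define NODE_NEED_WRITE       (1u << 0)
    #define NODE_WRITE_IN_FLIGHT  (1u << 1)

    /* Returns true if the node was re-dirtied and should be written again
     * immediately (write_in_flight is then left set for the next write). */
    static bool write_done_update(_Atomic unsigned *flags)
    {
        unsigned old = atomic_load(flags), new;

        do {
            if (old & NODE_NEED_WRITE)
                return true;            /* restart the write */
            new = old & ~NODE_WRITE_IN_FLIGHT;
        } while (!atomic_compare_exchange_weak(flags, &old, new));
        /* compare_exchange reloads 'old' on failure, so the loop always
         * recomputes 'new' from the latest value */
        return false;                   /* write fully done; wake waiters */
    }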
@@ -1733,7 +1771,7 @@ static void btree_write_submit(struct work_struct *work)
     bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &wbio->key);
 }

-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
 {
     struct btree_write_bio *wbio;
     struct bset_tree *t;
@@ -1750,7 +1788,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
     bool validate_before_checksum = false;
     void *data;

-    BUG_ON(btree_node_write_in_flight(b));
+    if (already_started)
+        goto do_write;

     if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
         return;
@@ -1774,14 +1813,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
     if (old & (1 << BTREE_NODE_never_write))
         return;

-    if (old & (1 << BTREE_NODE_write_in_flight)) {
-        /*
-         * XXX waiting on btree writes with btree locks held -
-         * this can deadlock, and we hit the write error path
-         */
-        bch2_btree_node_wait_on_write(b);
-        continue;
-    }
+    BUG_ON(old & (1 << BTREE_NODE_write_in_flight));

     new &= ~(1 << BTREE_NODE_dirty);
     new &= ~(1 << BTREE_NODE_need_write);
@@ -1790,6 +1822,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
         new ^= (1 << BTREE_NODE_write_idx);
     } while (cmpxchg_acquire(&b->flags, old, new) != old);

+    if (new & (1U << BTREE_NODE_need_write))
+        return;
+do_write:
     atomic_dec(&c->btree_cache.dirty);

     BUG_ON(btree_node_fake(b));
@@ -2044,7 +2079,7 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
     if (lock_type_held == SIX_LOCK_intent ||
         (lock_type_held == SIX_LOCK_read &&
          six_lock_tryupgrade(&b->c.lock))) {
-        __bch2_btree_node_write(c, b);
+        __bch2_btree_node_write(c, b, false);

         /* don't cycle lock unnecessarily: */
         if (btree_node_just_written(b) &&
@@ -2056,7 +2091,7 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
         if (lock_type_held == SIX_LOCK_read)
             six_lock_downgrade(&b->c.lock);
     } else {
-        __bch2_btree_node_write(c, b);
+        __bch2_btree_node_write(c, b, false);
         if (lock_type_held == SIX_LOCK_write &&
             btree_node_just_written(b))
             bch2_btree_post_write_cleanup(c, b);
@@ -139,7 +139,7 @@ void bch2_btree_complete_write(struct bch_fs *, struct btree *,
                 struct btree_write *);
 void bch2_btree_write_error_work(struct work_struct *);

-void __bch2_btree_node_write(struct bch_fs *, struct btree *);
+void __bch2_btree_node_write(struct bch_fs *, struct btree *, bool);
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);

 void bch2_btree_node_write(struct bch_fs *, struct btree *,
@@ -148,18 +148,11 @@ void bch2_btree_node_write(struct bch_fs *, struct btree *,
 static inline void btree_node_write_if_need(struct bch_fs *c, struct btree *b,
                                             enum six_lock_type lock_held)
 {
-    while (b->written &&
-           btree_node_need_write(b) &&
-           btree_node_may_write(b)) {
-        if (!btree_node_write_in_flight(b)) {
-            bch2_btree_node_write(c, b, lock_held);
-            break;
-        }
-
-        six_unlock_type(&b->c.lock, lock_held);
-        bch2_btree_node_wait_on_write(b);
-        btree_node_lock_type(c, b, lock_held);
-    }
+    if (b->written &&
+        btree_node_need_write(b) &&
+        btree_node_may_write(b) &&
+        !btree_node_write_in_flight(b))
+        bch2_btree_node_write(c, b, lock_held);
 }

 #define bch2_btree_node_write_cond(_c, _b, cond)               \
@@ -347,6 +347,7 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 #ifdef CONFIG_BCACHEFS_DEBUG
 static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
 {
+    struct bch_fs *c = iter->trans->c;
     unsigned l;

     if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
@@ -354,7 +355,7 @@ static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
         return;
     }

-    for (l = 0; is_btree_node(iter, l); l++) {
+    for (l = 0; btree_iter_node(iter, l); l++) {
         if (iter->uptodate >= BTREE_ITER_NEED_RELOCK &&
             !btree_node_locked(iter, l))
             continue;
@@ -376,7 +377,7 @@ static inline void bch2_btree_iter_verify_locks(struct btree_iter *iter) {}
 #endif

 __flatten
-static bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
+bool bch2_btree_iter_relock(struct btree_iter *iter, unsigned long trace_ip)
 {
     return btree_iter_get_locks(iter, false, trace_ip);
 }
@@ -602,6 +603,8 @@ err:

 static void bch2_btree_iter_verify(struct btree_iter *iter)
 {
+    struct btree_trans *trans = iter->trans;
+    struct bch_fs *c = trans->c;
     enum btree_iter_type type = btree_iter_type(iter);
     unsigned i;

@@ -620,12 +623,18 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
            (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
            !btree_type_has_snapshots(iter->btree_id));

-    bch2_btree_iter_verify_locks(iter);
+    for (i = 0; i < BTREE_MAX_DEPTH; i++) {
+        if (!iter->l[i].b) {
+            BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
+            break;
+        }

-    for (i = 0; i < BTREE_MAX_DEPTH; i++)
         bch2_btree_iter_verify_level(iter, i);
     }
+
+    bch2_btree_iter_verify_locks(iter);
+}

 static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
 {
     enum btree_iter_type type = btree_iter_type(iter);
@@ -1345,30 +1354,30 @@ static inline unsigned btree_iter_up_until_good_node(struct btree_iter *iter,
 static int btree_iter_traverse_one(struct btree_iter *iter,
                                    unsigned long trace_ip)
 {
-    unsigned depth_want = iter->level;
+    unsigned l, depth_want = iter->level;
     int ret = 0;

-    /*
-     * if we need interior nodes locked, call btree_iter_relock() to make
-     * sure we walk back up enough that we lock them:
-     */
-    if (iter->uptodate == BTREE_ITER_NEED_RELOCK ||
-        iter->locks_want > 1)
-        bch2_btree_iter_relock(iter, _THIS_IP_);
-
     if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
         ret = bch2_btree_iter_traverse_cached(iter);
         goto out;
     }

-    if (iter->uptodate < BTREE_ITER_NEED_RELOCK)
-        goto out;
-
     if (unlikely(iter->level >= BTREE_MAX_DEPTH))
         goto out;

     iter->level = btree_iter_up_until_good_node(iter, 0);

+    /* If we need intent locks, take them too: */
+    for (l = iter->level + 1;
+         l < iter->locks_want && btree_iter_node(iter, l);
+         l++)
+        if (!bch2_btree_node_relock(iter, l))
+            while (iter->level <= l) {
+                btree_node_unlock(iter, iter->level);
+                iter->l[iter->level].b = BTREE_ITER_NO_NODE_UP;
+                iter->level++;
+            }
+
     /*
      * Note: iter->nodes[iter->level] may be temporarily NULL here - that
      * would indicate to other code that we got to the end of the btree,
@@ -1389,6 +1398,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter,
         goto out;
     }

+    __bch2_btree_iter_unlock(iter);
     iter->level = depth_want;

     if (ret == -EIO) {
@@ -111,6 +111,8 @@ void bch2_btree_node_iter_fix(struct btree_iter *, struct btree *,
                               struct btree_node_iter *, struct bkey_packed *,
                               unsigned, unsigned);

+bool bch2_btree_iter_relock(struct btree_iter *, unsigned long);
+
 bool bch2_trans_relock(struct btree_trans *);
 void bch2_trans_unlock(struct btree_trans *);

@@ -270,7 +270,9 @@ int bch2_btree_iter_traverse_cached(struct btree_iter *iter)

     BUG_ON(iter->level);

-    if (btree_node_locked(iter, 0)) {
+    iter->l[1].b = NULL;
+
+    if (bch2_btree_node_relock(iter, 0)) {
         ck = (void *) iter->l[0].b;
         goto fill;
     }
@@ -948,13 +948,6 @@ bch2_btree_update_start(struct btree_iter *iter, unsigned level,

     closure_init_stack(&cl);
 retry:
-    /*
-     * This check isn't necessary for correctness - it's just to potentially
-     * prevent us from doing a lot of work that'll end up being wasted:
-     */
-    ret = bch2_journal_error(&c->journal);
-    if (ret)
-        return ERR_PTR(ret);

     /*
      * XXX: figure out how far we might need to split,
@@ -995,6 +988,22 @@ retry:
     bch2_keylist_init(&as->new_keys, as->_new_keys);
     bch2_keylist_init(&as->parent_keys, as->inline_keys);

+    mutex_lock(&c->btree_interior_update_lock);
+    list_add_tail(&as->list, &c->btree_interior_update_list);
+    mutex_unlock(&c->btree_interior_update_lock);
+
+    /*
+     * We don't want to allocate if we're in an error state, that can cause
+     * deadlock on emergency shutdown due to open buckets getting stuck in
+     * the btree_reserve_cache after allocator shutdown has cleared it out.
+     * This check needs to come after adding us to the btree_interior_update
+     * list but before calling bch2_btree_reserve_get, to synchronize with
+     * __bch2_fs_read_only().
+     */
+    ret = bch2_journal_error(&c->journal);
+    if (ret)
+        goto err;
+
     ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
                       BTREE_UPDATE_JOURNAL_RES,
                       journal_flags|JOURNAL_RES_GET_NONBLOCK);
@@ -1046,10 +1055,6 @@ retry:
                       atomic64_read(&c->journal.seq),
                       &as->journal, NULL);

-    mutex_lock(&c->btree_interior_update_lock);
-    list_add_tail(&as->list, &c->btree_interior_update_list);
-    mutex_unlock(&c->btree_interior_update_lock);
-
     return as;
 err:
     bch2_btree_update_free(as);
@@ -112,7 +112,10 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c,

     bch_scnmemcpy(out, d.v->d_name,
                   bch2_dirent_name_bytes(d));
-    pr_buf(out, " -> %llu type %s", d.v->d_inum, bch2_d_types[d.v->d_type]);
+    pr_buf(out, " -> %llu type %s", d.v->d_inum,
+           d.v->d_type < DT_MAX
+           ? bch2_d_types[d.v->d_type]
+           : "(bad d_type)");
 }

 static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
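[Editor's note: d_type is read from disk, so a corrupt dirent could otherwise index past the end of bch2_d_types; together with the new DT_MAX define and the bch2_d_types[DT_MAX] sizing elsewhere in this commit, the change above bounds-checks before the table lookup. A tiny self-contained sketch of the same defensive lookup — the function name here is illustrative:]

    #define DT_MAX 16

    /* Entries not filled in by a designated initializer are NULL,
     * so check both the range and the pointer before use. */
    static const char *d_type_str(const char *const types[DT_MAX], unsigned d_type)
    {
        return d_type < DT_MAX && types[d_type] ? types[d_type] : "(bad d_type)";
    }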
@@ -1870,8 +1870,6 @@ static long bch2_dio_write_loop(struct dio_write *dio)
              * bio_iov_iter_get_pages was only able to get <
              * blocksize worth of pages:
              */
-            bio_for_each_segment_all(bv, bio, iter)
-                put_page(bv->bv_page);
             ret = -EFAULT;
             goto err;
         }
@@ -1939,6 +1937,7 @@ loop:
     if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
         bio_for_each_segment_all(bv, bio, iter)
             put_page(bv->bv_page);
+    bio->bi_vcnt = 0;

     if (dio->op.error) {
         set_bit(EI_INODE_ERROR, &inode->ei_flags);
@@ -1961,6 +1960,9 @@ err:
     if (dio->free_iov)
         kfree(dio->iter.iov);

+    if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF)))
+        bio_for_each_segment_all(bv, bio, iter)
+            put_page(bv->bv_page);
     bio_put(bio);

     /* inode->i_dio_count is our ref on inode and thus bch_fs */
@@ -267,11 +267,11 @@ static struct inode_walker inode_walker_init(void)
     };
 }

-static int walk_inode(struct btree_trans *trans,
+static int __walk_inode(struct btree_trans *trans,
               struct inode_walker *w, u64 inum)
 {
     if (inum != w->cur_inum) {
-        int ret = lookup_inode(trans, inum, &w->inode, &w->snapshot);
+        int ret = __lookup_inode(trans, inum, &w->inode, &w->snapshot);

         if (ret && ret != -ENOENT)
             return ret;
@@ -286,6 +286,12 @@ static int walk_inode(struct btree_trans *trans,
     return 0;
 }

+static int walk_inode(struct btree_trans *trans,
+              struct inode_walker *w, u64 inum)
+{
+    return lockrestart_do(trans, __walk_inode(trans, w, inum));
+}
+
 static int hash_redo_key(struct btree_trans *trans,
              const struct bch_hash_desc desc,
              struct bch_hash_info *hash_info,
@@ -704,98 +710,78 @@ fsck_err:
     return bch2_trans_exit(&trans) ?: ret;
 }

-/*
- * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
- * validate d_type
- */
-noinline_for_stack
-static int check_dirents(struct bch_fs *c)
+static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
+            struct bch_hash_info *hash_info,
+            struct inode_walker *w, unsigned *nr_subdirs)
 {
-    struct inode_walker w = inode_walker_init();
-    struct bch_hash_info hash_info;
-    struct btree_trans trans;
-    struct btree_iter *iter;
+    struct bch_fs *c = trans->c;
     struct bkey_s_c k;
-    char buf[200];
-    unsigned nr_subdirs = 0;
-    int ret = 0;
-
-    bch_verbose(c, "checking dirents");
-
-    bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
-
-    iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents,
-                   POS(BCACHEFS_ROOT_INO, 0),
-                   BTREE_ITER_INTENT|
-                   BTREE_ITER_PREFETCH);
-retry:
-    while ((k = bch2_btree_iter_peek(iter)).k &&
-           !(ret = bkey_err(k))) {
-        struct bkey_s_c_dirent d;
-        struct bch_inode_unpacked target;
-        u32 target_snapshot;
-        bool have_target;
-        bool backpointer_exists = true;
-        u64 d_inum;
+    struct bkey_s_c_dirent d;
+    struct bch_inode_unpacked target;
+    u32 target_snapshot;
+    bool have_target;
+    bool backpointer_exists = true;
+    u64 d_inum;
+    char buf[200];
+    int ret;
+
+    k = bch2_btree_iter_peek(iter);
+    if (!k.k)
+        return 1;
+
+    ret = bkey_err(k);
+    if (ret)
+        return ret;

-        if (w.have_inode &&
-            w.cur_inum != k.k->p.inode &&
-            fsck_err_on(w.inode.bi_nlink != nr_subdirs, c,
+    if (w->have_inode &&
+        w->cur_inum != k.k->p.inode &&
+        fsck_err_on(w->inode.bi_nlink != *nr_subdirs, c,
             "directory %llu with wrong i_nlink: got %u, should be %u",
-            w.inode.bi_inum, w.inode.bi_nlink, nr_subdirs)) {
-            w.inode.bi_nlink = nr_subdirs;
-            ret = write_inode(&trans, &w.inode, w.snapshot);
-            if (ret)
-                break;
-        }
+            w->inode.bi_inum, w->inode.bi_nlink, *nr_subdirs)) {
+        w->inode.bi_nlink = *nr_subdirs;
+        ret = write_inode(trans, &w->inode, w->snapshot);
+        return ret ?: -EINTR;
+    }

-        ret = walk_inode(&trans, &w, k.k->p.inode);
-        if (ret)
-            break;
+    ret = __walk_inode(trans, w, k.k->p.inode);
+    if (ret)
+        return ret;

-        if (w.first_this_inode)
-            nr_subdirs = 0;
+    if (w->first_this_inode)
+        *nr_subdirs = 0;

-        if (fsck_err_on(!w.have_inode, c,
-                "dirent in nonexisting directory:\n%s",
-                (bch2_bkey_val_to_text(&PBUF(buf), c,
-                               k), buf)) ||
-            fsck_err_on(!S_ISDIR(w.inode.bi_mode), c,
-                "dirent in non directory inode type %u:\n%s",
-                mode_to_type(w.inode.bi_mode),
-                (bch2_bkey_val_to_text(&PBUF(buf), c,
-                               k), buf))) {
-            ret = __bch2_trans_do(&trans, NULL, NULL, 0,
-                    bch2_btree_delete_at(&trans, iter, 0));
-            if (ret)
-                goto err;
-            goto next;
-        }
+    if (fsck_err_on(!w->have_inode, c,
+            "dirent in nonexisting directory:\n%s",
+            (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)) ||
+        fsck_err_on(!S_ISDIR(w->inode.bi_mode), c,
+            "dirent in non directory inode type %u:\n%s",
+            mode_to_type(w->inode.bi_mode),
+            (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf)))
+        return __bch2_trans_do(trans, NULL, NULL, 0,
+                bch2_btree_delete_at(trans, iter, 0));

-        if (!w.have_inode)
-            goto next;
+    if (!w->have_inode)
+        return 0;

-        if (w.first_this_inode)
-            hash_info = bch2_hash_info_init(c, &w.inode);
+    if (w->first_this_inode)
+        *hash_info = bch2_hash_info_init(c, &w->inode);

-        ret = hash_check_key(&trans, bch2_dirent_hash_desc,
-                     &hash_info, iter, k);
-        if (ret > 0) {
-            ret = 0;
-            goto next;
-        }
-        if (ret)
-            goto fsck_err;
+    ret = hash_check_key(trans, bch2_dirent_hash_desc,
+                 hash_info, iter, k);
+    if (ret < 0)
+        return ret;
+    if (ret) /* dirent has been deleted */
+        return 0;

-        if (k.k->type != KEY_TYPE_dirent)
-            goto next;
+    if (k.k->type != KEY_TYPE_dirent)
+        return 0;

-        d = bkey_s_c_to_dirent(k);
-        d_inum = le64_to_cpu(d.v->d_inum);
+    d = bkey_s_c_to_dirent(k);
+    d_inum = le64_to_cpu(d.v->d_inum);

-        ret = lookup_inode(&trans, d_inum, &target, &target_snapshot);
-        if (ret && ret != -ENOENT)
-            break;
+    ret = __lookup_inode(trans, d_inum, &target, &target_snapshot);
+    if (ret && ret != -ENOENT)
+        return ret;

-        have_target = !ret;
-        ret = 0;
+    have_target = !ret;
+    ret = 0;
@@ -803,30 +789,31 @@ retry:
     if (fsck_err_on(!have_target, c,
             "dirent points to missing inode:\n%s",
             (bch2_bkey_val_to_text(&PBUF(buf), c,
-                           k), buf))) {
-        ret = remove_dirent(&trans, d.k->p);
-        if (ret)
-            goto err;
-        goto next;
-    }
+                           k), buf)))
+        return remove_dirent(trans, d.k->p);

     if (!have_target)
-        goto next;
+        return 0;

     if (!target.bi_dir &&
         !target.bi_dir_offset) {
         target.bi_dir = k.k->p.inode;
         target.bi_dir_offset = k.k->p.offset;

-        ret = write_inode(&trans, &target, target_snapshot);
+        ret = __write_inode(trans, &target, target_snapshot) ?:
+            bch2_trans_commit(trans, NULL, NULL,
+                      BTREE_INSERT_NOFAIL|
+                      BTREE_INSERT_LAZY_RW|
+                      BTREE_INSERT_NOUNLOCK);
         if (ret)
-            goto err;
+            return ret;
+        return -EINTR;
     }

     if (!inode_backpointer_matches(d, &target)) {
-        ret = inode_backpointer_exists(&trans, &target);
+        ret = inode_backpointer_exists(trans, &target);
         if (ret < 0)
-            goto err;
+            return ret;

         backpointer_exists = ret;
         ret = 0;
@@ -834,12 +821,8 @@ retry:
         if (fsck_err_on(S_ISDIR(target.bi_mode) &&
                 backpointer_exists, c,
                 "directory %llu with multiple links",
-                target.bi_inum)) {
-            ret = remove_dirent(&trans, d.k->p);
-            if (ret)
-                goto err;
-            continue;
-        }
+                target.bi_inum))
+            return remove_dirent(trans, d.k->p);

         if (fsck_err_on(backpointer_exists &&
                 !target.bi_nlink, c,
@@ -848,9 +831,8 @@ retry:
             target.bi_nlink++;
             target.bi_flags &= ~BCH_INODE_UNLINKED;

-            ret = write_inode(&trans, &target, target_snapshot);
-            if (ret)
-                goto err;
+            ret = write_inode(trans, &target, target_snapshot);
+            return ret ?: -EINTR;
         }

         if (fsck_err_on(!backpointer_exists, c,
@@ -865,9 +847,8 @@ retry:
             target.bi_dir = k.k->p.inode;
             target.bi_dir_offset = k.k->p.offset;

-            ret = write_inode(&trans, &target, target_snapshot);
-            if (ret)
-                goto err;
+            ret = write_inode(trans, &target, target_snapshot);
+            return ret ?: -EINTR;
         }
     }

@@ -879,35 +860,65 @@ retry:
         struct bkey_i_dirent *n;

         n = kmalloc(bkey_bytes(d.k), GFP_KERNEL);
-        if (!n) {
-            ret = -ENOMEM;
-            goto err;
-        }
+        if (!n)
+            return -ENOMEM;

         bkey_reassemble(&n->k_i, d.s_c);
         n->v.d_type = mode_to_type(target.bi_mode);

-        ret = __bch2_trans_do(&trans, NULL, NULL,
+        ret = __bch2_trans_do(trans, NULL, NULL,
                       BTREE_INSERT_NOFAIL|
                       BTREE_INSERT_LAZY_RW,
                       bch2_btree_iter_traverse(iter) ?:
-                      bch2_trans_update(&trans, iter, &n->k_i, 0));
+                      bch2_trans_update(trans, iter, &n->k_i, 0));
         kfree(n);
-        if (ret)
-            goto err;
+        return ret ?: -EINTR;
     }

-    nr_subdirs += d.v->d_type == DT_DIR;
-next:
+    *nr_subdirs += d.v->d_type == DT_DIR;
+    return 0;
+fsck_err:
+    return ret;
+}
+
+/*
+ * Walk dirents: verify that they all have a corresponding S_ISDIR inode,
+ * validate d_type
+ */
+noinline_for_stack
+static int check_dirents(struct bch_fs *c)
+{
+    struct inode_walker w = inode_walker_init();
+    struct bch_hash_info hash_info;
+    struct btree_trans trans;
+    struct btree_iter *iter;
+    unsigned nr_subdirs = 0;
+    int ret = 0;
+
+    bch_verbose(c, "checking dirents");
+
+    bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
+
+    iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents,
+                   POS(BCACHEFS_ROOT_INO, 0),
+                   BTREE_ITER_INTENT|
+                   BTREE_ITER_PREFETCH);
+
+    while (1) {
+        ret = lockrestart_do(&trans,
+            check_dirent(&trans, iter, &hash_info, &w, &nr_subdirs));
+        if (ret == 1) {
+            /* at end */
+            ret = 0;
+            break;
+        }
+        if (ret)
+            break;
+
         bch2_btree_iter_advance(iter);
     }
-err:
-fsck_err:
-    if (ret == -EINTR)
-        goto retry;

     bch2_trans_iter_put(&trans, iter);

     return bch2_trans_exit(&trans) ?: ret;
 }
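[Editor's note: the refactor above moves one iteration of the old retry: loop into check_dirent(), with a small return convention — 1 means end of btree, -EINTR means the btree transaction must be restarted (lockrestart_do() re-runs the same unit of work), and 0 means advance to the next key. Below is a runnable sketch of that control flow under simplified assumptions: there is no real btree here, and check_one() plus the simulated restart are hypothetical stand-ins.]

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for check_dirent(): returns 1 at end, -EINTR once at key 2
     * to simulate a lock restart, 0 on success. */
    static int check_one(int pos, int *restarted)
    {
        if (pos >= 4)
            return 1;
        if (pos == 2 && !*restarted) {
            *restarted = 1;
            return -EINTR;
        }
        printf("checked key %d\n", pos);
        return 0;
    }

    int main(void)
    {
        int pos = 0, restarted = 0, ret;

        while (1) {
            do {        /* lockrestart_do(): retry one unit of work on -EINTR */
                ret = check_one(pos, &restarted);
            } while (ret == -EINTR);

            if (ret)    /* 1 = end of btree, <0 = real error */
                break;
            pos++;      /* stands in for bch2_btree_iter_advance() */
        }
        return ret == 1 ? 0 : -ret;
    }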
@@ -137,7 +137,7 @@ static void bch2_inode_pack_v2(struct bkey_inode_buf *packed,
     nr_fields++;                                                \
                                                                 \
     if (inode->_name) {                                         \
-        ret = bch2_varint_encode(out, inode->_name);            \
+        ret = bch2_varint_encode_fast(out, inode->_name);       \
         out += ret;                                             \
                                                                 \
         if (_bits > 64)                                         \
@@ -246,13 +246,13 @@ static int bch2_inode_unpack_v2(struct bkey_s_c_inode inode,

 #define x(_name, _bits)                                         \
     if (fieldnr < INODE_NR_FIELDS(inode.v)) {                   \
-        ret = bch2_varint_decode(in, end, &v[0]);               \
+        ret = bch2_varint_decode_fast(in, end, &v[0]);          \
         if (ret < 0)                                            \
             return ret;                                         \
         in += ret;                                              \
                                                                 \
         if (_bits > 64) {                                       \
-            ret = bch2_varint_decode(in, end, &v[1]);           \
+            ret = bch2_varint_decode_fast(in, end, &v[1]);      \
            if (ret < 0)                                         \
                return ret;                                      \
            in += ret;                                           \
@@ -300,8 +300,10 @@ struct btree_iter *bch2_inode_peek(struct btree_trans *trans,
     struct bkey_s_c k;
     int ret;

-    iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum),
-                   BTREE_ITER_CACHED|flags);
+    if (trans->c->opts.inodes_use_key_cache)
+        flags |= BTREE_ITER_CACHED;
+
+    iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum), flags);
     k = bch2_btree_iter_peek_slot(iter);
     ret = bkey_err(k);
     if (ret)
@@ -577,8 +579,12 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
     struct bpos end = POS(inode_nr + 1, 0);
     struct bch_inode_unpacked inode_u;
     struct bkey_s_c k;
+    unsigned iter_flags = BTREE_ITER_INTENT;
     int ret;

+    if (cached && c->opts.inodes_use_key_cache)
+        iter_flags |= BTREE_ITER_CACHED;
+
     bch2_trans_init(&trans, c, 0, 1024);

     /*
@@ -600,11 +606,8 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
 retry:
     bch2_trans_begin(&trans);

-    iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
-                   (cached
-                    ? BTREE_ITER_CACHED
-                    : BTREE_ITER_SLOTS)|
-                   BTREE_ITER_INTENT);
+    iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes,
+                   POS(0, inode_nr), iter_flags);
     k = bch2_btree_iter_peek_slot(iter);

     ret = bkey_err(k);
@@ -63,7 +63,7 @@ const char * const bch2_member_states[] = {

 #undef x

-const char * const bch2_d_types[] = {
+const char * const bch2_d_types[DT_MAX] = {
     [DT_UNKNOWN] = "unknown",
     [DT_FIFO] = "fifo",
     [DT_CHR] = "chr",
@@ -173,6 +173,11 @@ enum opt_type {
       OPT_BOOL(),                                               \
       BCH_SB_SHARD_INUMS, false,                                \
       NULL, "Shard new inode numbers by CPU id")                \
+    x(inodes_use_key_cache, u8,                                 \
+      OPT_FORMAT|OPT_MOUNT,                                     \
+      OPT_BOOL(),                                               \
+      BCH_SB_INODES_USE_KEY_CACHE, true,                        \
+      NULL, "Use the btree key cache for the inodes btree")     \
     x(gc_reserve_percent, u8,                                   \
       OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME,                         \
       OPT_UINT(5, 21),                                          \
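[Editor's note: the option table above is an X-macro list — each x(...) entry is expanded several times, with different definitions of x(), to generate the option enum, the option struct fields, and the metadata. A tiny self-contained sketch of the pattern with hypothetical option names (not the bcachefs definitions):]

    #define OPTS()          \
        x(foo, 8)           \
        x(bar, 16)

    /* expansion 1: an enum of option ids */
    #define x(_name, _bits) Opt_##_name,
    enum demo_opt_id { OPTS() DEMO_OPT_NR };
    #undef x

    /* expansion 2: a struct with one bitfield per option */
    #define x(_name, _bits) unsigned _name:_bits;
    struct demo_opts { OPTS() };
    #undef x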
@@ -171,6 +171,7 @@ read_attribute(btree_cache);
 read_attribute(btree_key_cache);
 read_attribute(btree_transactions);
 read_attribute(stripes_heap);
+read_attribute(open_buckets);

 read_attribute(internal_uuid);

@@ -409,6 +410,11 @@ SHOW(bch2_fs)
         return out.pos - buf;
     }

+    if (attr == &sysfs_open_buckets) {
+        bch2_open_buckets_to_text(&out, c);
+        return out.pos - buf;
+    }
+
     if (attr == &sysfs_compression_stats) {
         bch2_compression_stats_to_text(&out, c);
         return out.pos - buf;
@@ -567,6 +573,7 @@ struct attribute *bch2_fs_internal_files[] = {
     &sysfs_btree_key_cache,
     &sysfs_btree_transactions,
     &sysfs_stripes_heap,
+    &sysfs_open_buckets,

     &sysfs_read_realloc_races,
     &sysfs_extent_migrate_done,
@@ -1,15 +1,80 @@
 // SPDX-License-Identifier: GPL-2.0

 #include <linux/bitops.h>
+#include <linux/string.h>
 #include <asm/unaligned.h>

 #include "varint.h"

+/**
+ * bch2_varint_encode - encode a variable length integer
+ * @out - destination to encode to
+ * @v - unsigned integer to encode
+ *
+ * Returns the size in bytes of the encoded integer - at most 9 bytes
+ */
 int bch2_varint_encode(u8 *out, u64 v)
 {
     unsigned bits = fls64(v|1);
     unsigned bytes = DIV_ROUND_UP(bits, 7);

+    if (likely(bytes < 9)) {
+        v <<= bytes;
+        v |= ~(~0 << (bytes - 1));
+        v = cpu_to_le64(v);
+        memcpy(out, &v, bytes);
+    } else {
+        *out++ = 255;
+        bytes = 9;
+        put_unaligned_le64(v, out);
+    }
+
+    return bytes;
+}
+
+/**
+ * bch2_varint_decode - encode a variable length integer
+ * @in - varint to decode
+ * @end - end of buffer to decode from
+ * @out - on success, decoded integer
+ *
+ * Returns the size in bytes of the decoded integer - or -1 on failure (would
+ * have read past the end of the buffer)
+ */
+int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
+{
+    unsigned bytes = likely(in < end)
+        ? ffz(*in & 255) + 1
+        : 1;
+    u64 v;
+
+    if (unlikely(in + bytes > end))
+        return -1;
+
+    if (likely(bytes < 9)) {
+        v = 0;
+        memcpy(&v, in, bytes);
+        v = le64_to_cpu(v);
+        v >>= bytes;
+    } else {
+        v = get_unaligned_le64(++in);
+    }
+
+    *out = v;
+    return bytes;
+}
+
+/**
+ * bch2_varint_encode_fast - fast version of bch2_varint_encode
+ *
+ * This version assumes it's always safe to write 8 bytes to @out, even if the
+ * encoded integer would be smaller.
+ */
+int bch2_varint_encode_fast(u8 *out, u64 v)
+{
+    unsigned bits = fls64(v|1);
+    unsigned bytes = DIV_ROUND_UP(bits, 7);
+
     if (likely(bytes < 9)) {
         v <<= bytes;
         v |= ~(~0 << (bytes - 1));
@@ -22,7 +87,13 @@ int bch2_varint_encode(u8 *out, u64 v)
     return bytes;
 }

-int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
+/**
+ * bch2_varint_decode_fast - fast version of bch2_varint_decode
+ *
+ * This version assumes that it is safe to read at most 8 bytes past the end of
+ * @end (we still return an error if the varint extends past @end).
+ */
+int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
 {
     u64 v = get_unaligned_le64(in);
     unsigned bytes = ffz(v & 255) + 1;
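[Editor's note: the scheme documented above is a prefix varint — the count of trailing 1 bits in the first byte (terminated by a 0 bit) encodes how many bytes the integer occupies, and an all-ones first byte (255) marks a full 8-byte payload. Below is a standalone round-trip demo of the same format; it is a sketch for illustration, assumes a little-endian host, and is not the kernel code.]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static int demo_encode(uint8_t *out, uint64_t v)
    {
        unsigned bits = 64 - __builtin_clzll(v | 1);   /* fls64(v|1) */
        unsigned bytes = (bits + 6) / 7;               /* DIV_ROUND_UP(bits, 7) */

        if (bytes < 9) {
            v <<= bytes;
            v |= ~(~0ULL << (bytes - 1));   /* bytes-1 trailing 1s, then a 0 */
            memcpy(out, &v, bytes);         /* little-endian host assumed */
        } else {
            *out++ = 255;                   /* eight 1 bits: 8-byte payload */
            bytes = 9;
            memcpy(out, &v, 8);
        }
        return bytes;
    }

    static int demo_decode(const uint8_t *in, uint64_t *out)
    {
        unsigned bytes = __builtin_ctzll(~(uint64_t)*in) + 1;  /* ffz(*in) + 1 */
        uint64_t v = 0;

        if (bytes < 9) {
            memcpy(&v, in, bytes);
            v >>= bytes;                    /* drop the length-marker bits */
        } else {
            memcpy(&v, in + 1, 8);
        }
        *out = v;
        return bytes;
    }

    int main(void)
    {
        uint64_t tests[] = { 0, 127, 128, 300, 1ULL << 56, UINT64_MAX };

        for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
            uint8_t buf[9];
            uint64_t v;
            int n = demo_encode(buf, tests[i]);

            demo_decode(buf, &v);
            printf("%llu -> %d byte(s) -> %llu\n",
                   (unsigned long long)tests[i], n, (unsigned long long)v);
        }
        return 0;
    }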
@@ -5,4 +5,7 @@
 int bch2_varint_encode(u8 *, u64);
 int bch2_varint_decode(const u8 *, const u8 *, u64 *);

+int bch2_varint_encode_fast(u8 *, u64);
+int bch2_varint_decode_fast(const u8 *, const u8 *, u64 *);
+
 #endif /* _BCACHEFS_VARINT_H */