Update bcachefs sources to 916d92b6b4 bcachefs: Add error messages for memory allocation failures
This commit is contained in:
parent 617dc6dd68
commit 12fe5797ad
@@ -1 +1 @@
-078a1a596a74ade60db6eee0f0be927defb7abed
+916d92b6b46b13873118a608ff16212f966375ba
@@ -859,7 +859,8 @@ static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
 static bool allocator_thread_running(struct bch_dev *ca)
 {
 	unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
-		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
+		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags) &&
+		test_bit(BCH_FS_ALLOC_REPLAY_DONE, &ca->fs->flags)
 		? ALLOCATOR_running
 		: ALLOCATOR_stopped;
 	alloc_thread_set_state(ca, state);
@@ -510,6 +510,7 @@ enum {
 	BCH_FS_INITIAL_GC_DONE,
 	BCH_FS_INITIAL_GC_UNFIXED,
 	BCH_FS_TOPOLOGY_REPAIR_DONE,
+	BCH_FS_ALLOC_REPLAY_DONE,
 	BCH_FS_BTREE_INTERIOR_REPLAY_DONE,
 	BCH_FS_FSCK_DONE,
 	BCH_FS_STARTED,
@@ -83,6 +83,8 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 	b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
 			   PROT_READ|PROT_WRITE|PROT_EXEC,
 			   MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+	if (b->aux_data == MAP_FAILED)
+		b->aux_data = NULL;
 #endif
 	if (!b->aux_data) {
 		kvpfree(b->data, btree_bytes(c));
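Note: this hunk appears to be in the userspace (bcachefs-tools) build path of the btree node cache, where the node's aux data comes from mmap(). POSIX mmap() reports failure by returning MAP_FAILED ((void *) -1), never NULL, so the result has to be normalized before the shared `if (!b->aux_data)` error path can catch it. A minimal standalone sketch of the same check — plain POSIX, nothing bcachefs-specific:

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t bytes = 1 << 20;
	void *p = mmap(NULL, bytes, PROT_READ|PROT_WRITE|PROT_EXEC,
		       MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);

	/* mmap() signals failure with MAP_FAILED, not NULL; normalizing to
	 * NULL lets a single !p test cover both allocation strategies */
	if (p == MAP_FAILED)
		p = NULL;

	if (!p) {
		perror("mmap");
		return EXIT_FAILURE;
	}

	printf("mapped %zu bytes at %p\n", bytes, p);
	munmap(p, bytes);
	return 0;
}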
@@ -169,11 +169,11 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
 	new->v.min_key = new_min;
 	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
 
-	ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
-	kfree(new);
-	if (ret)
+	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+	if (ret) {
+		kfree(new);
 		return ret;
+	}
 
 	bch2_btree_node_drop_keys_outside_node(b);
@@ -198,11 +198,11 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
 	new->k.p = new_max;
 	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
 
-	ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
-	kfree(new);
-	if (ret)
+	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+	if (ret) {
+		kfree(new);
 		return ret;
+	}
 
 	bch2_btree_node_drop_keys_outside_node(b);
@@ -690,10 +690,10 @@ found:
 		}
 	}
 
-	ret = bch2_journal_key_insert(c, btree_id, level, new);
-	kfree(new);
-	if (!ret)
+	ret = bch2_journal_key_insert_take(c, btree_id, level, new);
+	if (ret)
+		kfree(new);
+	else
 		*k = bkey_i_to_s_c(new);
 	}
 fsck_err:
@@ -2437,15 +2437,18 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		if (bkey_cmp(iter->pos, next) < 0) {
 			bkey_init(&iter->k);
 			iter->k.p = iter->pos;
-			bch2_key_resize(&iter->k,
-					min_t(u64, KEY_SIZE_MAX,
-					      (next.inode == iter->pos.inode
-					       ? next.offset
-					       : KEY_OFFSET_MAX) -
-					      iter->pos.offset));
+
+			if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+				bch2_key_resize(&iter->k,
+						min_t(u64, KEY_SIZE_MAX,
+						      (next.inode == iter->pos.inode
+						       ? next.offset
+						       : KEY_OFFSET_MAX) -
+						      iter->pos.offset));
+				EBUG_ON(!iter->k.size);
+			}
 
 			k = (struct bkey_s_c) { &iter->k, NULL };
-			EBUG_ON(!k.k->size);
 		}
 	}
 
@@ -146,19 +146,23 @@ bkey_cached_reuse(struct btree_key_cache *c)
 }
 
 static struct bkey_cached *
-btree_key_cache_create(struct btree_key_cache *c,
+btree_key_cache_create(struct bch_fs *c,
 		       enum btree_id btree_id,
 		       struct bpos pos)
 {
+	struct btree_key_cache *bc = &c->btree_key_cache;
 	struct bkey_cached *ck;
 	bool was_new = true;
 
-	ck = bkey_cached_alloc(c);
+	ck = bkey_cached_alloc(bc);
 
 	if (unlikely(!ck)) {
-		ck = bkey_cached_reuse(c);
-		if (unlikely(!ck))
+		ck = bkey_cached_reuse(bc);
+		if (unlikely(!ck)) {
+			bch_err(c, "error allocating memory for key cache item, btree %s",
+				bch2_btree_ids[btree_id]);
 			return ERR_PTR(-ENOMEM);
+		}
 
 		was_new = false;
 	}
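Note: the point of widening btree_key_cache_create()'s first argument from the key cache to the whole filesystem is that the failure path can now call bch_err(), which needs the bch_fs; the key cache itself is recovered from the embedded member. A standalone sketch of the pattern, with hypothetical names (fs, cache, fs_err) standing in for bch_fs, btree_key_cache and bch_err:

#include <stdio.h>

struct cache { int nr_keys; };
struct fs {
	const char *name;
	struct cache cache;	/* embedded, as btree_key_cache is in bch_fs */
};

/* error reporting needs the outer object... */
static void fs_err(struct fs *fs, const char *msg)
{
	fprintf(stderr, "%s: %s\n", fs->name, msg);
}

/* ...so take the outer object and derive the inner one, rather than
 * taking the inner object and losing access to the logger: */
static int cache_create(struct fs *fs)
{
	struct cache *c = &fs->cache;

	if (c->nr_keys < 0) {	/* stand-in for an allocation failure */
		fs_err(fs, "error allocating memory for key cache item");
		return -1;
	}
	c->nr_keys++;
	return 0;
}

int main(void)
{
	struct fs fs = { .name = "test-fs" };
	return cache_create(&fs) ? 1 : 0;
}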
@@ -175,7 +179,7 @@ btree_key_cache_create(struct btree_key_cache *c,
 	ck->valid		= false;
 	ck->flags		= 1U << BKEY_CACHED_ACCESSED;
 
-	if (unlikely(rhashtable_lookup_insert_fast(&c->table,
+	if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
 						   &ck->hash,
 						   bch2_btree_key_cache_params))) {
 		/* We raced with another fill: */
@@ -185,15 +189,15 @@ btree_key_cache_create(struct btree_key_cache *c,
 			six_unlock_intent(&ck->c.lock);
 			kfree(ck);
 		} else {
-			mutex_lock(&c->lock);
-			bkey_cached_free(c, ck);
-			mutex_unlock(&c->lock);
+			mutex_lock(&bc->lock);
+			bkey_cached_free(bc, ck);
+			mutex_unlock(&bc->lock);
 		}
 
 		return NULL;
 	}
 
-	atomic_long_inc(&c->nr_keys);
+	atomic_long_inc(&bc->nr_keys);
 
 	six_unlock_write(&ck->c.lock);
 
@@ -204,6 +208,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 				struct btree_path *ck_path,
 				struct bkey_cached *ck)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	unsigned new_u64s = 0;
@@ -233,6 +238,8 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 		new_u64s = roundup_pow_of_two(new_u64s);
 		new_k	= kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
 		if (!new_k) {
+			bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+				bch2_btree_ids[ck->key.btree_id], new_u64s);
 			ret = -ENOMEM;
 			goto err;
 		}
@@ -293,8 +300,7 @@ retry:
 		return 0;
 	}
 
-	ck = btree_key_cache_create(&c->btree_key_cache,
-				    path->btree_id, path->pos);
+	ck = btree_key_cache_create(c, path->btree_id, path->pos);
 	ret = PTR_ERR_OR_ZERO(ck);
 	if (ret)
 		goto err;
@@ -63,7 +63,7 @@ int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
 int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
 				  struct bpos, struct bpos, unsigned, u64 *);
 int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
-			    struct bpos, struct bpos, u64 *);
+			    struct bpos, struct bpos, unsigned, u64 *);
 
 int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
 			    struct btree *, unsigned);
@@ -82,12 +82,12 @@ struct btree_update {
 	/* Nodes being freed: */
 	struct keylist		old_keys;
 	u64			_old_keys[BTREE_UPDATE_NODES_MAX *
-					  BKEY_BTREE_PTR_VAL_U64s_MAX];
+					  BKEY_BTREE_PTR_U64s_MAX];
 
 	/* Nodes being added: */
 	struct keylist		new_keys;
 	u64			_new_keys[BTREE_UPDATE_NODES_MAX *
-					  BKEY_BTREE_PTR_VAL_U64s_MAX];
+					  BKEY_BTREE_PTR_U64s_MAX];
 
 	/* New nodes, that will be made reachable by this update: */
 	struct btree		*new_nodes[BTREE_UPDATE_NODES_MAX];
@@ -308,6 +308,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 			    struct btree_path *path,
 			    unsigned u64s)
 {
+	struct bch_fs *c = trans->c;
 	struct bkey_cached *ck = (void *) path->l[0].b;
 	unsigned new_u64s;
 	struct bkey_i *new_k;
@@ -315,7 +316,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	EBUG_ON(path->level);
 
 	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
-	    bch2_btree_key_cache_must_wait(trans->c) &&
+	    bch2_btree_key_cache_must_wait(c) &&
 	    !(trans->flags & BTREE_INSERT_JOURNAL_RECLAIM))
 		return BTREE_INSERT_NEED_JOURNAL_RECLAIM;
 
@@ -330,8 +331,11 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 
 	new_u64s	= roundup_pow_of_two(u64s);
 	new_k		= krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
-	if (!new_k)
+	if (!new_k) {
+		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+			bch2_btree_ids[path->btree_id], new_u64s);
 		return -ENOMEM;
+	}
 
 	ck->u64s	= new_u64s;
 	ck->k		= new_k;
@@ -1463,7 +1467,7 @@ retry:
 		 */
 		delete.k.p = iter.pos;
 
-		if (btree_node_type_is_extents(id)) {
+		if (iter.flags & BTREE_ITER_IS_EXTENTS) {
 			unsigned max_sectors =
 				KEY_SIZE_MAX & (~0 << trans->c->block_bits);
 
@@ -1500,8 +1504,10 @@ retry:
  */
 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
 			    struct bpos start, struct bpos end,
+			    unsigned iter_flags,
 			    u64 *journal_seq)
 {
 	return bch2_trans_do(c, NULL, journal_seq, 0,
-			     bch2_btree_delete_range_trans(&trans, id, start, end, 0, journal_seq));
+			     bch2_btree_delete_range_trans(&trans, id, start, end,
+							   iter_flags, journal_seq));
 }
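Note: with the new `unsigned iter_flags` parameter, bch2_btree_delete_range() callers pick the iterator behaviour instead of having it hard-coded. The shapes below are assembled from call sites elsewhere in this same commit (ec.c and the quota/device-removal paths pass 0; the tests pass BTREE_ITER_ALL_SNAPSHOTS to sweep every snapshot) — a usage illustration, not new API:

/* sweep a whole btree across all snapshots (as in the updated tests): */
ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
			      POS_MIN, SPOS_MAX,
			      BTREE_ITER_ALL_SNAPSHOTS,
			      NULL);

/* delete one stripe with default iterator flags (as in ec_stripe_delete): */
ret = bch2_btree_delete_range(c, BTREE_ID_stripes,
			      POS(0, idx), POS(0, idx + 1),
			      0, NULL);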
@@ -940,9 +940,11 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 	BUG_ON(!(flags & BTREE_TRIGGER_GC));
 
 	m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
-
-	if (!m)
+	if (!m) {
+		bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+			(u64) p.idx);
 		return -ENOMEM;
+	}
 
 	spin_lock(&c->ec_stripes_heap_lock);
 
@@ -1053,7 +1055,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 	bool gc = flags & BTREE_TRIGGER_GC;
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
-	size_t idx = new.k->p.offset;
+	u64 idx = new.k->p.offset;
 	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
 		? bkey_s_c_to_stripe(old).v : NULL;
 	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
@@ -1071,7 +1073,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 
 		bch2_bkey_val_to_text(&PBUF(buf1), c, old);
 		bch2_bkey_val_to_text(&PBUF(buf2), c, new);
-		bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
+		bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
 				    "old %s\n"
 				    "new %s", idx, buf1, buf2);
 		bch2_inconsistent_error(c);
@@ -1103,9 +1105,11 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 		struct gc_stripe *m =
 			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
 
-		if (!m)
+		if (!m) {
+			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+				idx);
 			return -ENOMEM;
+		}
 		/*
 		 * This will be wrong when we bring back runtime gc: we should
 		 * be unmarking the old key and then marking the new key
@@ -677,7 +677,7 @@ static int ec_stripe_delete(struct bch_fs *c, size_t idx)
 	return bch2_btree_delete_range(c, BTREE_ID_stripes,
 				       POS(0, idx),
 				       POS(0, idx + 1),
-				       NULL);
+				       0, NULL);
 }
 
 static void ec_stripe_delete_work(struct work_struct *work)
@@ -564,14 +564,17 @@ static struct inode_walker inode_walker_init(void)
 	return (struct inode_walker) { 0, };
 }
 
-static int inode_walker_realloc(struct inode_walker *w)
+static int inode_walker_realloc(struct bch_fs *c, struct inode_walker *w)
 {
 	if (w->nr == w->size) {
 		size_t new_size = max_t(size_t, 8UL, w->size * 2);
 		void *d = krealloc(w->d, new_size * sizeof(w->d[0]),
 				   GFP_KERNEL);
-		if (!d)
+		if (!d) {
+			bch_err(c, "fsck: error allocating memory for inode_walker, size %zu",
+				new_size);
 			return -ENOMEM;
+		}
 
 		w->d = d;
 		w->size = new_size;
@@ -586,7 +589,7 @@ static int add_inode(struct bch_fs *c, struct inode_walker *w,
 	struct bch_inode_unpacked u;
 	int ret;
 
-	ret = inode_walker_realloc(w);
+	ret = inode_walker_realloc(c, w);
 	if (ret)
 		return ret;
 
@@ -647,7 +650,7 @@ found:
 	while (i && w->d[i - 1].snapshot > pos.snapshot)
 		--i;
 
-	ret = inode_walker_realloc(w);
+	ret = inode_walker_realloc(c, w);
 	if (ret)
 		return ret;
 
@@ -1132,9 +1135,9 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
 		count2 = lockrestart_do(trans,
 			bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot));
 
-		if (fsck_err_on(i->count != count2, c,
-			    "fsck counted i_sectors wrong: got %llu should be %llu",
-			    i->count, count2)) {
+		if (i->count != count2) {
+			bch_err(c, "fsck counted i_sectors wrong: got %llu should be %llu",
+				i->count, count2);
 			i->count = count2;
 			if (i->inode.bi_sectors == i->count)
 				continue;
@@ -1316,9 +1319,9 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
 		count2 = lockrestart_do(trans,
 			bch2_count_subdirs(trans, w->cur_inum, i->snapshot));
 
-		if (fsck_err_on(i->count != count2, c,
-			    "directory %llu:%u: fsck counted subdirectories wrong, got %llu should be %llu",
-			    w->cur_inum, i->snapshot, i->count, count2)) {
+		if (i->count != count2) {
+			bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
+				i->count, count2);
 			i->count = count2;
 			if (i->inode.bi_nlink == i->count)
 				continue;
@@ -1812,7 +1815,8 @@ static bool path_is_dup(struct pathbuf *p, u64 inum, u32 snapshot)
 	return false;
 }
 
-static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
+static int path_down(struct bch_fs *c, struct pathbuf *p,
+		     u64 inum, u32 snapshot)
 {
 	if (p->nr == p->size) {
 		size_t new_size = max_t(size_t, 256UL, p->size * 2);
@@ -1820,6 +1824,8 @@ static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
 				  new_size * sizeof(p->entries[0]),
 				  GFP_KERNEL);
 		if (!n) {
+			bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
+				new_size);
 			return -ENOMEM;
 		}
 
@@ -1893,7 +1899,7 @@ static int check_path(struct btree_trans *trans,
 		if (!S_ISDIR(inode->bi_mode))
 			break;
 
-		ret = path_down(p, inode->bi_inum, snapshot);
+		ret = path_down(c, p, inode->bi_inum, snapshot);
 		if (ret) {
 			bch_err(c, "memory allocation failure");
 			return ret;
@@ -1998,12 +2004,15 @@ struct nlink_table {
 	} *d;
 };
 
-static int add_nlink(struct nlink_table *t, u64 inum, u32 snapshot)
+static int add_nlink(struct bch_fs *c, struct nlink_table *t,
+		     u64 inum, u32 snapshot)
 {
 	if (t->nr == t->size) {
 		size_t new_size = max_t(size_t, 128UL, t->size * 2);
 		void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL);
 		if (!d) {
+			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
+				new_size);
 			return -ENOMEM;
 		}
 
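Note: inode_walker_realloc(), path_down() and add_nlink() above all share the same grow-on-demand shape, and the commit's change to each is purely diagnostic — say what ran out of memory and how large the request was before returning -ENOMEM. A standalone sketch of that pattern, with a hypothetical table type and plain realloc/fprintf standing in for krealloc/bch_err:

#include <stdio.h>
#include <stdlib.h>

struct table {
	size_t nr, size;
	int *d;
};

/* grow-on-demand with a doubling policy and a minimum capacity,
 * mirroring the reallocation helpers above */
static int table_push(struct table *t, int v)
{
	if (t->nr == t->size) {
		size_t new_size = t->size * 2 > 128 ? t->size * 2 : 128;
		void *d = realloc(t->d, new_size * sizeof(t->d[0]));

		if (!d) {
			/* the commit's point: report *what* failed and how big */
			fprintf(stderr, "error allocating memory for table, size %zu\n",
				new_size);
			return -1;
		}
		t->d = d;
		t->size = new_size;
	}
	t->d[t->nr++] = v;
	return 0;
}

int main(void)
{
	struct table t = { 0 };

	for (int i = 0; i < 1000; i++)
		if (table_push(&t, i))
			return 1;
	printf("nr=%zu size=%zu\n", t.nr, t.size);
	free(t.d);
	return 0;
}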
@@ -2093,7 +2102,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
 		if (!u.bi_nlink)
 			continue;
 
-		ret = add_nlink(t, k.k->p.offset, k.k->p.snapshot);
+		ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
 		if (ret) {
 			*end = k.k->p.offset;
 			ret = 0;
@@ -585,49 +585,62 @@ found_slot:
 static int bch2_inode_delete_keys(struct btree_trans *trans,
 				  subvol_inum inum, enum btree_id id)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bkey_i delete;
-	u32 snapshot;
+	u64 offset = 0;
 	int ret = 0;
 
-	/*
-	 * We're never going to be deleting extents, no need to use an extent
-	 * iterator:
-	 */
-	bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
-			     BTREE_ITER_NOT_EXTENTS|
-			     BTREE_ITER_INTENT);
+	while (!ret || ret == -EINTR) {
+		struct disk_reservation disk_res =
+			bch2_disk_reservation_init(trans->c, 0);
+		struct btree_iter iter;
+		struct bkey_s_c k;
+		struct bkey_i delete;
+		u32 snapshot;
 
-	while (1) {
 		bch2_trans_begin(trans);
 
 		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
 		if (ret)
-			goto err;
+			continue;
 
-		bch2_btree_iter_set_snapshot(&iter, snapshot);
-
+		bch2_trans_iter_init(trans, &iter, id,
+				     SPOS(inum.inum, offset, snapshot),
+				     BTREE_ITER_INTENT);
 		k = bch2_btree_iter_peek(&iter);
 
+		if (!k.k || iter.pos.inode != inum.inum) {
+			bch2_trans_iter_exit(trans, &iter);
+			break;
+		}
+
 		ret = bkey_err(k);
 		if (ret)
 			goto err;
 
-		if (!k.k || iter.pos.inode != inum.inum)
-			break;
-
 		bkey_init(&delete.k);
 		delete.k.p = iter.pos;
 
+		if (btree_node_type_is_extents(iter.btree_id)) {
+			unsigned max_sectors =
+				min_t(u64, U64_MAX - iter.pos.offset,
+				      KEY_SIZE_MAX & (~0 << trans->c->block_bits));
+
+			/* create the biggest key we can */
+			bch2_key_resize(&delete.k, max_sectors);
+
+			ret = bch2_extent_trim_atomic(trans, &iter, &delete);
+			if (ret)
+				goto err;
+		}
+
 		ret = bch2_trans_update(trans, &iter, &delete, 0) ?:
-		      bch2_trans_commit(trans, NULL, NULL,
+		      bch2_trans_commit(trans, &disk_res, NULL,
 					BTREE_INSERT_NOFAIL);
+		bch2_disk_reservation_put(trans->c, &disk_res);
 err:
-		if (ret && ret != -EINTR)
-			break;
+		offset = iter.pos.offset;
+		bch2_trans_iter_exit(trans, &iter);
 	}
 
-	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
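Note: the rewritten bch2_inode_delete_keys() creates and tears down its iterator inside the loop and carries only a bare `offset` across iterations, so a transaction restart (-EINTR) simply goes around again and resumes from the recorded position; it also now handles extent btrees itself, trimming one atomically-deletable extent per commit, instead of relying on BTREE_ITER_NOT_EXTENTS. A minimal standalone sketch of the resume-cursor loop, with a hypothetical do_one() standing in for the peek/delete/commit cycle:

#include <stdio.h>
#include <errno.h>

/* stand-in for one peek+delete+commit cycle: processes the item at
 * *cursor and advances it, or "restarts" once like -EINTR would */
static int do_one(unsigned long *cursor, unsigned long end)
{
	static int restarted;

	if (*cursor >= end)
		return 1;		/* done */
	if (*cursor == 5 && !restarted++) {
		/* simulated transaction restart: nothing is lost, the
		 * cursor records how far we got */
		return -EINTR;
	}
	(*cursor)++;
	return 0;
}

int main(void)
{
	unsigned long cursor = 0;	/* like 'offset' in the new code */
	int ret = 0;

	while (!ret || ret == -EINTR)
		ret = do_one(&cursor, 20);

	printf("stopped at %lu, ret %d\n", cursor, ret);
	return ret == 1 ? 0 : 1;
}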
@@ -205,6 +205,11 @@ static int bch2_copygc(struct bch_fs *c)
 		up_read(&ca->bucket_lock);
 	}
 
+	if (!h->used) {
+		bch_err_ratelimited(c, "copygc requested to run but found no buckets to move!");
+		return 0;
+	}
+
 	/*
 	 * Our btree node allocations also come out of RESERVE_MOVINGGC:
 	 */
@@ -570,7 +570,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_USR, 0),
 					      POS(QTYP_USR + 1, 0),
-					      NULL);
+					      0, NULL);
 		if (ret)
 			return ret;
 	}
@@ -582,7 +582,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_GRP, 0),
 					      POS(QTYP_GRP + 1, 0),
-					      NULL);
+					      0, NULL);
 		if (ret)
 			return ret;
 	}
@@ -594,7 +594,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_PRJ, 0),
 					      POS(QTYP_PRJ + 1, 0),
-					      NULL);
+					      0, NULL);
 		if (ret)
 			return ret;
 	}
@@ -109,18 +109,27 @@ static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
 	iter->idx++;
 }
 
-int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
-			    unsigned level, struct bkey_i *k)
+int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
+				 unsigned level, struct bkey_i *k)
 {
 	struct journal_key n = {
 		.btree_id	= id,
 		.level		= level,
+		.k		= k,
 		.allocated	= true
 	};
 	struct journal_keys *keys = &c->journal_keys;
 	struct journal_iter *iter;
 	unsigned idx = journal_key_search(keys, id, level, k->k.p);
 
+	if (idx < keys->nr &&
+	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
+		if (keys->d[idx].allocated)
+			kfree(keys->d[idx].k);
+		keys->d[idx] = n;
+		return 0;
+	}
+
 	if (keys->nr == keys->size) {
 		struct journal_keys new_keys = {
 			.nr			= keys->nr,
@@ -140,27 +149,31 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
 		*keys = new_keys;
 	}
 
-	n.k = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
-	if (!n.k)
-		return -ENOMEM;
-
-	bkey_copy(n.k, k);
-
-	if (idx < keys->nr &&
-	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
-		if (keys->d[idx].allocated)
-			kfree(keys->d[idx].k);
-		keys->d[idx] = n;
-	} else {
-		array_insert_item(keys->d, keys->nr, idx, n);
+	array_insert_item(keys->d, keys->nr, idx, n);
 
-		list_for_each_entry(iter, &c->journal_iters, list)
-			journal_iter_fix(c, iter, idx);
-	}
+	list_for_each_entry(iter, &c->journal_iters, list)
+		journal_iter_fix(c, iter, idx);
 
 	return 0;
 }
 
+int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
+			    unsigned level, struct bkey_i *k)
+{
+	struct bkey_i *n;
+	int ret;
+
+	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
+	if (!n)
+		return -ENOMEM;
+
+	bkey_copy(n, k);
+	ret = bch2_journal_key_insert_take(c, id, level, n);
+	if (ret)
+		kfree(n);
+	return ret;
+}
+
 int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
 			    unsigned level, struct bpos pos)
 {
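Note: the `_take` suffix follows the usual kernel ownership convention: bch2_journal_key_insert_take() takes ownership of the caller's key (freeing a previously allocated entry it replaces), while bch2_journal_key_insert() stays a copying wrapper that allocates a private copy and frees it if the take-insert fails. That is why set_node_min()/set_node_max() earlier in this diff now free their key only on error. A standalone sketch of the two-function pattern, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct store { char *owned; };

/* _take variant: on success the store owns 'k' and will free it later */
static int store_insert_take(struct store *s, char *k)
{
	if (!k)
		return -1;
	free(s->owned);		/* replacing an older owned entry frees it */
	s->owned = k;
	return 0;
}

/* copying wrapper: the caller keeps ownership of 'k'; we duplicate it
 * and free the duplicate ourselves if the take-insert fails */
static int store_insert(struct store *s, const char *k)
{
	char *n = strdup(k);
	int ret;

	if (!n)
		return -1;
	ret = store_insert_take(s, n);
	if (ret)
		free(n);
	return ret;
}

int main(void)
{
	struct store s = { 0 };

	store_insert(&s, "copied");		/* caller's string untouched */
	store_insert_take(&s, strdup("taken"));	/* ownership transfers */
	puts(s.owned);
	free(s.owned);
	return 0;
}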
@@ -548,8 +561,8 @@ static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
 
 static int journal_sort_seq_cmp(const void *_l, const void *_r)
 {
-	const struct journal_key *l = _l;
-	const struct journal_key *r = _r;
+	const struct journal_key *l = *((const struct journal_key **)_l);
+	const struct journal_key *r = *((const struct journal_key **)_r);
 
 	return  cmp_int(r->level,	l->level) ?:
 		cmp_int(l->journal_seq, r->journal_seq) ?:
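Note: journal replay now sorts an array of pointers into c->journal_keys rather than sorting (and physically permuting) the key structs themselves, so the canonical array keeps its search order for the journal iterators while replay gets its own sequence-ordered view; the comparator accordingly receives pointers to pointers and must dereference twice. A standalone demonstration of the same comparator shape with libc qsort():

#include <stdio.h>
#include <stdlib.h>

struct key { int level, seq; };

/* qsort hands us pointers to array *elements*; the elements here are
 * themselves pointers, hence the double indirection */
static int seq_cmp(const void *_l, const void *_r)
{
	const struct key *l = *((const struct key **) _l);
	const struct key *r = *((const struct key **) _r);

	if (l->level != r->level)
		return r->level > l->level ? 1 : -1;	/* higher level first */
	return (l->seq > r->seq) - (l->seq < r->seq);	/* then by seq */
}

int main(void)
{
	struct key d[4] = { {0, 3}, {1, 1}, {0, 2}, {1, 4} };
	struct key *sorted[4];

	/* the original array 'd' keeps its order; only 'sorted' is permuted */
	for (int i = 0; i < 4; i++)
		sorted[i] = &d[i];
	qsort(sorted, 4, sizeof(sorted[0]), seq_cmp);

	for (int i = 0; i < 4; i++)
		printf("level %d seq %d\n", sorted[i]->level, sorted[i]->seq);
	return 0;
}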
@@ -557,18 +570,30 @@ static int journal_sort_seq_cmp(const void *_l, const void *_r)
 		bpos_cmp(l->k->k.p, r->k->k.p);
 }
 
-static int bch2_journal_replay(struct bch_fs *c,
-			       struct journal_keys keys)
+static int bch2_journal_replay(struct bch_fs *c)
 {
+	struct journal_keys *keys = &c->journal_keys;
+	struct journal_key **keys_sorted, *k;
 	struct journal *j = &c->journal;
-	struct journal_key *i;
+	struct bch_dev *ca;
+	unsigned idx;
+	size_t i;
 	u64 seq;
 	int ret;
 
-	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);
+	keys_sorted = kmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
+	if (!keys_sorted)
+		return -ENOMEM;
+
+	for (i = 0; i < keys->nr; i++)
+		keys_sorted[i] = &keys->d[i];
+
+	sort(keys_sorted, keys->nr,
+	     sizeof(keys_sorted[0]),
+	     journal_sort_seq_cmp, NULL);
 
-	if (keys.nr)
-		replay_now_at(j, keys.journal_seq_base);
+	if (keys->nr)
+		replay_now_at(j, keys->journal_seq_base);
 
 	seq = j->replay_journal_seq;
 
@@ -576,26 +601,35 @@ static int bch2_journal_replay(struct bch_fs *c,
 	 * First replay updates to the alloc btree - these will only update the
 	 * btree key cache:
 	 */
-	for_each_journal_key(keys, i) {
+	for (i = 0; i < keys->nr; i++) {
+		k = keys_sorted[i];
+
 		cond_resched();
 
-		if (!i->level && i->btree_id == BTREE_ID_alloc) {
-			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
-			ret = bch2_journal_replay_key(c, i);
+		if (!k->level && k->btree_id == BTREE_ID_alloc) {
+			j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
+			ret = bch2_journal_replay_key(c, k);
 			if (ret)
 				goto err;
 		}
 	}
 
+	/* Now we can start the allocator threads: */
+	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
+	for_each_member_device(ca, c, idx)
+		bch2_wake_allocator(ca);
+
 	/*
 	 * Next replay updates to interior btree nodes:
 	 */
-	for_each_journal_key(keys, i) {
+	for (i = 0; i < keys->nr; i++) {
+		k = keys_sorted[i];
+
 		cond_resched();
 
-		if (i->level) {
-			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
-			ret = bch2_journal_replay_key(c, i);
+		if (k->level) {
+			j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
+			ret = bch2_journal_replay_key(c, k);
 			if (ret)
 				goto err;
 		}
@@ -615,15 +649,17 @@ static int bch2_journal_replay(struct bch_fs *c,
 	/*
 	 * Now replay leaf node updates:
 	 */
-	for_each_journal_key(keys, i) {
+	for (i = 0; i < keys->nr; i++) {
+		k = keys_sorted[i];
+
 		cond_resched();
 
-		if (i->level || i->btree_id == BTREE_ID_alloc)
+		if (k->level || k->btree_id == BTREE_ID_alloc)
 			continue;
 
-		replay_now_at(j, keys.journal_seq_base + i->journal_seq);
+		replay_now_at(j, keys->journal_seq_base + k->journal_seq);
 
-		ret = bch2_journal_replay_key(c, i);
+		ret = bch2_journal_replay_key(c, k);
 		if (ret)
 			goto err;
 	}
@@ -633,10 +669,14 @@ static int bch2_journal_replay(struct bch_fs *c,
 
 	bch2_journal_set_replay_done(j);
 	bch2_journal_flush_all_pins(j);
+	kfree(keys_sorted);
+
 	return bch2_journal_error(j);
 err:
 	bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
-		ret, bch2_btree_ids[i->btree_id], i->level);
+		ret, bch2_btree_ids[k->btree_id], k->level);
+	kfree(keys_sorted);
+
 	return ret;
 }
 
@@ -1208,7 +1248,7 @@ use_clean:
 
 	bch_verbose(c, "starting journal replay");
 	err = "journal replay failed";
-	ret = bch2_journal_replay(c, c->journal_keys);
+	ret = bch2_journal_replay(c);
 	if (ret)
 		goto err;
 	bch_verbose(c, "journal replay done");
@@ -1380,6 +1420,7 @@ int bch2_fs_initialize(struct bch_fs *c)
 	for (i = 0; i < BTREE_ID_NR; i++)
 		bch2_btree_root_alloc(c, i);
 
+	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
 	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
 	set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);
 
@@ -31,6 +31,8 @@ struct btree_and_journal_iter {
 	} last;
 };
 
+int bch2_journal_key_insert_take(struct bch_fs *, enum btree_id,
+				 unsigned, struct bkey_i *);
 int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
 			    unsigned, struct bkey_i *);
 int bch2_journal_key_delete(struct bch_fs *, enum btree_id,
@@ -1478,7 +1478,7 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
 	return bch2_btree_delete_range(c, BTREE_ID_alloc,
 				       POS(ca->dev_idx, 0),
 				       POS(ca->dev_idx + 1, 0),
-				       NULL);
+				       0, NULL);
 }
 
 int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
@@ -1599,18 +1599,24 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 	int ret;
 
 	ret = bch2_read_super(path, &opts, &sb);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: error reading super: %i", ret);
 		return ret;
+	}
 
 	err = bch2_sb_validate(&sb);
-	if (err)
+	if (err) {
+		bch_err(c, "device add error: error validating super: %s", err);
 		return -EINVAL;
+	}
 
 	dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx];
 
 	err = bch2_dev_may_add(sb.sb, c);
-	if (err)
+	if (err) {
+		bch_err(c, "device add error: %s", err);
 		return -EINVAL;
+	}
 
 	ca = __bch2_dev_alloc(c, &dev_mi);
 	if (!ca) {
@@ -1624,24 +1630,27 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 		return ret;
 	}
 
-	err = "journal alloc failed";
 	ret = bch2_dev_journal_alloc(ca);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: journal alloc failed");
 		goto err;
+	}
 
 	down_write(&c->state_lock);
 	mutex_lock(&c->sb_lock);
 
-	err = "insufficient space in new superblock";
 	ret = bch2_sb_from_fs(c, ca);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: new device superblock too small");
 		goto err_unlock;
+	}
 
 	mi = bch2_sb_get_members(ca->disk_sb.sb);
 
 	if (!bch2_sb_resize_members(&ca->disk_sb,
 				    le32_to_cpu(mi->field.u64s) +
 				    sizeof(dev_mi) / sizeof(u64))) {
+		bch_err(c, "device add error: new device superblock too small");
 		ret = -ENOSPC;
 		goto err_unlock;
 	}
@@ -1654,7 +1663,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 	if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx))
 		goto have_slot;
 no_slot:
-	err = "no slots available in superblock";
+	bch_err(c, "device add error: already have maximum number of devices");
 	ret = -ENOSPC;
 	goto err_unlock;
 
@@ -1663,12 +1672,12 @@ have_slot:
 	u64s = (sizeof(struct bch_sb_field_members) +
 		sizeof(struct bch_member) * nr_devices) / sizeof(u64);
 
-	err = "no space in superblock for member info";
-	ret = -ENOSPC;
-
 	mi = bch2_sb_resize_members(&c->disk_sb, u64s);
-	if (!mi)
+	if (!mi) {
+		bch_err(c, "device add error: no room in superblock for member info");
+		ret = -ENOSPC;
 		goto err_unlock;
+	}
 
 	/* success: */
 
@@ -1684,17 +1693,20 @@ have_slot:
 
 	bch2_dev_usage_journal_reserve(c);
 
-	err = "error marking superblock";
 	ret = bch2_trans_mark_dev_sb(c, ca);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: error marking new superblock: %i", ret);
 		goto err_late;
+	}
 
 	ca->new_fs_bucket_idx = 0;
 
 	if (ca->mi.state == BCH_MEMBER_STATE_rw) {
 		ret = __bch2_dev_read_write(c, ca);
-		if (ret)
+		if (ret) {
+			bch_err(c, "device add error: error going RW on new device: %i", ret);
 			goto err_late;
+		}
 	}
 
 	up_write(&c->state_lock);
@@ -1707,11 +1719,9 @@ err:
 	if (ca)
 		bch2_dev_free(ca);
 	bch2_free_super(&sb);
-	bch_err(c, "Unable to add device: %s", err);
 	return ret;
 err_late:
 	up_write(&c->state_lock);
-	bch_err(c, "Error going rw after adding device: %s", err);
 	return -EINVAL;
 }
 
@@ -14,14 +14,14 @@ static void delete_test_keys(struct bch_fs *c)
 	int ret;
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
-				      SPOS(0, 0, U32_MAX),
-				      SPOS(0, U64_MAX, U32_MAX),
+				      POS_MIN, SPOS_MAX,
+				      BTREE_ITER_ALL_SNAPSHOTS,
 				      NULL);
 	BUG_ON(ret);
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      SPOS(0, 0, U32_MAX),
-				      SPOS(0, U64_MAX, U32_MAX),
+				      POS_MIN, SPOS_MAX,
+				      BTREE_ITER_ALL_SNAPSHOTS,
 				      NULL);
 	BUG_ON(ret);
 }
@@ -146,7 +146,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
-			   POS_MIN, 0, k, ret) {
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		if (k.k->p.inode)
 			break;
 
@@ -202,7 +202,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	for_each_btree_key(&trans, iter, BTREE_ID_extents,
-			   POS_MIN, 0, k, ret) {
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		BUG_ON(bkey_start_offset(k.k) != i);
 		i = k.k->p.offset;
 	}
@@ -256,8 +256,8 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
-			   0, k, ret) {
+	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		if (k.k->p.inode)
 			break;
 
@@ -272,7 +272,8 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
+			   SPOS(0, 0, U32_MAX),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BUG_ON(k.k->p.offset != i);
 		BUG_ON(bkey_deleted(k.k) != (i & 1));
@@ -321,8 +322,8 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
-			   0, k, ret) {
+	for_each_btree_key(&trans, iter, BTREE_ID_extents,
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		BUG_ON(bkey_start_offset(k.k) != i + 8);
 		BUG_ON(k.k->size != 8);
 		i += 16;
@@ -335,7 +336,8 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_extents,
+			   SPOS(0, 0, U32_MAX),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
 
@@ -363,7 +365,8 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
 	struct bkey_s_c k;
 
 	bch2_trans_init(&trans, c, 0, 0);
-	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
+			     SPOS(0, 0, U32_MAX), 0);
 
 	k = bch2_btree_iter_peek(&iter);
 	BUG_ON(k.k);
@@ -383,7 +386,8 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
 	struct bkey_s_c k;
 
 	bch2_trans_init(&trans, c, 0, 0);
-	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN, 0);
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+			     SPOS(0, 0, U32_MAX), 0);
 
 	k = bch2_btree_iter_peek(&iter);
 	BUG_ON(k.k);
@@ -406,8 +410,6 @@ static int insert_test_extent(struct bch_fs *c,
 	struct bkey_i_cookie k;
 	int ret;
 
-	//pr_info("inserting %llu-%llu v %llu", start, end, test_version);
-
 	bkey_cookie_init(&k.k_i);
 	k.k_i.k.p.offset = end;
 	k.k_i.k.p.snapshot = U32_MAX;
@@ -747,7 +749,9 @@ static int seq_delete(struct bch_fs *c, u64 nr)
 	int ret;
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      SPOS(0, 0, U32_MAX), POS_MAX, NULL);
+				      POS_MIN, SPOS_MAX,
+				      BTREE_ITER_ALL_SNAPSHOTS,
+				      NULL);
 	if (ret)
 		bch_err(c, "error in seq_delete: %i", ret);
 	return ret;
@@ -114,7 +114,7 @@ void bch2_hprint(struct printbuf *buf, s64 v)
 	 * 103 is magic: t is in the range [-1023, 1023] and we want
 	 * to turn it into [-9, 9]
 	 */
-	if (u && v < 100 && v > -100)
+	if (u && t && v < 100 && v > -100)
 		pr_buf(buf, ".%i", t / 103);
 	if (u)
 		pr_buf(buf, "%c", si_units[u]);
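Note: the bch2_hprint() fix is the added `t` test: when the remainder is zero (the value is an exact multiple of 1024), `t / 103` is 0 and the old code printed a spurious ".0", rendering 2048 as "2.0k" instead of "2k". A standalone sketch of the (simplified) logic with the fix applied:

#include <stdio.h>

static const char si_units[] = "?kMGTPEZY";

/* simplified userspace rendering of the bch2_hprint() logic, fixed */
static void hprint(char *buf, size_t len, long long v)
{
	int u, t = 0, n;

	for (u = 0; v >= 1024 || v <= -1024; u++) {
		t = v & 1023;	/* low 10 bits = remainder before the shift */
		v >>= 10;
	}

	n = snprintf(buf, len, "%lli", v);

	/* 103 is magic: t is in [0, 1023] here and t/103 lands in [0, 9];
	 * without the 't &&' test, 2048 would render as "2.0k" */
	if (u && t && v < 100 && v > -100)
		n += snprintf(buf + n, len - n, ".%i", t / 103);
	if (u)
		snprintf(buf + n, len - n, "%c", si_units[u]);
}

int main(void)
{
	long long samples[] = { 512, 2048, 2560, 1048576 };
	char buf[32];

	for (int i = 0; i < 4; i++) {
		hprint(buf, sizeof(buf), samples[i]);
		printf("%lld\t-> %s\n", samples[i], buf);
	}
	return 0;
}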