Mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to 916d92b6b4 bcachefs: Add error messages for memory allocation failures
parent 617dc6dd68
commit 12fe5797ad
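The unifying change in this update is that memory allocation failures now log what was being allocated, and for which btree, before bailing out with -ENOMEM. A minimal sketch of the pattern, lifted from the key cache hunks below (bch_err() and bch2_btree_ids[] are the existing bcachefs helpers those hunks use; the surrounding error handling varies per call site):

	new_u64s = roundup_pow_of_two(new_u64s);
	new_k    = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
	if (!new_k) {
		/* name the structure and btree involved, not just ENOMEM */
		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
			bch2_btree_ids[ck->key.btree_id], new_u64s);
		ret = -ENOMEM;
		goto err;
	}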
@@ -1 +1 @@
-078a1a596a74ade60db6eee0f0be927defb7abed
+916d92b6b46b13873118a608ff16212f966375ba
@@ -859,7 +859,8 @@ static void discard_one_bucket(struct bch_fs *c, struct bch_dev *ca, u64 b)
 static bool allocator_thread_running(struct bch_dev *ca)
 {
 	unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
-		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
+		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags) &&
+		test_bit(BCH_FS_ALLOC_REPLAY_DONE, &ca->fs->flags)
 		? ALLOCATOR_running
 		: ALLOCATOR_stopped;
 	alloc_thread_set_state(ca, state);
@@ -510,6 +510,7 @@ enum {
 	BCH_FS_INITIAL_GC_DONE,
 	BCH_FS_INITIAL_GC_UNFIXED,
 	BCH_FS_TOPOLOGY_REPAIR_DONE,
+	BCH_FS_ALLOC_REPLAY_DONE,
 	BCH_FS_BTREE_INTERIOR_REPLAY_DONE,
 	BCH_FS_FSCK_DONE,
 	BCH_FS_STARTED,
@@ -83,6 +83,8 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 	b->aux_data = mmap(NULL, btree_aux_data_bytes(b),
 			   PROT_READ|PROT_WRITE|PROT_EXEC,
 			   MAP_PRIVATE|MAP_ANONYMOUS, 0, 0);
+	if (b->aux_data == MAP_FAILED)
+		b->aux_data = NULL;
 #endif
 	if (!b->aux_data) {
 		kvpfree(b->data, btree_bytes(c));
@@ -169,11 +169,11 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
 	new->v.min_key = new_min;
 	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
 
-	ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
-	kfree(new);
-
-	if (ret)
+	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+	if (ret) {
+		kfree(new);
 		return ret;
+	}
 
 	bch2_btree_node_drop_keys_outside_node(b);
 
@@ -198,11 +198,11 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
 	new->k.p = new_max;
 	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);
 
-	ret = bch2_journal_key_insert(c, b->c.btree_id, b->c.level + 1, &new->k_i);
-	kfree(new);
-
-	if (ret)
+	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
+	if (ret) {
+		kfree(new);
 		return ret;
+	}
 
 	bch2_btree_node_drop_keys_outside_node(b);
 
@@ -690,10 +690,10 @@ found:
 		}
 	}
 
-	ret = bch2_journal_key_insert(c, btree_id, level, new);
-	kfree(new);
-
-	if (!ret)
+	ret = bch2_journal_key_insert_take(c, btree_id, level, new);
+	if (ret)
+		kfree(new);
+	else
 		*k = bkey_i_to_s_c(new);
 	}
 fsck_err:
@@ -2437,15 +2437,18 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		if (bkey_cmp(iter->pos, next) < 0) {
 			bkey_init(&iter->k);
 			iter->k.p = iter->pos;
-			bch2_key_resize(&iter->k,
-					min_t(u64, KEY_SIZE_MAX,
-					      (next.inode == iter->pos.inode
-					       ? next.offset
-					       : KEY_OFFSET_MAX) -
-					      iter->pos.offset));
+
+			if (iter->flags & BTREE_ITER_IS_EXTENTS) {
+				bch2_key_resize(&iter->k,
+						min_t(u64, KEY_SIZE_MAX,
+						      (next.inode == iter->pos.inode
+						       ? next.offset
+						       : KEY_OFFSET_MAX) -
+						      iter->pos.offset));
+				EBUG_ON(!iter->k.size);
+			}
 
 			k = (struct bkey_s_c) { &iter->k, NULL };
-			EBUG_ON(!k.k->size);
 		}
 	}
 
@@ -146,19 +146,23 @@ bkey_cached_reuse(struct btree_key_cache *c)
 }
 
 static struct bkey_cached *
-btree_key_cache_create(struct btree_key_cache *c,
+btree_key_cache_create(struct bch_fs *c,
 		       enum btree_id btree_id,
 		       struct bpos pos)
 {
+	struct btree_key_cache *bc = &c->btree_key_cache;
 	struct bkey_cached *ck;
 	bool was_new = true;
 
-	ck = bkey_cached_alloc(c);
+	ck = bkey_cached_alloc(bc);
 
 	if (unlikely(!ck)) {
-		ck = bkey_cached_reuse(c);
-		if (unlikely(!ck))
+		ck = bkey_cached_reuse(bc);
+		if (unlikely(!ck)) {
+			bch_err(c, "error allocating memory for key cache item, btree %s",
+				bch2_btree_ids[btree_id]);
 			return ERR_PTR(-ENOMEM);
+		}
 
 		was_new = false;
 	}
@@ -175,7 +179,7 @@ btree_key_cache_create(struct btree_key_cache *c,
 	ck->valid		= false;
 	ck->flags		= 1U << BKEY_CACHED_ACCESSED;
 
-	if (unlikely(rhashtable_lookup_insert_fast(&c->table,
+	if (unlikely(rhashtable_lookup_insert_fast(&bc->table,
 					  &ck->hash,
 					  bch2_btree_key_cache_params))) {
 		/* We raced with another fill: */
@@ -185,15 +189,15 @@ btree_key_cache_create(struct btree_key_cache *c,
 			six_unlock_intent(&ck->c.lock);
 			kfree(ck);
 		} else {
-			mutex_lock(&c->lock);
-			bkey_cached_free(c, ck);
-			mutex_unlock(&c->lock);
+			mutex_lock(&bc->lock);
+			bkey_cached_free(bc, ck);
+			mutex_unlock(&bc->lock);
 		}
 
 		return NULL;
 	}
 
-	atomic_long_inc(&c->nr_keys);
+	atomic_long_inc(&bc->nr_keys);
 
 	six_unlock_write(&ck->c.lock);
 
@@ -204,6 +208,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 				  struct btree_path *ck_path,
 				  struct bkey_cached *ck)
 {
+	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	unsigned new_u64s = 0;
@@ -233,6 +238,8 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 		new_u64s = roundup_pow_of_two(new_u64s);
 		new_k = kmalloc(new_u64s * sizeof(u64), GFP_NOFS);
 		if (!new_k) {
+			bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+				bch2_btree_ids[ck->key.btree_id], new_u64s);
 			ret = -ENOMEM;
 			goto err;
 		}
@@ -293,8 +300,7 @@ retry:
 		return 0;
 	}
 
-	ck = btree_key_cache_create(&c->btree_key_cache,
-				    path->btree_id, path->pos);
+	ck = btree_key_cache_create(c, path->btree_id, path->pos);
 	ret = PTR_ERR_OR_ZERO(ck);
 	if (ret)
 		goto err;
@@ -63,7 +63,7 @@ int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
 int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
 				  struct bpos, struct bpos, unsigned, u64 *);
 int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
-			    struct bpos, struct bpos, u64 *);
+			    struct bpos, struct bpos, unsigned, u64 *);
 
 int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
 			    struct btree *, unsigned);
@@ -82,12 +82,12 @@ struct btree_update {
 	/* Nodes being freed: */
 	struct keylist		old_keys;
 	u64			_old_keys[BTREE_UPDATE_NODES_MAX *
-					  BKEY_BTREE_PTR_VAL_U64s_MAX];
+					  BKEY_BTREE_PTR_U64s_MAX];
 
 	/* Nodes being added: */
 	struct keylist		new_keys;
 	u64			_new_keys[BTREE_UPDATE_NODES_MAX *
-					  BKEY_BTREE_PTR_VAL_U64s_MAX];
+					  BKEY_BTREE_PTR_U64s_MAX];
 
 	/* New nodes, that will be made reachable by this update: */
 	struct btree		*new_nodes[BTREE_UPDATE_NODES_MAX];
@@ -308,6 +308,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 			    struct btree_path *path,
 			    unsigned u64s)
 {
+	struct bch_fs *c = trans->c;
 	struct bkey_cached *ck = (void *) path->l[0].b;
 	unsigned new_u64s;
 	struct bkey_i *new_k;
@@ -315,7 +316,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	EBUG_ON(path->level);
 
 	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
-	    bch2_btree_key_cache_must_wait(trans->c) &&
+	    bch2_btree_key_cache_must_wait(c) &&
 	    !(trans->flags & BTREE_INSERT_JOURNAL_RECLAIM))
 		return BTREE_INSERT_NEED_JOURNAL_RECLAIM;
 
@@ -330,8 +331,11 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 
 	new_u64s = roundup_pow_of_two(u64s);
 	new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
-	if (!new_k)
+	if (!new_k) {
+		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
+			bch2_btree_ids[path->btree_id], new_u64s);
 		return -ENOMEM;
+	}
 
 	ck->u64s	= new_u64s;
 	ck->k		= new_k;
@@ -1463,7 +1467,7 @@ retry:
 	 */
 	delete.k.p = iter.pos;
 
-	if (btree_node_type_is_extents(id)) {
+	if (iter.flags & BTREE_ITER_IS_EXTENTS) {
 		unsigned max_sectors =
 			KEY_SIZE_MAX & (~0 << trans->c->block_bits);
 
@@ -1500,8 +1504,10 @@ retry:
  */
 int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
 			    struct bpos start, struct bpos end,
+			    unsigned iter_flags,
 			    u64 *journal_seq)
 {
 	return bch2_trans_do(c, NULL, journal_seq, 0,
-		      bch2_btree_delete_range_trans(&trans, id, start, end, 0, journal_seq));
+		      bch2_btree_delete_range_trans(&trans, id, start, end,
+						    iter_flags, journal_seq));
 }
@@ -940,9 +940,11 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 		BUG_ON(!(flags & BTREE_TRIGGER_GC));
 
 		m = genradix_ptr_alloc(&c->gc_stripes, p.idx, GFP_KERNEL);
-
-		if (!m)
+		if (!m) {
+			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+				(u64) p.idx);
 			return -ENOMEM;
+		}
 
 		spin_lock(&c->ec_stripes_heap_lock);
 
@@ -1053,7 +1055,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 	bool gc = flags & BTREE_TRIGGER_GC;
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
-	size_t idx = new.k->p.offset;
+	u64 idx = new.k->p.offset;
 	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
 		? bkey_s_c_to_stripe(old).v : NULL;
 	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
@@ -1071,7 +1073,7 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 
 		bch2_bkey_val_to_text(&PBUF(buf1), c, old);
 		bch2_bkey_val_to_text(&PBUF(buf2), c, new);
-		bch_err_ratelimited(c, "error marking nonexistent stripe %zu while marking\n"
+		bch_err_ratelimited(c, "error marking nonexistent stripe %llu while marking\n"
 				    "old %s\n"
 				    "new %s", idx, buf1, buf2);
 		bch2_inconsistent_error(c);
@@ -1103,9 +1105,11 @@ static int bch2_mark_stripe(struct btree_trans *trans,
 		struct gc_stripe *m =
 			genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
 
-		if (!m)
+		if (!m) {
+			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
+				idx);
 			return -ENOMEM;
-
+		}
 		/*
 		 * This will be wrong when we bring back runtime gc: we should
 		 * be unmarking the old key and then marking the new key
@@ -677,7 +677,7 @@ static int ec_stripe_delete(struct bch_fs *c, size_t idx)
 	return bch2_btree_delete_range(c, BTREE_ID_stripes,
 				       POS(0, idx),
 				       POS(0, idx + 1),
-				       NULL);
+				       0, NULL);
 }
 
 static void ec_stripe_delete_work(struct work_struct *work)
@@ -564,14 +564,17 @@ static struct inode_walker inode_walker_init(void)
 	return (struct inode_walker) { 0, };
 }
 
-static int inode_walker_realloc(struct inode_walker *w)
+static int inode_walker_realloc(struct bch_fs *c, struct inode_walker *w)
 {
 	if (w->nr == w->size) {
 		size_t new_size = max_t(size_t, 8UL, w->size * 2);
 		void *d = krealloc(w->d, new_size * sizeof(w->d[0]),
 				   GFP_KERNEL);
-		if (!d)
+		if (!d) {
+			bch_err(c, "fsck: error allocating memory for inode_walker, size %zu",
+				new_size);
 			return -ENOMEM;
+		}
 
 		w->d = d;
 		w->size = new_size;
@@ -586,7 +589,7 @@ static int add_inode(struct bch_fs *c, struct inode_walker *w,
 	struct bch_inode_unpacked u;
 	int ret;
 
-	ret = inode_walker_realloc(w);
+	ret = inode_walker_realloc(c, w);
 	if (ret)
 		return ret;
 
@@ -647,7 +650,7 @@ found:
 	while (i && w->d[i - 1].snapshot > pos.snapshot)
 		--i;
 
-	ret = inode_walker_realloc(w);
+	ret = inode_walker_realloc(c, w);
 	if (ret)
 		return ret;
 
@@ -1132,9 +1135,9 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
 		count2 = lockrestart_do(trans,
 			bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot));
 
-		if (fsck_err_on(i->count != count2, c,
-				"fsck counted i_sectors wrong: got %llu should be %llu",
-				i->count, count2)) {
+		if (i->count != count2) {
+			bch_err(c, "fsck counted i_sectors wrong: got %llu should be %llu",
+				i->count, count2);
 			i->count = count2;
 			if (i->inode.bi_sectors == i->count)
 				continue;
@@ -1316,9 +1319,9 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
 		count2 = lockrestart_do(trans,
 			bch2_count_subdirs(trans, w->cur_inum, i->snapshot));
 
-		if (fsck_err_on(i->count != count2, c,
-				"directory %llu:%u: fsck counted subdirectories wrong, got %llu should be %llu",
-				w->cur_inum, i->snapshot, i->count, count2)) {
+		if (i->count != count2) {
+			bch_err(c, "fsck counted subdirectories wrong: got %llu should be %llu",
+				i->count, count2);
 			i->count = count2;
 			if (i->inode.bi_nlink == i->count)
 				continue;
@@ -1812,7 +1815,8 @@ static bool path_is_dup(struct pathbuf *p, u64 inum, u32 snapshot)
 	return false;
 }
 
-static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
+static int path_down(struct bch_fs *c, struct pathbuf *p,
+		     u64 inum, u32 snapshot)
 {
 	if (p->nr == p->size) {
 		size_t new_size = max_t(size_t, 256UL, p->size * 2);
@@ -1820,6 +1824,8 @@ static int path_down(struct pathbuf *p, u64 inum, u32 snapshot)
 				  new_size * sizeof(p->entries[0]),
 				  GFP_KERNEL);
 		if (!n) {
+			bch_err(c, "fsck: error allocating memory for pathbuf, size %zu",
+				new_size);
 			return -ENOMEM;
 		}
 
@@ -1893,7 +1899,7 @@ static int check_path(struct btree_trans *trans,
 		if (!S_ISDIR(inode->bi_mode))
 			break;
 
-		ret = path_down(p, inode->bi_inum, snapshot);
+		ret = path_down(c, p, inode->bi_inum, snapshot);
 		if (ret) {
 			bch_err(c, "memory allocation failure");
 			return ret;
@@ -1998,12 +2004,15 @@ struct nlink_table {
 	} *d;
 };
 
-static int add_nlink(struct nlink_table *t, u64 inum, u32 snapshot)
+static int add_nlink(struct bch_fs *c, struct nlink_table *t,
+		     u64 inum, u32 snapshot)
 {
 	if (t->nr == t->size) {
 		size_t new_size = max_t(size_t, 128UL, t->size * 2);
 		void *d = kvmalloc(new_size * sizeof(t->d[0]), GFP_KERNEL);
 		if (!d) {
+			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
+				new_size);
 			return -ENOMEM;
 		}
 
@@ -2093,7 +2102,7 @@ static int check_nlinks_find_hardlinks(struct bch_fs *c,
 		if (!u.bi_nlink)
 			continue;
 
-		ret = add_nlink(t, k.k->p.offset, k.k->p.snapshot);
+		ret = add_nlink(c, t, k.k->p.offset, k.k->p.snapshot);
 		if (ret) {
 			*end = k.k->p.offset;
 			ret = 0;
@@ -585,49 +585,62 @@ found_slot:
 static int bch2_inode_delete_keys(struct btree_trans *trans,
 				  subvol_inum inum, enum btree_id id)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	struct bkey_i delete;
-	u32 snapshot;
+	u64 offset = 0;
 	int ret = 0;
 
-	/*
-	 * We're never going to be deleting extents, no need to use an extent
-	 * iterator:
-	 */
-	bch2_trans_iter_init(trans, &iter, id, POS(inum.inum, 0),
-			     BTREE_ITER_NOT_EXTENTS|
-			     BTREE_ITER_INTENT);
-
-	while (1) {
+	while (!ret || ret == -EINTR) {
+		struct disk_reservation disk_res =
+			bch2_disk_reservation_init(trans->c, 0);
+		struct btree_iter iter;
+		struct bkey_s_c k;
+		struct bkey_i delete;
+		u32 snapshot;
+
 		bch2_trans_begin(trans);
 
 		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
 		if (ret)
-			goto err;
+			continue;
 
-		bch2_btree_iter_set_snapshot(&iter, snapshot);
-
+		bch2_trans_iter_init(trans, &iter, id,
+				     SPOS(inum.inum, offset, snapshot),
+				     BTREE_ITER_INTENT);
 		k = bch2_btree_iter_peek(&iter);
+
+		if (!k.k || iter.pos.inode != inum.inum) {
+			bch2_trans_iter_exit(trans, &iter);
+			break;
+		}
+
 		ret = bkey_err(k);
 		if (ret)
 			goto err;
 
-		if (!k.k || iter.pos.inode != inum.inum)
-			break;
-
 		bkey_init(&delete.k);
 		delete.k.p = iter.pos;
 
+		if (btree_node_type_is_extents(iter.btree_id)) {
+			unsigned max_sectors =
+				min_t(u64, U64_MAX - iter.pos.offset,
+				      KEY_SIZE_MAX & (~0 << trans->c->block_bits));
+
+			/* create the biggest key we can */
+			bch2_key_resize(&delete.k, max_sectors);
+
+			ret = bch2_extent_trim_atomic(trans, &iter, &delete);
+			if (ret)
+				goto err;
+		}
+
 		ret   = bch2_trans_update(trans, &iter, &delete, 0) ?:
-			bch2_trans_commit(trans, NULL, NULL,
+			bch2_trans_commit(trans, &disk_res, NULL,
 					  BTREE_INSERT_NOFAIL);
+		bch2_disk_reservation_put(trans->c, &disk_res);
 err:
-		if (ret && ret != -EINTR)
-			break;
+		offset = iter.pos.offset;
+		bch2_trans_iter_exit(trans, &iter);
 	}
 
-	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }
 
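For reference, the rewritten bch2_inode_delete_keys() above follows the usual bcachefs transaction-restart shape. Roughly, condensed from the new side of the hunk (not a verbatim copy; the extent-trimming and error details are elided):

	while (!ret || ret == -EINTR) {
		bch2_trans_begin(trans);

		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
		if (ret)
			continue;	/* -EINTR: restart the transaction */

		bch2_trans_iter_init(trans, &iter, id,
				     SPOS(inum.inum, offset, snapshot),
				     BTREE_ITER_INTENT);
		k = bch2_btree_iter_peek(&iter);
		/* ... build the delete key for iter.pos ... */

		ret = bch2_trans_update(trans, &iter, &delete, 0) ?:
		      bch2_trans_commit(trans, &disk_res, NULL, BTREE_INSERT_NOFAIL);

		offset = iter.pos.offset;	/* resume point for the next pass */
		bch2_trans_iter_exit(trans, &iter);
	}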
@@ -205,6 +205,11 @@ static int bch2_copygc(struct bch_fs *c)
 		up_read(&ca->bucket_lock);
 	}
 
+	if (!h->used) {
+		bch_err_ratelimited(c, "copygc requested to run but found no buckets to move!");
+		return 0;
+	}
+
 	/*
 	 * Our btree node allocations also come out of RESERVE_MOVINGGC:
 	 */
@@ -570,7 +570,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_USR, 0),
 					      POS(QTYP_USR + 1, 0),
-					      NULL);
+					      0, NULL);
 		if (ret)
 			return ret;
 	}
@@ -582,7 +582,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_GRP, 0),
 					      POS(QTYP_GRP + 1, 0),
-					      NULL);
+					      0, NULL);
 		if (ret)
 			return ret;
 	}
@@ -594,7 +594,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
 		ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
 					      POS(QTYP_PRJ, 0),
 					      POS(QTYP_PRJ + 1, 0),
-					      NULL);
+					      0, NULL);
 		if (ret)
 			return ret;
 	}
@@ -109,18 +109,27 @@ static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned
 	iter->idx++;
 }
 
-int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
-			    unsigned level, struct bkey_i *k)
+int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
+				 unsigned level, struct bkey_i *k)
 {
 	struct journal_key n = {
 		.btree_id	= id,
 		.level		= level,
+		.k		= k,
 		.allocated	= true
 	};
 	struct journal_keys *keys = &c->journal_keys;
 	struct journal_iter *iter;
 	unsigned idx = journal_key_search(keys, id, level, k->k.p);
 
+	if (idx < keys->nr &&
+	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
+		if (keys->d[idx].allocated)
+			kfree(keys->d[idx].k);
+		keys->d[idx] = n;
+		return 0;
+	}
+
 	if (keys->nr == keys->size) {
 		struct journal_keys new_keys = {
 			.nr			= keys->nr,
@@ -140,27 +149,31 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
 		*keys = new_keys;
 	}
 
-	n.k = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
-	if (!n.k)
-		return -ENOMEM;
+	array_insert_item(keys->d, keys->nr, idx, n);
 
-	bkey_copy(n.k, k);
+	list_for_each_entry(iter, &c->journal_iters, list)
+		journal_iter_fix(c, iter, idx);
 
-	if (idx < keys->nr &&
-	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
-		if (keys->d[idx].allocated)
-			kfree(keys->d[idx].k);
-		keys->d[idx] = n;
-	} else {
-		array_insert_item(keys->d, keys->nr, idx, n);
-
-		list_for_each_entry(iter, &c->journal_iters, list)
-			journal_iter_fix(c, iter, idx);
-	}
-
 	return 0;
 }
 
+int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
+			    unsigned level, struct bkey_i *k)
+{
+	struct bkey_i *n;
+	int ret;
+
+	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
+	if (!n)
+		return -ENOMEM;
+
+	bkey_copy(n, k);
+	ret = bch2_journal_key_insert_take(c, id, level, n);
+	if (ret)
+		kfree(n);
+	return ret;
+}
+
 int bch2_journal_key_delete(struct bch_fs *c, enum btree_id id,
 			    unsigned level, struct bpos pos)
 {
@@ -548,8 +561,8 @@ static int bch2_journal_replay_key(struct bch_fs *c, struct journal_key *k)
 
 static int journal_sort_seq_cmp(const void *_l, const void *_r)
 {
-	const struct journal_key *l = _l;
-	const struct journal_key *r = _r;
+	const struct journal_key *l = *((const struct journal_key **)_l);
+	const struct journal_key *r = *((const struct journal_key **)_r);
 
 	return  cmp_int(r->level,	l->level) ?:
 		cmp_int(l->journal_seq, r->journal_seq) ?:
@@ -557,18 +570,30 @@ static int journal_sort_seq_cmp(const void *_l, const void *_r)
 		bpos_cmp(l->k->k.p, r->k->k.p);
 }
 
-static int bch2_journal_replay(struct bch_fs *c,
-			       struct journal_keys keys)
+static int bch2_journal_replay(struct bch_fs *c)
 {
+	struct journal_keys *keys = &c->journal_keys;
+	struct journal_key **keys_sorted, *k;
 	struct journal *j = &c->journal;
-	struct journal_key *i;
+	struct bch_dev *ca;
+	unsigned idx;
+	size_t i;
 	u64 seq;
 	int ret;
 
-	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);
+	keys_sorted = kmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
+	if (!keys_sorted)
+		return -ENOMEM;
 
-	if (keys.nr)
-		replay_now_at(j, keys.journal_seq_base);
+	for (i = 0; i < keys->nr; i++)
+		keys_sorted[i] = &keys->d[i];
+
+	sort(keys_sorted, keys->nr,
+	     sizeof(keys_sorted[0]),
+	     journal_sort_seq_cmp, NULL);
+
+	if (keys->nr)
+		replay_now_at(j, keys->journal_seq_base);
 
 	seq = j->replay_journal_seq;
 
@@ -576,26 +601,35 @@ static int bch2_journal_replay(struct bch_fs *c,
 	 * First replay updates to the alloc btree - these will only update the
 	 * btree key cache:
 	 */
-	for_each_journal_key(keys, i) {
+	for (i = 0; i < keys->nr; i++) {
+		k = keys_sorted[i];
+
 		cond_resched();
 
-		if (!i->level && i->btree_id == BTREE_ID_alloc) {
-			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
-			ret = bch2_journal_replay_key(c, i);
+		if (!k->level && k->btree_id == BTREE_ID_alloc) {
+			j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
+			ret = bch2_journal_replay_key(c, k);
 			if (ret)
 				goto err;
 		}
 	}
 
+	/* Now we can start the allocator threads: */
+	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
+	for_each_member_device(ca, c, idx)
+		bch2_wake_allocator(ca);
+
 	/*
 	 * Next replay updates to interior btree nodes:
 	 */
-	for_each_journal_key(keys, i) {
+	for (i = 0; i < keys->nr; i++) {
+		k = keys_sorted[i];
+
 		cond_resched();
 
-		if (i->level) {
-			j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
-			ret = bch2_journal_replay_key(c, i);
+		if (k->level) {
+			j->replay_journal_seq = keys->journal_seq_base + k->journal_seq;
+			ret = bch2_journal_replay_key(c, k);
 			if (ret)
 				goto err;
 		}
@@ -615,15 +649,17 @@ static int bch2_journal_replay(struct bch_fs *c,
 	/*
 	 * Now replay leaf node updates:
 	 */
-	for_each_journal_key(keys, i) {
+	for (i = 0; i < keys->nr; i++) {
+		k = keys_sorted[i];
+
 		cond_resched();
 
-		if (i->level || i->btree_id == BTREE_ID_alloc)
+		if (k->level || k->btree_id == BTREE_ID_alloc)
 			continue;
 
-		replay_now_at(j, keys.journal_seq_base + i->journal_seq);
+		replay_now_at(j, keys->journal_seq_base + k->journal_seq);
 
-		ret = bch2_journal_replay_key(c, i);
+		ret = bch2_journal_replay_key(c, k);
 		if (ret)
 			goto err;
 	}
@@ -633,10 +669,14 @@ static int bch2_journal_replay(struct bch_fs *c,
 
 	bch2_journal_set_replay_done(j);
 	bch2_journal_flush_all_pins(j);
+	kfree(keys_sorted);
+
 	return bch2_journal_error(j);
 err:
 	bch_err(c, "journal replay: error %d while replaying key at btree %s level %u",
-		ret, bch2_btree_ids[i->btree_id], i->level);
+		ret, bch2_btree_ids[k->btree_id], k->level);
+	kfree(keys_sorted);
+
 	return ret;
 }
 
@@ -1208,7 +1248,7 @@ use_clean:
 
 	bch_verbose(c, "starting journal replay");
 	err = "journal replay failed";
-	ret = bch2_journal_replay(c, c->journal_keys);
+	ret = bch2_journal_replay(c);
 	if (ret)
 		goto err;
 	bch_verbose(c, "journal replay done");
@@ -1380,6 +1420,7 @@ int bch2_fs_initialize(struct bch_fs *c)
 	for (i = 0; i < BTREE_ID_NR; i++)
 		bch2_btree_root_alloc(c, i);
 
+	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
 	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
 	set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);
 
@@ -31,6 +31,8 @@ struct btree_and_journal_iter {
 	} last;
 };
 
+int bch2_journal_key_insert_take(struct bch_fs *, enum btree_id,
+				 unsigned, struct bkey_i *);
 int bch2_journal_key_insert(struct bch_fs *, enum btree_id,
 			    unsigned, struct bkey_i *);
 int bch2_journal_key_delete(struct bch_fs *, enum btree_id,
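The split between the two journal-key insert variants declared here is about ownership: bch2_journal_key_insert() copies the key internally, so the caller keeps (and eventually frees) its own buffer, while the new bch2_journal_key_insert_take() takes ownership of the passed allocation and the caller only frees it on failure. A sketch of the two calling conventions, matching the set_node_min()/set_node_max() hunks above (the variable names tmp and new are just illustrative caller-owned keys):

	/* caller keeps ownership: the key is copied internally */
	ret = bch2_journal_key_insert(c, btree_id, level, &tmp.k_i);

	/* caller hands off ownership: only free on error */
	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
	if (ret) {
		kfree(new);
		return ret;
	}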
@@ -1478,7 +1478,7 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
 	return bch2_btree_delete_range(c, BTREE_ID_alloc,
 				       POS(ca->dev_idx, 0),
 				       POS(ca->dev_idx + 1, 0),
-				       NULL);
+				       0, NULL);
 }
 
 int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
@@ -1599,18 +1599,24 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 	int ret;
 
 	ret = bch2_read_super(path, &opts, &sb);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: error reading super: %i", ret);
 		return ret;
+	}
 
 	err = bch2_sb_validate(&sb);
-	if (err)
+	if (err) {
+		bch_err(c, "device add error: error validating super: %s", err);
 		return -EINVAL;
+	}
 
 	dev_mi = bch2_sb_get_members(sb.sb)->members[sb.sb->dev_idx];
 
 	err = bch2_dev_may_add(sb.sb, c);
-	if (err)
+	if (err) {
+		bch_err(c, "device add error: %s", err);
 		return -EINVAL;
+	}
 
 	ca = __bch2_dev_alloc(c, &dev_mi);
 	if (!ca) {
@@ -1624,24 +1630,27 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 		return ret;
 	}
 
-	err = "journal alloc failed";
 	ret = bch2_dev_journal_alloc(ca);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: journal alloc failed");
 		goto err;
+	}
 
 	down_write(&c->state_lock);
 	mutex_lock(&c->sb_lock);
 
-	err = "insufficient space in new superblock";
 	ret = bch2_sb_from_fs(c, ca);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: new device superblock too small");
 		goto err_unlock;
+	}
 
 	mi = bch2_sb_get_members(ca->disk_sb.sb);
 
 	if (!bch2_sb_resize_members(&ca->disk_sb,
 				le32_to_cpu(mi->field.u64s) +
 				sizeof(dev_mi) / sizeof(u64))) {
+		bch_err(c, "device add error: new device superblock too small");
 		ret = -ENOSPC;
 		goto err_unlock;
 	}
@@ -1654,7 +1663,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
 	if (!bch2_dev_exists(c->disk_sb.sb, mi, dev_idx))
 		goto have_slot;
 no_slot:
-	err = "no slots available in superblock";
+	bch_err(c, "device add error: already have maximum number of devices");
 	ret = -ENOSPC;
 	goto err_unlock;
 
@@ -1663,12 +1672,12 @@ have_slot:
 	u64s = (sizeof(struct bch_sb_field_members) +
 		sizeof(struct bch_member) * nr_devices) / sizeof(u64);
 
-	err = "no space in superblock for member info";
-	ret = -ENOSPC;
-
 	mi = bch2_sb_resize_members(&c->disk_sb, u64s);
-	if (!mi)
+	if (!mi) {
+		bch_err(c, "device add error: no room in superblock for member info");
+		ret = -ENOSPC;
 		goto err_unlock;
+	}
 
 	/* success: */
 
@@ -1684,17 +1693,20 @@ have_slot:
 
 	bch2_dev_usage_journal_reserve(c);
 
-	err = "error marking superblock";
 	ret = bch2_trans_mark_dev_sb(c, ca);
-	if (ret)
+	if (ret) {
+		bch_err(c, "device add error: error marking new superblock: %i", ret);
 		goto err_late;
+	}
 
 	ca->new_fs_bucket_idx = 0;
 
 	if (ca->mi.state == BCH_MEMBER_STATE_rw) {
 		ret = __bch2_dev_read_write(c, ca);
-		if (ret)
+		if (ret) {
+			bch_err(c, "device add error: error going RW on new device: %i", ret);
 			goto err_late;
+		}
 	}
 
 	up_write(&c->state_lock);
@@ -1707,11 +1719,9 @@ err:
 	if (ca)
 		bch2_dev_free(ca);
 	bch2_free_super(&sb);
-	bch_err(c, "Unable to add device: %s", err);
 	return ret;
 err_late:
 	up_write(&c->state_lock);
-	bch_err(c, "Error going rw after adding device: %s", err);
 	return -EINVAL;
 }
 
@@ -14,14 +14,14 @@ static void delete_test_keys(struct bch_fs *c)
 	int ret;
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_extents,
-				      SPOS(0, 0, U32_MAX),
-				      SPOS(0, U64_MAX, U32_MAX),
+				      POS_MIN, SPOS_MAX,
+				      BTREE_ITER_ALL_SNAPSHOTS,
 				      NULL);
 	BUG_ON(ret);
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      SPOS(0, 0, U32_MAX),
-				      SPOS(0, U64_MAX, U32_MAX),
+				      POS_MIN, SPOS_MAX,
+				      BTREE_ITER_ALL_SNAPSHOTS,
 				      NULL);
 	BUG_ON(ret);
 }
@@ -146,7 +146,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
-			   POS_MIN, 0, k, ret) {
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		if (k.k->p.inode)
 			break;
 
@@ -202,7 +202,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
 	i = 0;
 
 	for_each_btree_key(&trans, iter, BTREE_ID_extents,
-			   POS_MIN, 0, k, ret) {
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		BUG_ON(bkey_start_offset(k.k) != i);
 		i = k.k->p.offset;
 	}
@@ -256,8 +256,8 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
-			   0, k, ret) {
+	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		if (k.k->p.inode)
 			break;
 
@@ -272,7 +272,8 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
+			   SPOS(0, 0, U32_MAX),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BUG_ON(k.k->p.offset != i);
 		BUG_ON(bkey_deleted(k.k) != (i & 1));
@@ -321,8 +322,8 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
-			   0, k, ret) {
+	for_each_btree_key(&trans, iter, BTREE_ID_extents,
+			   SPOS(0, 0, U32_MAX), 0, k, ret) {
 		BUG_ON(bkey_start_offset(k.k) != i + 8);
 		BUG_ON(k.k->size != 8);
 		i += 16;
@@ -335,7 +336,8 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
 
 	i = 0;
 
-	for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
+	for_each_btree_key(&trans, iter, BTREE_ID_extents,
+			   SPOS(0, 0, U32_MAX),
 			   BTREE_ITER_SLOTS, k, ret) {
 		BUG_ON(bkey_deleted(k.k) != !(i % 16));
 
@@ -363,7 +365,8 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
 	struct bkey_s_c k;
 
 	bch2_trans_init(&trans, c, 0, 0);
-	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs, POS_MIN, 0);
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_xattrs,
+			     SPOS(0, 0, U32_MAX), 0);
 
 	k = bch2_btree_iter_peek(&iter);
 	BUG_ON(k.k);
@@ -383,7 +386,8 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
 	struct bkey_s_c k;
 
 	bch2_trans_init(&trans, c, 0, 0);
-	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents, POS_MIN, 0);
+	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
+			     SPOS(0, 0, U32_MAX), 0);
 
 	k = bch2_btree_iter_peek(&iter);
 	BUG_ON(k.k);
@@ -406,8 +410,6 @@ static int insert_test_extent(struct bch_fs *c,
 	struct bkey_i_cookie k;
 	int ret;
 
-	//pr_info("inserting %llu-%llu v %llu", start, end, test_version);
-
 	bkey_cookie_init(&k.k_i);
 	k.k_i.k.p.offset = end;
 	k.k_i.k.p.snapshot = U32_MAX;
@@ -747,7 +749,9 @@ static int seq_delete(struct bch_fs *c, u64 nr)
 	int ret;
 
 	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
-				      SPOS(0, 0, U32_MAX), POS_MAX, NULL);
+				      POS_MIN, SPOS_MAX,
+				      BTREE_ITER_ALL_SNAPSHOTS,
+				      NULL);
 	if (ret)
 		bch_err(c, "error in seq_delete: %i", ret);
 	return ret;
@@ -114,7 +114,7 @@ void bch2_hprint(struct printbuf *buf, s64 v)
 	 * 103 is magic: t is in the range [-1023, 1023] and we want
 	 * to turn it into [-9, 9]
 	 */
-	if (u && v < 100 && v > -100)
+	if (u && t && v < 100 && v > -100)
 		pr_buf(buf, ".%i", t / 103);
 	if (u)
 		pr_buf(buf, "%c", si_units[u]);