Update bcachefs sources to 0342eebf85 bcachefs: Improve the backpointer to missing extent message

Kent Overstreet 2023-03-15 08:59:01 -04:00
parent fa35853772
commit ec28e6bfa2
34 changed files with 306 additions and 178 deletions
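Two themes run through this sync: check_one_backpointer() now reports both the bucket and the backpointer's btree position when it finds a dangling backpointer, and a large batch of bare -ENOMEM returns is converted to private, per-callsite error codes from errcode.h. The sketch below (illustrative only, not verbatim bcachefs code; the real table and helper live in errcode.h/errcode.c) shows why the converted returns still behave as ENOMEM to callers that class-match instead of comparing exactly:

#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>

#define BCH_ERR_START 2048

enum { BCH_ERR_ENOMEM_trans_kmalloc = BCH_ERR_START };

/* each private code maps to a parent class, ultimately a standard errno */
static const int bch2_errcode_parents[] = {
	[BCH_ERR_ENOMEM_trans_kmalloc - BCH_ERR_START] = ENOMEM,
};

static bool __bch2_err_matches(int err, int class)
{
	err   = abs(err);
	class = abs(class);

	while (err >= BCH_ERR_START) {
		if (err == class)
			return true;
		err = bch2_errcode_parents[err - BCH_ERR_START];
	}
	return err == class;
}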

.bcachefs_revision

@@ -1 +1 @@
-72405e7ff8c5fb569b74b046d19866ee480f29b7
+0342eebf85b7be76f01bacec8f958c6e6039535b

libbcachefs/backpointers.c

@@ -298,11 +298,12 @@ err:
 /*
  * Find the next backpointer >= *bp_offset:
  */
-int bch2_get_next_backpointer(struct btree_trans *trans,
-			      struct bpos bucket, int gen,
-			      u64 *bp_offset,
-			      struct bch_backpointer *dst,
-			      unsigned iter_flags)
+int __bch2_get_next_backpointer(struct btree_trans *trans,
+				struct bpos bucket, int gen,
+				u64 *bp_offset,
+				struct bpos *bp_pos_ret,
+				struct bch_backpointer *dst,
+				unsigned iter_flags)
 {
 	struct bch_fs *c = trans->c;
 	struct bpos bp_pos, bp_end_pos;
@@ -352,6 +353,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,
 		*dst = *bkey_s_c_to_backpointer(k).v;
 		*bp_offset = dst->bucket_offset + BACKPOINTER_OFFSET_MAX;
+		*bp_pos_ret = k.k->p;
 		goto out;
 	}
 done:
@@ -362,6 +364,19 @@ out:
 	return ret;
 }
 
+int bch2_get_next_backpointer(struct btree_trans *trans,
+			      struct bpos bucket, int gen,
+			      u64 *bp_offset,
+			      struct bch_backpointer *dst,
+			      unsigned iter_flags)
+{
+	struct bpos bp_pos;
+
+	return __bch2_get_next_backpointer(trans, bucket, gen,
+					   bp_offset, &bp_pos,
+					   dst, iter_flags);
+}
+
 static void backpointer_not_found(struct btree_trans *trans,
 				  struct bpos bucket,
 				  u64 bp_offset,
@@ -952,7 +967,7 @@ static int check_one_backpointer(struct btree_trans *trans,
 	struct printbuf buf = PRINTBUF;
 	int ret;
 
-	ret = bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp, 0);
+	ret = __bch2_get_next_backpointer(trans, bucket, -1, bp_offset, &bp_pos, &bp, 0);
 	if (ret || *bp_offset == U64_MAX)
 		return ret;
@@ -968,23 +983,17 @@ static int check_one_backpointer(struct btree_trans *trans,
 	if (ret)
 		return ret;
 
-	bp_pos = bucket_pos_to_bp(c, bucket,
-			max(*bp_offset, BACKPOINTER_OFFSET_MAX) - BACKPOINTER_OFFSET_MAX);
-
 	if (!k.k && !bpos_eq(*last_flushed_pos, bp_pos)) {
 		*last_flushed_pos = bp_pos;
-		pr_info("flushing at %llu:%llu",
-			last_flushed_pos->inode,
-			last_flushed_pos->offset);
 		ret = bch2_btree_write_buffer_flush_sync(trans) ?:
 			-BCH_ERR_transaction_restart_write_buffer_flush;
 		goto out;
 	}
 
 	if (fsck_err_on(!k.k, c,
-			"%s backpointer points to missing extent\n%s",
-			*bp_offset < BACKPOINTER_OFFSET_MAX ? "alloc" : "btree",
+			"backpointer for %llu:%llu:%llu (btree pos %llu:%llu) points to missing extent\n %s",
+			bucket.inode, bucket.offset, (u64) bp.bucket_offset,
+			bp_pos.inode, bp_pos.offset,
 			(bch2_backpointer_to_text(&buf, &bp), buf.buf))) {
 		ret = bch2_backpointer_del_by_offset(trans, bucket, *bp_offset, bp);
 		if (ret == -ENOENT)
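The backpointers.c change is a textbook out-parameter split: the internal __bch2_get_next_backpointer() additionally reports the btree position it found, so check_one_backpointer() can print it, while the old signature survives as a thin wrapper for callers that don't care. A minimal sketch of the pattern (hypothetical names, not bcachefs API):

/* internal variant: returns the extra result via an out parameter */
static int __lookup(int key, int *pos_ret)
{
	*pos_ret = key * 2;	/* stand-in for the real lookup */
	return 0;
}

/* original entry point: unchanged signature, discards the extra result */
int lookup(int key)
{
	int unused_pos;

	return __lookup(key, &unused_pos);
}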

libbcachefs/backpointers.h

@@ -48,7 +48,7 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
 		(bucket_to_sector(ca, bucket.offset) <<
 		 MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);
 
-	BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
+	EBUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));
 	return ret;
 }

libbcachefs/btree_cache.c

@@ -98,7 +98,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 	b->data = kvpmalloc(btree_bytes(c), gfp);
 	if (!b->data)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 #ifdef __KERNEL__
 	b->aux_data = vmalloc_exec(btree_aux_data_bytes(b), gfp);
 #else
@@ -111,7 +111,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 	if (!b->aux_data) {
 		kvpfree(b->data, btree_bytes(c));
 		b->data = NULL;
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
 	}
 
 	return 0;
@@ -223,7 +223,7 @@ wait_on_io:
 			BTREE_CACHE_NOT_FREED_INCREMENT(read_in_flight);
 		else if (btree_node_write_in_flight(b))
 			BTREE_CACHE_NOT_FREED_INCREMENT(write_in_flight);
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_reclaim;
 	}
 
 	/* XXX: waiting on IO with btree cache lock held */
@@ -233,7 +233,7 @@ wait_on_io:
 	if (!six_trylock_intent(&b->c.lock)) {
 		BTREE_CACHE_NOT_FREED_INCREMENT(lock_intent);
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_reclaim;
 	}
 
 	if (!six_trylock_write(&b->c.lock)) {
@@ -299,7 +299,7 @@ out_unlock:
 	six_unlock_write(&b->c.lock);
 out_unlock_intent:
 	six_unlock_intent(&b->c.lock);
-	ret = -ENOMEM;
+	ret = -BCH_ERR_ENOMEM_btree_node_reclaim;
 	goto out;
 }
@@ -513,7 +513,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 	for (i = 0; i < bc->reserve; i++)
 		if (!__bch2_btree_node_mem_alloc(c)) {
-			ret = -ENOMEM;
+			ret = -BCH_ERR_ENOMEM_fs_btree_cache_init;
 			goto out;
 		}
@@ -568,7 +568,7 @@ int bch2_btree_cache_cannibalize_lock(struct bch_fs *c, struct closure *cl)
 	if (!cl) {
 		trace_and_count(c, btree_cache_cannibalize_lock_fail, c);
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
 	}
 
 	closure_wait(&bc->alloc_wait, cl);
@@ -721,7 +721,7 @@ err:
 	mutex_unlock(&bc->lock);
 	memalloc_nofs_restore(flags);
-	return ERR_PTR(-ENOMEM);
+	return ERR_PTR(-BCH_ERR_ENOMEM_btree_node_mem_alloc);
 }
 
 /* Slowpath, don't want it inlined into btree_iter_traverse() */
@@ -750,7 +750,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 	b = bch2_btree_node_mem_alloc(trans, level != 0);
 
-	if (b == ERR_PTR(-ENOMEM)) {
+	if (bch2_err_matches(PTR_ERR_OR_ZERO(b), ENOMEM)) {
 		trans->memory_allocation_failure = true;
 		trace_and_count(c, trans_restart_memory_allocation_failure, trans, _THIS_IP_, path);
 		return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_fill_mem_alloc_fail));
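Once bch2_btree_node_mem_alloc() can fail with any code in the ENOMEM class, the old pointer-equality test b == ERR_PTR(-ENOMEM) in bch2_btree_node_fill() would silently stop matching; the fix unwraps the ERR_PTR and class-matches instead. For reference, PTR_ERR_OR_ZERO() has these semantics (minimal userspace rendition of the kernel's err.h helpers):

#include <stdbool.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long err)		{ return (void *) err; }
static inline long PTR_ERR(const void *ptr)	{ return (long) ptr; }

static inline bool IS_ERR(const void *ptr)
{
	return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}

/* 0 for a valid pointer, the negative errno for an ERR_PTR */
static inline long PTR_ERR_OR_ZERO(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}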

libbcachefs/btree_gc.c

@@ -201,7 +201,7 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
 	new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
 	if (!new)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_gc_repair_key;
 
 	btree_ptr_to_v2(b, new);
 	b->data->min_key = new_min;
@@ -230,7 +230,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
 	new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
 	if (!new)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_gc_repair_key;
 
 	btree_ptr_to_v2(b, new);
 	b->data->max_key = new_max;
@@ -686,7 +686,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 		new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
 		if (!new) {
 			bch_err(c, "%s: error allocating new key", __func__);
-			ret = -ENOMEM;
+			ret = -BCH_ERR_ENOMEM_gc_repair_key;
 			goto err;
 		}
@@ -1296,7 +1296,7 @@ static int bch2_gc_start(struct bch_fs *c)
 			sizeof(u64), GFP_KERNEL);
 	if (!c->usage_gc) {
 		bch_err(c, "error allocating c->usage_gc");
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_gc_start;
 	}
 
 	for_each_member_device(ca, c, i) {
@@ -1306,7 +1306,7 @@ static int bch2_gc_start(struct bch_fs *c)
 		if (!ca->usage_gc) {
 			bch_err(c, "error allocating ca->usage_gc");
 			percpu_ref_put(&ca->ref);
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_gc_start;
 		}
 
 		this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
@@ -1498,7 +1498,7 @@ static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
 		if (!buckets) {
 			percpu_ref_put(&ca->ref);
 			bch_err(c, "error allocating ca->buckets[gc]");
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_gc_alloc_start;
 		}
 
 		buckets->first_bucket = ca->mi.first_bucket;
@@ -1659,7 +1659,7 @@ static int bch2_gc_reflink_start(struct bch_fs *c,
 		r = genradix_ptr_alloc(&c->reflink_gc_table, c->reflink_gc_nr++,
 				       GFP_KERNEL);
 		if (!r) {
-			ret = -ENOMEM;
+			ret = -BCH_ERR_ENOMEM_gc_reflink_start;
 			break;
 		}
@@ -1980,7 +1980,7 @@ int bch2_gc_gens(struct bch_fs *c)
 		ca->oldest_gen = kvmalloc(ca->mi.nbuckets, GFP_KERNEL);
 		if (!ca->oldest_gen) {
 			percpu_ref_put(&ca->ref);
-			ret = -ENOMEM;
+			ret = -BCH_ERR_ENOMEM_gc_gens;
 			goto err;
 		}

libbcachefs/btree_io.c

@@ -1485,7 +1485,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
 	ra = kzalloc(sizeof(*ra), GFP_NOFS);
 	if (!ra)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
 
 	closure_init(&ra->cl, NULL);
 	ra->c = c;

libbcachefs/btree_iter.c

@@ -1012,7 +1012,7 @@ retry_all:
 		__btree_path_put(path, false);
 
 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
-	    ret == -ENOMEM)
+	    bch2_err_matches(ret, ENOMEM))
 		goto retry_all;
 	if (ret)
 		goto err;
@@ -2809,7 +2809,7 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size)
 	}
 
 	if (!new_mem)
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(-BCH_ERR_ENOMEM_trans_kmalloc);
 
 	trans->mem = new_mem;
 	trans->mem_bytes = new_bytes;

libbcachefs/btree_key_cache.c

@@ -337,7 +337,7 @@ btree_key_cache_create(struct btree_trans *trans, struct btree_path *path)
 		if (unlikely(!ck)) {
 			bch_err(c, "error allocating memory for key cache item, btree %s",
 				bch2_btree_ids[path->btree_id]);
-			return ERR_PTR(-ENOMEM);
+			return ERR_PTR(-BCH_ERR_ENOMEM_btree_key_cache_create);
 		}
 
 	mark_btree_node_locked(trans, path, 0, SIX_LOCK_intent);
@@ -424,7 +424,7 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 		if (!new_k) {
 			bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
 				bch2_btree_ids[ck->key.btree_id], new_u64s);
-			ret = -ENOMEM;
+			ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
 			goto err;
 		}
@@ -1056,17 +1056,15 @@ static void bch2_btree_key_cache_shrinker_to_text(struct seq_buf *s, struct shri
 int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
 {
 	struct bch_fs *c = container_of(bc, struct bch_fs, btree_key_cache);
-	int ret;
 
 #ifdef __KERNEL__
 	bc->pcpu_freed = alloc_percpu(struct btree_key_cache_freelist);
 	if (!bc->pcpu_freed)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_fs_btree_cache_init;
 #endif
 
-	ret = rhashtable_init(&bc->table, &bch2_btree_key_cache_params);
-	if (ret)
-		return ret;
+	if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
+		return -BCH_ERR_ENOMEM_fs_btree_cache_init;
 
 	bc->table_init_done = true;
@@ -1074,7 +1072,9 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
 	bc->shrink.count_objects = bch2_btree_key_cache_count;
 	bc->shrink.scan_objects = bch2_btree_key_cache_scan;
 	bc->shrink.to_text = bch2_btree_key_cache_shrinker_to_text;
-	return register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name);
+	if (register_shrinker(&bc->shrink, "%s/btree_key_cache", c->name))
+		return -BCH_ERR_ENOMEM_fs_btree_cache_init;
+	return 0;
 }
 
 void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)

libbcachefs/btree_update_interior.c

@@ -2474,8 +2474,11 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c)
 	c->btree_interior_update_worker =
 		alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
 	if (!c->btree_interior_update_worker)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
 
-	return mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
-					 sizeof(struct btree_update));
+	if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
+				      sizeof(struct btree_update)))
+		return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
+
+	return 0;
 }

libbcachefs/btree_update_leaf.c

@@ -401,7 +401,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
 	if (!new_k) {
 		bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
 			bch2_btree_ids[path->btree_id], new_u64s);
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_btree_key_cache_insert;
 	}
 
 	trans_for_each_update(trans, i)
@@ -1891,7 +1891,7 @@ static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list ar
 	int ret;
 
 	prt_vprintf(&buf, fmt, args);
-	ret = buf.allocation_failure ? -ENOMEM : 0;
+	ret = buf.allocation_failure ? -BCH_ERR_ENOMEM_trans_log_msg : 0;
 	if (ret)
 		goto err;

libbcachefs/btree_write_buffer.c

@@ -333,7 +333,7 @@ int bch2_fs_btree_write_buffer_init(struct bch_fs *c)
 	wb->keys[0] = kvmalloc_array(wb->size, sizeof(*wb->keys[0]), GFP_KERNEL);
 	wb->keys[1] = kvmalloc_array(wb->size, sizeof(*wb->keys[1]), GFP_KERNEL);
 	if (!wb->keys[0] || !wb->keys[1])
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_fs_btree_write_buffer_init;
 
 	return 0;
 }

libbcachefs/buckets.c

@@ -906,7 +906,7 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 		if (!m) {
 			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
 				(u64) p.idx);
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_mark_stripe_ptr;
 		}
 
 		mutex_lock(&c->ec_stripes_heap_lock);
@@ -1075,7 +1075,7 @@ int bch2_mark_stripe(struct btree_trans *trans,
 		if (!m) {
 			bch_err(c, "error allocating memory for gc_stripes, idx %llu",
 				idx);
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_mark_stripe;
 		}
 
 		/*
 		 * This will be wrong when we bring back runtime gc: we should
@@ -2045,15 +2045,21 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	struct bucket_gens *bucket_gens = NULL, *old_bucket_gens = NULL;
 	unsigned long *buckets_nouse = NULL;
 	bool resize = ca->bucket_gens != NULL;
-	int ret = -ENOMEM;
+	int ret;
 
 	if (!(bucket_gens = kvpmalloc(sizeof(struct bucket_gens) + nbuckets,
-				      GFP_KERNEL|__GFP_ZERO)) ||
-	    (c->opts.buckets_nouse &&
+				      GFP_KERNEL|__GFP_ZERO))) {
+		ret = -BCH_ERR_ENOMEM_bucket_gens;
+		goto err;
+	}
+
+	if ((c->opts.buckets_nouse &&
 	     !(buckets_nouse = kvpmalloc(BITS_TO_LONGS(nbuckets) *
 					 sizeof(unsigned long),
-					 GFP_KERNEL|__GFP_ZERO))))
+					 GFP_KERNEL|__GFP_ZERO)))) {
+		ret = -BCH_ERR_ENOMEM_buckets_nouse;
 		goto err;
+	}
 
 	bucket_gens->first_bucket = ca->mi.first_bucket;
 	bucket_gens->nbuckets = nbuckets;
@@ -2123,12 +2129,12 @@ int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
 
 	ca->usage_base = kzalloc(sizeof(struct bch_dev_usage), GFP_KERNEL);
 	if (!ca->usage_base)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_usage_init;
 
 	for (i = 0; i < ARRAY_SIZE(ca->usage); i++) {
 		ca->usage[i] = alloc_percpu(struct bch_dev_usage);
 		if (!ca->usage[i])
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_usage_init;
 	}
 
 	return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);

libbcachefs/buckets_waiting_for_journal.c

@@ -110,7 +110,7 @@ int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
 	n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
 	if (!n) {
-		ret = -ENOMEM;
+		ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
 		goto out;
 	}
@@ -159,7 +159,7 @@ int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
 	b->t = kvmalloc(sizeof(*b->t) +
 			(sizeof(b->t->d[0]) << INITIAL_TABLE_BITS), GFP_KERNEL);
 	if (!b->t)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_buckets_waiting_for_journal_init;
 
 	bucket_table_init(b->t, INITIAL_TABLE_BITS);
 	return 0;

libbcachefs/checksum.c

@@ -133,7 +133,7 @@ static inline int do_encrypt(struct crypto_sync_skcipher *tfm,
 		sg = kmalloc_array(pages, sizeof(*sg), GFP_KERNEL);
 		if (!sg)
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_do_encrypt;
 
 		sg_init_table(sg, pages);
@@ -648,7 +648,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
 	crypt = bch2_sb_resize_crypt(&c->disk_sb, sizeof(*crypt) / sizeof(u64));
 	if (!crypt) {
-		ret = -ENOMEM; /* XXX this technically could be -ENOSPC */
+		ret = -BCH_ERR_ENOSPC_sb_crypt;
 		goto err;
 	}

libbcachefs/clock.c

@@ -184,10 +184,10 @@ int bch2_io_clock_init(struct io_clock *clock)
 	clock->pcpu_buf = alloc_percpu(*clock->pcpu_buf);
 	if (!clock->pcpu_buf)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_io_clock_init;
 
 	if (!init_heap(&clock->timers, NR_IO_TIMERS, GFP_KERNEL))
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_io_clock_init;
 
 	return 0;
 }

libbcachefs/compress.c

@@ -270,7 +270,7 @@ int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
 {
 	struct bbuf dst_data = { NULL };
 	size_t dst_len = crc.uncompressed_size << 9;
-	int ret = -ENOMEM;
+	int ret;
 
 	if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
 	    crc.compressed_size << 9 > c->opts.encoded_extent_max)
@@ -542,7 +542,7 @@ void bch2_fs_compress_exit(struct bch_fs *c)
 	mempool_exit(&c->compression_bounce[READ]);
 }
 
-static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
+static int _bch2_fs_compress_init(struct bch_fs *c, u64 features)
 {
 	size_t decompress_workspace_size = 0;
 	bool decompress_workspace_needed;
@@ -561,34 +561,27 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 			zstd_cctx_workspace_bound(&params.cParams),
 			zstd_dctx_workspace_bound() },
 	}, *i;
-	int ret = 0;
-
-	pr_verbose_init(c->opts, "");
+	bool have_compressed = false;
 
 	c->zstd_params = params;
 
 	for (i = compression_types;
 	     i < compression_types + ARRAY_SIZE(compression_types);
 	     i++)
-		if (features & (1 << i->feature))
-			goto have_compressed;
+		have_compressed |= (features & (1 << i->feature)) != 0;
 
-	goto out;
-have_compressed:
+	if (!have_compressed)
+		return 0;
 
-	if (!mempool_initialized(&c->compression_bounce[READ])) {
-		ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
-						  1, c->opts.encoded_extent_max);
-		if (ret)
-			goto out;
-	}
+	if (!mempool_initialized(&c->compression_bounce[READ]) &&
+	    mempool_init_kvpmalloc_pool(&c->compression_bounce[READ],
+					1, c->opts.encoded_extent_max))
+		return -BCH_ERR_ENOMEM_compression_bounce_read_init;
 
-	if (!mempool_initialized(&c->compression_bounce[WRITE])) {
-		ret = mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
-						  1, c->opts.encoded_extent_max);
-		if (ret)
-			goto out;
-	}
+	if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
+	    mempool_init_kvpmalloc_pool(&c->compression_bounce[WRITE],
+					1, c->opts.encoded_extent_max))
+		return -BCH_ERR_ENOMEM_compression_bounce_write_init;
 
 	for (i = compression_types;
 	     i < compression_types + ARRAY_SIZE(compression_types);
@@ -605,22 +598,28 @@ have_compressed:
 		if (mempool_initialized(&c->compress_workspace[i->type]))
 			continue;
 
-		ret = mempool_init_kvpmalloc_pool(
-				&c->compress_workspace[i->type],
-				1, i->compress_workspace);
-		if (ret)
-			goto out;
+		if (mempool_init_kvpmalloc_pool(
+				&c->compress_workspace[i->type],
+				1, i->compress_workspace))
+			return -BCH_ERR_ENOMEM_compression_workspace_init;
 	}
 
-	if (!mempool_initialized(&c->decompress_workspace)) {
-		ret = mempool_init_kvpmalloc_pool(
-				&c->decompress_workspace,
-				1, decompress_workspace_size);
-		if (ret)
-			goto out;
-	}
-out:
+	if (!mempool_initialized(&c->decompress_workspace) &&
+	    mempool_init_kvpmalloc_pool(&c->decompress_workspace,
+					1, decompress_workspace_size))
+		return -BCH_ERR_ENOMEM_decompression_workspace_init;
+
+	return 0;
+}
+
+static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
+{
+	int ret;
+
+	pr_verbose_init(c->opts, "");
+	ret = _bch2_fs_compress_init(c, features);
 	pr_verbose_init(c->opts, "ret %i", ret);
+
 	return ret;
 }

libbcachefs/counters.c

@@ -96,7 +96,7 @@ int bch2_fs_counters_init(struct bch_fs *c)
 {
 	c->counters = __alloc_percpu(sizeof(u64) * BCH_COUNTER_NR, sizeof(u64));
 	if (!c->counters)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_fs_counters_init;
 
 	return bch2_sb_counters_to_cpu(c);
 }

libbcachefs/disk_groups.c

@@ -68,7 +68,7 @@ static int bch2_sb_disk_groups_validate(struct bch_sb *sb,
 	sorted = kmalloc_array(nr_groups, sizeof(*sorted), GFP_KERNEL);
 	if (!sorted)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_disk_groups_validate;
 
 	memcpy(sorted, groups->entries, nr_groups * sizeof(*sorted));
 	sort(sorted, nr_groups, sizeof(*sorted), group_cmp, NULL);
@@ -134,7 +134,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
 	cpu_g = kzalloc(sizeof(*cpu_g) +
 			sizeof(cpu_g->entries[0]) * nr_groups, GFP_KERNEL);
 	if (!cpu_g)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_disk_groups_to_cpu;
 
 	cpu_g->nr = nr_groups;

libbcachefs/ec.c

@@ -494,7 +494,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 	buf = kzalloc(sizeof(*buf), GFP_NOIO);
 	if (!buf)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_ec_read_extent;
 
 	ret = get_stripe_key(c, rbio->pick.ec.idx, buf);
 	if (ret) {
@@ -559,7 +559,7 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
 	if (idx >= h->size) {
 		if (!init_heap(&n, max(1024UL, roundup_pow_of_two(idx + 1)), gfp))
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
 		mutex_lock(&c->ec_stripes_heap_lock);
 		if (n.size > h->size) {
@@ -573,11 +573,11 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
 	}
 
 	if (!genradix_ptr_alloc(&c->stripes, idx, gfp))
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
 	if (c->gc_pos.phase != GC_PHASE_NOT_RUNNING &&
 	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
 
 	return 0;
 }
@@ -1326,7 +1326,7 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 	s = kzalloc(sizeof(*s), GFP_KERNEL);
 	if (!s)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
 
 	mutex_init(&s->lock);
 	closure_init(&s->iodone, NULL);
@@ -1688,8 +1688,8 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
 		return h;
 
 	if (!h->s) {
-		if (ec_new_stripe_alloc(c, h)) {
-			ret = -ENOMEM;
+		ret = ec_new_stripe_alloc(c, h);
+		if (ret) {
 			bch_err(c, "failed to allocate new stripe");
 			goto err;
 		}

libbcachefs/errcode.h

@@ -4,6 +4,79 @@
 
 #define BCH_ERRCODES() \
 	x(ENOMEM, ENOMEM_stripe_buf) \
+	x(ENOMEM, ENOMEM_replicas_table) \
+	x(ENOMEM, ENOMEM_cpu_replicas) \
+	x(ENOMEM, ENOMEM_replicas_gc) \
+	x(ENOMEM, ENOMEM_disk_groups_validate) \
+	x(ENOMEM, ENOMEM_disk_groups_to_cpu) \
+	x(ENOMEM, ENOMEM_mark_snapshot) \
+	x(ENOMEM, ENOMEM_mark_stripe) \
+	x(ENOMEM, ENOMEM_mark_stripe_ptr) \
+	x(ENOMEM, ENOMEM_btree_key_cache_create) \
+	x(ENOMEM, ENOMEM_btree_key_cache_fill) \
+	x(ENOMEM, ENOMEM_btree_key_cache_insert) \
+	x(ENOMEM, ENOMEM_trans_kmalloc) \
+	x(ENOMEM, ENOMEM_trans_log_msg) \
+	x(ENOMEM, ENOMEM_do_encrypt) \
+	x(ENOMEM, ENOMEM_ec_read_extent) \
+	x(ENOMEM, ENOMEM_ec_stripe_mem_alloc) \
+	x(ENOMEM, ENOMEM_ec_new_stripe_alloc) \
+	x(ENOMEM, ENOMEM_fs_btree_cache_init) \
+	x(ENOMEM, ENOMEM_fs_btree_key_cache_init) \
+	x(ENOMEM, ENOMEM_fs_counters_init) \
+	x(ENOMEM, ENOMEM_fs_btree_write_buffer_init) \
+	x(ENOMEM, ENOMEM_io_clock_init) \
+	x(ENOMEM, ENOMEM_blacklist_table_init) \
+	x(ENOMEM, ENOMEM_sb_realloc_injected) \
+	x(ENOMEM, ENOMEM_sb_bio_realloc) \
+	x(ENOMEM, ENOMEM_sb_buf_realloc) \
+	x(ENOMEM, ENOMEM_sb_journal_validate) \
+	x(ENOMEM, ENOMEM_sb_journal_v2_validate) \
+	x(ENOMEM, ENOMEM_journal_entry_add) \
+	x(ENOMEM, ENOMEM_journal_read_buf_realloc) \
+	x(ENOMEM, ENOMEM_btree_interior_update_worker_init)\
+	x(ENOMEM, ENOMEM_btree_interior_update_pool_init) \
+	x(ENOMEM, ENOMEM_bio_read_init) \
+	x(ENOMEM, ENOMEM_bio_read_split_init) \
+	x(ENOMEM, ENOMEM_bio_write_init) \
+	x(ENOMEM, ENOMEM_bio_bounce_pages_init) \
+	x(ENOMEM, ENOMEM_writepage_bioset_init) \
+	x(ENOMEM, ENOMEM_dio_read_bioset_init) \
+	x(ENOMEM, ENOMEM_dio_write_bioset_init) \
+	x(ENOMEM, ENOMEM_nocow_flush_bioset_init) \
+	x(ENOMEM, ENOMEM_promote_table_init) \
+	x(ENOMEM, ENOMEM_compression_bounce_read_init) \
+	x(ENOMEM, ENOMEM_compression_bounce_write_init) \
+	x(ENOMEM, ENOMEM_compression_workspace_init) \
+	x(ENOMEM, ENOMEM_decompression_workspace_init) \
+	x(ENOMEM, ENOMEM_bucket_gens) \
+	x(ENOMEM, ENOMEM_buckets_nouse) \
+	x(ENOMEM, ENOMEM_usage_init) \
+	x(ENOMEM, ENOMEM_btree_node_read_all_replicas) \
+	x(ENOMEM, ENOMEM_btree_node_reclaim) \
+	x(ENOMEM, ENOMEM_btree_node_mem_alloc) \
+	x(ENOMEM, ENOMEM_btree_cache_cannibalize_lock) \
+	x(ENOMEM, ENOMEM_buckets_waiting_for_journal_init)\
+	x(ENOMEM, ENOMEM_buckets_waiting_for_journal_set) \
+	x(ENOMEM, ENOMEM_set_nr_journal_buckets) \
+	x(ENOMEM, ENOMEM_dev_journal_init) \
+	x(ENOMEM, ENOMEM_journal_pin_fifo) \
+	x(ENOMEM, ENOMEM_journal_buf) \
+	x(ENOMEM, ENOMEM_gc_start) \
+	x(ENOMEM, ENOMEM_gc_alloc_start) \
+	x(ENOMEM, ENOMEM_gc_reflink_start) \
+	x(ENOMEM, ENOMEM_gc_gens) \
+	x(ENOMEM, ENOMEM_gc_repair_key) \
+	x(ENOMEM, ENOMEM_fsck_extent_ends_at) \
+	x(ENOMEM, ENOMEM_fsck_add_nlink) \
+	x(ENOMEM, ENOMEM_journal_key_insert) \
+	x(ENOMEM, ENOMEM_journal_keys_sort) \
+	x(ENOMEM, ENOMEM_journal_replay) \
+	x(ENOMEM, ENOMEM_read_superblock_clean) \
+	x(ENOMEM, ENOMEM_fs_alloc) \
+	x(ENOMEM, ENOMEM_fs_name_alloc) \
+	x(ENOMEM, ENOMEM_fs_other_alloc) \
+	x(ENOMEM, ENOMEM_dev_alloc) \
 	x(ENOSPC, ENOSPC_disk_reservation) \
 	x(ENOSPC, ENOSPC_bucket_alloc) \
 	x(ENOSPC, ENOSPC_disk_label_add) \
@@ -14,9 +87,11 @@
 	x(ENOSPC, ENOSPC_subvolume_create) \
 	x(ENOSPC, ENOSPC_sb) \
 	x(ENOSPC, ENOSPC_sb_journal) \
+	x(ENOSPC, ENOSPC_sb_journal_seq_blacklist) \
 	x(ENOSPC, ENOSPC_sb_quota) \
 	x(ENOSPC, ENOSPC_sb_replicas) \
 	x(ENOSPC, ENOSPC_sb_members) \
+	x(ENOSPC, ENOSPC_sb_crypt) \
 	x(0, open_buckets_empty) \
 	x(0, freelist_empty) \
 	x(BCH_ERR_freelist_empty, no_buckets_found) \
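The new names are only ever written here: BCH_ERRCODES() is an x-macro, expanded several times with different definitions of x() to generate the enum, the code-to-string table, and the parent-class table consumed by bch2_err_matches(). Roughly (a simplified sketch of the expansion pattern; the real one lives in errcode.h/errcode.c):

#define BCH_ERRCODES()				\
	x(ENOMEM,	ENOMEM_stripe_buf)	\
	x(ENOMEM,	ENOMEM_replicas_table)

#define BCH_ERR_START 2048

enum bch_errcode {
	BCH_ERR_START_ = BCH_ERR_START - 1,
#define x(class, err) BCH_ERR_##err,
	BCH_ERRCODES()
#undef x
	BCH_ERR_MAX
};

static const char * const bch2_err_strs[] = {
#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = #err,
	BCH_ERRCODES()
#undef x
};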

libbcachefs/extents.c

@@ -690,7 +690,21 @@ unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
 	unsigned durability = 0;
 
 	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
-		durability += bch2_extent_ptr_durability(c,& p);
+		durability += bch2_extent_ptr_durability(c, &p);
+
+	return durability;
+}
+
+static unsigned bch2_bkey_durability_safe(struct bch_fs *c, struct bkey_s_c k)
+{
+	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
+	const union bch_extent_entry *entry;
+	struct extent_ptr_decoded p;
+	unsigned durability = 0;
+
+	bkey_for_each_ptr_decode(k.k, ptrs, p, entry)
+		if (p.ptr.dev < c->sb.nr_devices && c->devs[p.ptr.dev])
+			durability += bch2_extent_ptr_durability(c, &p);
 
 	return durability;
 }
@@ -990,7 +1004,7 @@ void bch2_bkey_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
 	bool first = true;
 
 	if (c)
-		prt_printf(out, "durability: %u ", bch2_bkey_durability(c, k));
+		prt_printf(out, "durability: %u ", bch2_bkey_durability_safe(c, k));
 
 	bkey_extent_entry_for_each(ptrs, entry) {
 		if (!first)

libbcachefs/fs-io.c

@@ -3706,16 +3706,22 @@ int bch2_fs_fsio_init(struct bch_fs *c)
 	if (bioset_init(&c->writepage_bioset,
 			4, offsetof(struct bch_writepage_io, op.wbio.bio),
-			BIOSET_NEED_BVECS) ||
-	    bioset_init(&c->dio_read_bioset,
+			BIOSET_NEED_BVECS))
+		return -BCH_ERR_ENOMEM_writepage_bioset_init;
+
+	if (bioset_init(&c->dio_read_bioset,
 			4, offsetof(struct dio_read, rbio.bio),
-			BIOSET_NEED_BVECS) ||
-	    bioset_init(&c->dio_write_bioset,
+			BIOSET_NEED_BVECS))
+		return -BCH_ERR_ENOMEM_dio_read_bioset_init;
+
+	if (bioset_init(&c->dio_write_bioset,
 			4, offsetof(struct dio_write, op.wbio.bio),
-			BIOSET_NEED_BVECS) ||
-	    bioset_init(&c->nocow_flush_bioset,
+			BIOSET_NEED_BVECS))
+		return -BCH_ERR_ENOMEM_dio_write_bioset_init;
+
+	if (bioset_init(&c->nocow_flush_bioset,
 			1, offsetof(struct nocow_flush, bio), 0))
-		ret = -ENOMEM;
+		return -BCH_ERR_ENOMEM_nocow_flush_bioset_init;
 
 	pr_verbose_init(c->opts, "ret %i", ret);
 	return ret;

libbcachefs/fsck.c

@@ -1237,7 +1237,7 @@ static int extent_ends_at(extent_ends *extent_ends,
 			sizeof(seen->ids.data[0]) * seen->ids.size,
 			GFP_KERNEL);
 	if (!n.seen.ids.data)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
 
 	darray_for_each(*extent_ends, i) {
 		if (i->snapshot == k.k->p.snapshot) {
@@ -2141,7 +2141,7 @@ static int add_nlink(struct bch_fs *c, struct nlink_table *t,
 		if (!d) {
 			bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
 				new_size);
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_fsck_add_nlink;
 		}
 
 		if (t->d)

libbcachefs/io.c

@@ -3024,18 +3024,26 @@ void bch2_fs_io_exit(struct bch_fs *c)
 int bch2_fs_io_init(struct bch_fs *c)
 {
 	if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
-			BIOSET_NEED_BVECS) ||
-	    bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
-			BIOSET_NEED_BVECS) ||
-	    bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
-			BIOSET_NEED_BVECS) ||
-	    mempool_init_page_pool(&c->bio_bounce_pages,
+			BIOSET_NEED_BVECS))
+		return -BCH_ERR_ENOMEM_bio_read_init;
+
+	if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
+			BIOSET_NEED_BVECS))
+		return -BCH_ERR_ENOMEM_bio_read_split_init;
+
+	if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio),
+			BIOSET_NEED_BVECS))
+		return -BCH_ERR_ENOMEM_bio_write_init;
+
+	if (mempool_init_page_pool(&c->bio_bounce_pages,
 			       max_t(unsigned,
 				     c->opts.btree_node_size,
 				     c->opts.encoded_extent_max) /
-			       PAGE_SIZE, 0) ||
-	    rhashtable_init(&c->promote_table, &bch_promote_params))
-		return -ENOMEM;
+			       PAGE_SIZE, 0))
+		return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
+
+	if (rhashtable_init(&c->promote_table, &bch_promote_params))
+		return -BCH_ERR_ENOMEM_promote_table_init;
 
 	return 0;
 }

libbcachefs/journal.c

@@ -769,7 +769,7 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
 	new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
 	new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
 	if (!bu || !ob || !new_buckets || !new_bucket_seq) {
-		ret = -ENOMEM;
+		ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
 		goto err_free;
 	}
@@ -942,7 +942,7 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
 	unsigned nr;
 
 	if (dynamic_fault("bcachefs:add:journal_alloc"))
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_set_nr_journal_buckets;
 
 	/* 1/128th of the device by default: */
 	nr = ca->mi.nbuckets >> 7;
@@ -1034,7 +1034,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
 		init_fifo(&j->pin, roundup_pow_of_two(nr + 1), GFP_KERNEL);
 		if (!j->pin.data) {
 			bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_journal_pin_fifo;
 		}
 	}
@@ -1128,19 +1128,19 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
 
 	ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
 	if (!ja->bucket_seq)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_dev_journal_init;
 
 	nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
 
 	ca->journal.bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
 	if (!ca->journal.bio)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_dev_journal_init;
 
 	bio_init(ca->journal.bio, NULL, ca->journal.bio->bi_inline_vecs, nr_bvecs, 0);
 
 	ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
 	if (!ja->buckets)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_dev_journal_init;
 
 	if (journal_buckets_v2) {
 		unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
@@ -1194,7 +1194,7 @@ int bch2_fs_journal_init(struct journal *j)
 		   { .cur_entry_offset = JOURNAL_ENTRY_CLOSED_VAL }).v);
 
 	if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL))) {
-		ret = -ENOMEM;
+		ret = -BCH_ERR_ENOMEM_journal_pin_fifo;
 		goto out;
 	}
@@ -1202,7 +1202,7 @@ int bch2_fs_journal_init(struct journal *j)
 		j->buf[i].buf_size = JOURNAL_ENTRY_SIZE_MIN;
 		j->buf[i].data = kvpmalloc(j->buf[i].buf_size, GFP_KERNEL);
 		if (!j->buf[i].data) {
-			ret = -ENOMEM;
+			ret = -BCH_ERR_ENOMEM_journal_buf;
 			goto out;
 		}
 	}

libbcachefs/journal_io.c

@@ -119,7 +119,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
 			  journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
 			  GFP_KERNEL);
 	if (!_i)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_journal_entry_add;
 
 	/*
 	 * Duplicate journal entries? If so we want the one that didn't have a
@@ -149,7 +149,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
 replace:
 	i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
 	if (!i)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_journal_entry_add;
 
 	i->nr_ptrs = 0;
 	i->csum_good = entry_ptr.csum_good;
@@ -836,12 +836,12 @@ static int journal_read_buf_realloc(struct journal_read_buf *b,
 
 	/* the bios are sized for this many pages, max: */
 	if (new_size > JOURNAL_ENTRY_SIZE_MAX)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
 	new_size = roundup_pow_of_two(new_size);
 	n = kvpmalloc(new_size, GFP_KERNEL);
 	if (!n)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
 
 	kvpfree(b->data, b->size);
 	b->data = n;

libbcachefs/journal_sb.c

@@ -33,7 +33,7 @@ static int bch2_sb_journal_validate(struct bch_sb *sb,
 	b = kmalloc_array(nr, sizeof(u64), GFP_KERNEL);
 	if (!b)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_sb_journal_validate;
 
 	for (i = 0; i < nr; i++)
 		b[i] = le64_to_cpu(journal->buckets[i]);
@@ -116,7 +116,7 @@ static int bch2_sb_journal_v2_validate(struct bch_sb *sb,
 	b = kmalloc_array(nr, sizeof(*b), GFP_KERNEL);
 	if (!b)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_sb_journal_v2_validate;
 
 	for (i = 0; i < nr; i++) {
 		b[i].start = le64_to_cpu(journal->d[i].start);

libbcachefs/journal_seq_blacklist.c

@@ -103,7 +103,7 @@ int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
 	bl = bch2_sb_resize_journal_seq_blacklist(&c->disk_sb,
 					sb_blacklist_u64s(nr + 1));
 	if (!bl) {
-		ret = -ENOMEM;
+		ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
 		goto out;
 	}
@@ -168,7 +168,7 @@ int bch2_blacklist_table_initialize(struct bch_fs *c)
 	t = kzalloc(sizeof(*t) + sizeof(t->entries[0]) * nr,
 		    GFP_KERNEL);
 	if (!t)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_blacklist_table_init;
 
 	t->nr = nr;

libbcachefs/move.c

@@ -60,7 +60,6 @@ struct moving_io {
 static void move_free(struct moving_io *io)
 {
 	struct moving_context *ctxt = io->write.ctxt;
-	struct bch_fs *c = ctxt->c;
 
 	if (io->b)
 		atomic_dec(&io->b->count);
@@ -296,6 +295,7 @@ static int bch2_move_extent(struct btree_trans *trans,
 	if (!io)
 		goto err;
 
+	INIT_LIST_HEAD(&io->io_list);
 	io->write.ctxt = ctxt;
 	io->read_sectors = k.k->size;
 	io->write_sectors = k.k->size;
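The move.c hunks are small cleanups riding along with the sync: an unused local is dropped, and struct moving_io's io_list head is now initialized as soon as the object is allocated (presumably guarding a path that inspects it before the io is added to any list). That ordering matters because zeroed or garbage memory is not a valid empty list; list_empty()/list_del() on it would chase junk pointers. For reference:

struct list_head {
	struct list_head *next, *prev;
};

/* an empty list points at itself; memory fresh from kzalloc() holds
 * NULL pointers, which the list primitives would happily dereference */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}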

libbcachefs/recovery.c

@@ -228,7 +228,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 		if (!new_keys.d) {
 			bch_err(c, "%s: error allocating new key array (size %zu)",
 				__func__, new_keys.size);
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_journal_key_insert;
 		}
 
 		/* Since @keys was full, there was no gap: */
@@ -266,7 +266,7 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
 	n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
 	if (!n)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_journal_key_insert;
 
 	bkey_copy(n, k);
 	ret = bch2_journal_key_insert_take(c, id, level, n);
@@ -502,8 +502,11 @@ static int journal_keys_sort(struct bch_fs *c)
 	keys->size = roundup_pow_of_two(nr_keys);
 	keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
-	if (!keys->d)
-		return -ENOMEM;
+	if (!keys->d) {
+		bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys)",
+			nr_keys);
+		return -BCH_ERR_ENOMEM_journal_keys_sort;
+	}
 
 	genradix_for_each(&c->journal_entries, iter, _i) {
 		i = *_i;
@@ -601,7 +604,7 @@ static int bch2_journal_replay(struct bch_fs *c, u64 start_seq, u64 end_seq)
 	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
 	if (!keys_sorted)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_journal_replay;
 
 	for (i = 0; i < keys->nr; i++)
 		keys_sorted[i] = &keys->d[i];
@@ -905,7 +908,7 @@ static struct bch_sb_field_clean *read_superblock_clean(struct bch_fs *c)
 			 GFP_KERNEL);
 	if (!clean) {
 		mutex_unlock(&c->sb_lock);
-		return ERR_PTR(-ENOMEM);
+		return ERR_PTR(-BCH_ERR_ENOMEM_read_superblock_clean);
 	}
 
 	ret = bch2_sb_clean_validate_late(c, clean, READ);

libbcachefs/replicas.c

@@ -336,7 +336,7 @@ out:
 	return ret;
 err:
 	bch_err(c, "error updating replicas table: memory allocation failure");
-	ret = -ENOMEM;
+	ret = -BCH_ERR_ENOMEM_replicas_table;
 	goto out;
 }
@@ -383,14 +383,18 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
 	if (c->replicas_gc.entries &&
 	    !__replicas_has_entry(&c->replicas_gc, new_entry)) {
 		new_gc = cpu_replicas_add_entry(&c->replicas_gc, new_entry);
-		if (!new_gc.entries)
+		if (!new_gc.entries) {
+			ret = -BCH_ERR_ENOMEM_cpu_replicas;
 			goto err;
+		}
 	}
 
 	if (!__replicas_has_entry(&c->replicas, new_entry)) {
 		new_r = cpu_replicas_add_entry(&c->replicas, new_entry);
-		if (!new_r.entries)
+		if (!new_r.entries) {
+			ret = -BCH_ERR_ENOMEM_cpu_replicas;
 			goto err;
+		}
 
 		ret = bch2_cpu_replicas_to_sb_replicas(c, &new_r);
 		if (ret)
@@ -425,8 +429,7 @@ out:
 	return ret;
 err:
-	bch_err(c, "error adding replicas entry: memory allocation failure");
-	ret = -ENOMEM;
+	bch_err(c, "error adding replicas entry: %s", bch2_err_str(ret));
 	goto out;
 }
@@ -478,7 +481,7 @@ int bch2_replicas_gc_end(struct bch_fs *c, int ret)
 		    bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
 			n = cpu_replicas_add_entry(&c->replicas_gc, e);
 			if (!n.entries) {
-				ret = -ENOMEM;
+				ret = -BCH_ERR_ENOMEM_cpu_replicas;
 				goto err;
 			}
@@ -533,7 +536,7 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
 	if (!c->replicas_gc.entries) {
 		mutex_unlock(&c->sb_lock);
 		bch_err(c, "error allocating c->replicas_gc");
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_replicas_gc;
 	}
 
 	for_each_cpu_replicas_entry(&c->replicas, e)
@@ -562,7 +565,7 @@ retry:
 	new.entries = kcalloc(nr, new.entry_size, GFP_KERNEL);
 	if (!new.entries) {
 		bch_err(c, "error allocating c->replicas_gc");
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_replicas_gc;
 	}
 
 	mutex_lock(&c->sb_lock);
@@ -621,7 +624,7 @@ int bch2_replicas_set_usage(struct bch_fs *c,
 		n = cpu_replicas_add_entry(&c->replicas, r);
 		if (!n.entries)
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_cpu_replicas;
 
 		ret = replicas_table_update(c, &n);
 		if (ret)
@@ -655,7 +658,7 @@ __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
 	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
 	if (!cpu_r->entries)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_cpu_replicas;
 
 	cpu_r->nr = nr;
 	cpu_r->entry_size = entry_size;
@@ -687,7 +690,7 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
 	if (!cpu_r->entries)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_cpu_replicas;
 
 	cpu_r->nr = nr;
 	cpu_r->entry_size = entry_size;
@@ -717,9 +720,8 @@ int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
 		ret = __bch2_sb_replicas_to_cpu_replicas(sb_v1, &new_r);
 	else if ((sb_v0 = bch2_sb_get_replicas_v0(c->disk_sb.sb)))
 		ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_v0, &new_r);
-
 	if (ret)
-		return -ENOMEM;
+		return ret;
 
 	bch2_cpu_replicas_sort(&new_r);
@@ -881,8 +883,9 @@ static int bch2_sb_replicas_validate(struct bch_sb *sb, struct bch_sb_field *f,
 	struct bch_replicas_cpu cpu_r;
 	int ret;
 
-	if (__bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r))
-		return -ENOMEM;
+	ret = __bch2_sb_replicas_to_cpu_replicas(sb_r, &cpu_r);
+	if (ret)
+		return ret;
 
 	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
 	kfree(cpu_r.entries);
@@ -919,8 +922,9 @@ static int bch2_sb_replicas_v0_validate(struct bch_sb *sb, struct bch_sb_field *
 	struct bch_replicas_cpu cpu_r;
 	int ret;
 
-	if (__bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r))
-		return -ENOMEM;
+	ret = __bch2_sb_replicas_v0_to_cpu_replicas(sb_r, &cpu_r);
+	if (ret)
+		return ret;
 
 	ret = bch2_cpu_replicas_validate(&cpu_r, sb, err);
 	kfree(cpu_r.entries);

libbcachefs/subvolume.c

@@ -87,7 +87,7 @@ int bch2_mark_snapshot(struct btree_trans *trans,
 			      U32_MAX - new.k->p.offset,
 			      GFP_KERNEL);
 	if (!t)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_mark_snapshot;
 
 	if (new.k->type == KEY_TYPE_snapshot) {
 		struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);

libbcachefs/super-io.c

@@ -136,14 +136,14 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
 		return 0;
 
 	if (dynamic_fault("bcachefs:add:super_realloc"))
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_sb_realloc_injected;
 
 	if (sb->have_bio) {
 		unsigned nr_bvecs = DIV_ROUND_UP(new_buffer_size, PAGE_SIZE);
 
 		bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
 		if (!bio)
-			return -ENOMEM;
+			return -BCH_ERR_ENOMEM_sb_bio_realloc;
 
 		bio_init(bio, NULL, bio->bi_inline_vecs, nr_bvecs, 0);
@@ -153,7 +153,7 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
 
 	new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
 	if (!new_sb)
-		return -ENOMEM;
+		return -BCH_ERR_ENOMEM_sb_buf_realloc;
 
 	sb->sb = new_sb;
 	sb->buffer_size = new_buffer_size;
@@ -559,8 +559,9 @@ reread:
 	}
 
 	if (bytes > sb->buffer_size) {
-		if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s)))
-			return -ENOMEM;
+		ret = bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s));
+		if (ret)
+			return ret;
 		goto reread;
 	}

libbcachefs/super.c

@@ -647,7 +647,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
 	if (!c) {
-		c = ERR_PTR(-ENOMEM);
+		c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
 		goto out;
 	}
@@ -737,7 +737,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	strscpy(c->name, name.buf, sizeof(c->name));
 	printbuf_exit(&name);
 
-	ret = name.allocation_failure ? -ENOMEM : 0;
+	ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
 	if (ret)
 		goto err;
@@ -801,7 +801,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
 	    mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048) ||
 	    !(c->unused_inode_hints = kcalloc(1U << c->inode_shard_bits,
 					      sizeof(u64), GFP_KERNEL))) {
-		ret = -ENOMEM;
+		ret = -BCH_ERR_ENOMEM_fs_other_alloc;
 		goto err;
 	}
@@ -1182,7 +1182,7 @@ out:
 err:
 	if (ca)
 		bch2_dev_free(ca);
-	ret = -ENOMEM;
+	ret = -BCH_ERR_ENOMEM_dev_alloc;
 	goto out;
 }