Update bcachefs sources to a41cc7750fb8 bcachefs: Add flags to subvolume_to_text()

Kent Overstreet 2025-06-02 17:24:53 -04:00
parent 315508358d
commit 838bbd755e
67 changed files with 987 additions and 750 deletions

.bcachefs_revision View File

@ -1 +1 @@
d316ba08b04582f7dfc7d66b5c97cb50bf863a1c
a41cc7750fb8af26a79323542de3d8244fe90bfc

libbcachefs/alloc_background.c View File

@ -21,7 +21,6 @@
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"
#include <linux/kthread.h>
@ -866,7 +865,7 @@ int bch2_trigger_alloc(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
if (!ca)
return -BCH_ERR_trigger_alloc;
return bch_err_throw(c, trigger_alloc);
struct bch_alloc_v4 old_a_convert;
const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
@ -1045,7 +1044,7 @@ fsck_err:
invalid_bucket:
bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
(bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
ret = -BCH_ERR_trigger_alloc;
ret = bch_err_throw(c, trigger_alloc);
goto err;
}
@ -1459,7 +1458,7 @@ delete:
ret = bch2_btree_bit_mod_iter(trans, iter, false) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc) ?:
-BCH_ERR_transaction_restart_commit;
bch_err_throw(c, transaction_restart_commit);
goto out;
} else {
/*
@ -1782,13 +1781,14 @@ int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
struct bch_fs *c = ca->fs;
int ret;
mutex_lock(&ca->discard_buckets_in_flight_lock);
struct discard_in_flight *i =
darray_find_p(ca->discard_buckets_in_flight, i, i->bucket == bucket);
if (i) {
ret = -BCH_ERR_EEXIST_discard_in_flight_add;
ret = bch_err_throw(c, EEXIST_discard_in_flight_add);
goto out;
}

libbcachefs/alloc_foreground.c View File

@ -227,7 +227,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c,
track_event_change(&c->times[BCH_TIME_blocked_allocate_open_bucket], true);
spin_unlock(&c->freelist_lock);
return ERR_PTR(-BCH_ERR_open_buckets_empty);
return ERR_PTR(bch_err_throw(c, open_buckets_empty));
}
/* Recheck under lock: */
@ -533,7 +533,7 @@ again:
track_event_change(&c->times[BCH_TIME_blocked_allocate], true);
ob = ERR_PTR(-BCH_ERR_freelist_empty);
ob = ERR_PTR(bch_err_throw(c, freelist_empty));
goto err;
}
@ -558,7 +558,7 @@ alloc:
}
err:
if (!ob)
ob = ERR_PTR(-BCH_ERR_no_buckets_found);
ob = ERR_PTR(bch_err_throw(c, no_buckets_found));
if (!IS_ERR(ob))
ob->data_type = req->data_type;
@ -709,7 +709,7 @@ inline int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
struct closure *cl)
{
struct bch_fs *c = trans->c;
int ret = -BCH_ERR_insufficient_devices;
int ret = 0;
BUG_ON(req->nr_effective >= req->nr_replicas);
@ -738,13 +738,16 @@ inline int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
continue;
}
if (add_new_bucket(c, req, ob)) {
ret = 0;
ret = add_new_bucket(c, req, ob);
if (ret)
break;
}
}
return ret;
if (ret == 1)
return 0;
if (ret)
return ret;
return bch_err_throw(c, insufficient_devices);
}
/* Allocate from stripes: */
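
The rewritten loop propagates add_new_bucket()'s return value directly: judging from the diff, 1 means enough replicas have been allocated (mapped to success), a negative code aborts, and falling out of the loop with 0 means no device could satisfy the request, hence insufficient_devices. A standalone toy model of that convention (simplified, hypothetical names):

    #include <stdio.h>

    /* Toy model of the return convention assumed above: a per-device step
     * returns 1 when enough replicas are allocated, a negative code on
     * failure, and 0 to keep scanning devices.
     */
    static int add_new_bucket(int *nr_effective, int nr_replicas)
    {
            ++*nr_effective;
            return *nr_effective >= nr_replicas ? 1 : 0;
    }

    static int alloc_set(int nr_devs, int nr_replicas)
    {
            int nr_effective = 0, ret = 0;

            for (int d = 0; d < nr_devs; d++) {
                    ret = add_new_bucket(&nr_effective, nr_replicas);
                    if (ret)
                            break;
            }

            if (ret == 1)   /* fully replicated: success */
                    return 0;
            if (ret)        /* hard error from the allocator */
                    return ret;
            return -1;      /* stand-in for bch_err_throw(c, insufficient_devices) */
    }

    int main(void)
    {
            printf("%d\n", alloc_set(3, 2));        /* 0: success */
            printf("%d\n", alloc_set(1, 2));        /* -1: insufficient devices */
            return 0;
    }
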
@ -1373,11 +1376,11 @@ err:
goto retry;
if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
ret = -BCH_ERR_bucket_alloc_blocked;
ret = bch_err_throw(c, bucket_alloc_blocked);
if (cl && !(flags & BCH_WRITE_alloc_nowait) &&
bch2_err_matches(ret, BCH_ERR_freelist_empty))
ret = -BCH_ERR_bucket_alloc_blocked;
ret = bch_err_throw(c, bucket_alloc_blocked);
return ret;
}

libbcachefs/backpointers.c View File

@ -142,7 +142,7 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
}
if (!will_check && __bch2_inconsistent_error(c, &buf))
ret = -BCH_ERR_erofs_unfixed_errors;
ret = bch_err_throw(c, erofs_unfixed_errors);
bch_err(c, "%s", buf.buf);
printbuf_exit(&buf);
@ -295,7 +295,7 @@ static struct btree *__bch2_backpointer_get_node(struct btree_trans *trans,
return b;
if (btree_node_will_make_reachable(b)) {
b = ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node);
b = ERR_PTR(bch_err_throw(c, backpointer_to_overwritten_btree_node));
} else {
int ret = backpointer_target_not_found(trans, bp, bkey_i_to_s_c(&b->key),
last_flushed, commit);
@ -353,7 +353,7 @@ static struct bkey_s_c __bch2_backpointer_get_key(struct btree_trans *trans,
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
} else {
struct btree *b = __bch2_backpointer_get_node(trans, bp, iter, last_flushed, commit);
if (b == ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node))
if (b == ERR_PTR(bch_err_throw(c, backpointer_to_overwritten_btree_node)))
return bkey_s_c_null;
if (IS_ERR_OR_NULL(b))
return ((struct bkey_s_c) { .k = ERR_CAST(b) });
@ -651,7 +651,7 @@ check_existing_bp:
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, other_extent);
bch_err(c, "%s", buf.buf);
ret = -BCH_ERR_fsck_repair_unimplemented;
ret = bch_err_throw(c, fsck_repair_unimplemented);
goto err;
missing:
printbuf_reset(&buf);
@ -953,7 +953,7 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
sectors[ALLOC_cached] > a->cached_sectors ||
sectors[ALLOC_stripe] > a->stripe_sectors) {
ret = check_bucket_backpointers_to_extents(trans, ca, alloc_k.k->p) ?:
-BCH_ERR_transaction_restart_nested;
bch_err_throw(c, transaction_restart_nested);
goto err;
}
@ -1351,7 +1351,7 @@ static int bch2_bucket_bitmap_set(struct bch_dev *ca, struct bucket_bitmap *b, u
b->buckets = kvcalloc(BITS_TO_LONGS(ca->mi.nbuckets),
sizeof(unsigned long), GFP_KERNEL);
if (!b->buckets)
return -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap;
return bch_err_throw(ca->fs, ENOMEM_backpointer_mismatches_bitmap);
}
b->nr += !__test_and_set_bit(bit, b->buckets);
@ -1360,7 +1360,8 @@ static int bch2_bucket_bitmap_set(struct bch_dev *ca, struct bucket_bitmap *b, u
return 0;
}
int bch2_bucket_bitmap_resize(struct bucket_bitmap *b, u64 old_size, u64 new_size)
int bch2_bucket_bitmap_resize(struct bch_dev *ca, struct bucket_bitmap *b,
u64 old_size, u64 new_size)
{
scoped_guard(mutex, &b->lock) {
if (!b->buckets)
@ -1369,7 +1370,7 @@ int bch2_bucket_bitmap_resize(struct bucket_bitmap *b, u64 old_size, u64 new_siz
unsigned long *n = kvcalloc(BITS_TO_LONGS(new_size),
sizeof(unsigned long), GFP_KERNEL);
if (!n)
return -BCH_ERR_ENOMEM_backpointer_mismatches_bitmap;
return bch_err_throw(ca->fs, ENOMEM_backpointer_mismatches_bitmap);
memcpy(n, b->buckets,
BITS_TO_LONGS(min(old_size, new_size)) * sizeof(unsigned long));

libbcachefs/backpointers.h View File

@ -194,7 +194,7 @@ static inline bool bch2_bucket_bitmap_test(struct bucket_bitmap *b, u64 i)
return bitmap && test_bit(i, bitmap);
}
int bch2_bucket_bitmap_resize(struct bucket_bitmap *, u64, u64);
int bch2_bucket_bitmap_resize(struct bch_dev *, struct bucket_bitmap *, u64, u64);
void bch2_bucket_bitmap_free(struct bucket_bitmap *);
#endif /* _BCACHEFS_BACKPOINTERS_BACKGROUND_H */

libbcachefs/bcachefs.h View File

@ -183,6 +183,16 @@
#define pr_fmt(fmt) "%s() " fmt "\n", __func__
#endif
#ifdef CONFIG_BCACHEFS_DEBUG
#define ENUMERATED_REF_DEBUG
#endif
#ifndef dynamic_fault
#define dynamic_fault(...) 0
#endif
#define race_fault(...) dynamic_fault("bcachefs:race")
#include <linux/backing-dev-defs.h>
#include <linux/bug.h>
#include <linux/bio.h>
@ -219,15 +229,30 @@
#include "time_stats.h"
#include "util.h"
#ifdef CONFIG_BCACHEFS_DEBUG
#define ENUMERATED_REF_DEBUG
#endif
#include "alloc_types.h"
#include "async_objs_types.h"
#include "btree_gc_types.h"
#include "btree_types.h"
#include "btree_node_scan_types.h"
#include "btree_write_buffer_types.h"
#include "buckets_types.h"
#include "buckets_waiting_for_journal_types.h"
#include "clock_types.h"
#include "disk_groups_types.h"
#include "ec_types.h"
#include "enumerated_ref_types.h"
#include "journal_types.h"
#include "keylist_types.h"
#include "quota_types.h"
#include "rebalance_types.h"
#include "recovery_passes_types.h"
#include "replicas_types.h"
#include "sb-members_types.h"
#include "subvolume_types.h"
#include "super_types.h"
#include "thread_with_file_types.h"
#ifndef dynamic_fault
#define dynamic_fault(...) 0
#endif
#define race_fault(...) dynamic_fault("bcachefs:race")
#include "trace.h"
#define count_event(_c, _name) this_cpu_inc((_c)->counters[BCH_COUNTER_##_name])
@ -380,6 +405,14 @@ do { \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
static inline int __bch2_err_trace(struct bch_fs *c, int err)
{
trace_error_throw(c, err, _THIS_IP_);
return err;
}
#define bch_err_throw(_c, _err) __bch2_err_trace(_c, -BCH_ERR_##_err)
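
This helper is the heart of the patch: every "return -BCH_ERR_foo" site in the tree becomes "return bch_err_throw(c, foo)", which fires a tracepoint at the throw site and then returns the same negative code. A self-contained sketch of the pattern, with the BCH_ERR_* code and the tracepoint stubbed out (stub values are illustrative, not the real definitions):

    #include <stdio.h>

    #define BCH_ERR_freelist_empty 2048    /* stand-in; real codes are generated */

    struct bch_fs { const char *name; };

    static inline int __bch2_err_trace(struct bch_fs *c, int err)
    {
            fprintf(stderr, "%s: throwing %d\n", c->name, err);     /* trace stub */
            return err;
    }

    #define bch_err_throw(_c, _err) __bch2_err_trace(_c, -BCH_ERR_##_err)

    static int alloc_bucket(struct bch_fs *c, int nr_free)
    {
            if (!nr_free)
                    return bch_err_throw(c, freelist_empty); /* was: return -BCH_ERR_freelist_empty; */
            return 0;
    }

    int main(void)
    {
            struct bch_fs c = { .name = "fs0" };
            return alloc_bucket(&c, 0) == -BCH_ERR_freelist_empty ? 0 : 1;
    }
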
/* Parameters that are useful for debugging, but should always be compiled in: */
#define BCH_DEBUG_PARAMS_ALWAYS() \
BCH_DEBUG_PARAM(key_merging_disabled, \
@ -486,29 +519,6 @@ enum bch_time_stats {
BCH_TIME_STAT_NR
};
#include "alloc_types.h"
#include "async_objs_types.h"
#include "btree_gc_types.h"
#include "btree_types.h"
#include "btree_node_scan_types.h"
#include "btree_write_buffer_types.h"
#include "buckets_types.h"
#include "buckets_waiting_for_journal_types.h"
#include "clock_types.h"
#include "disk_groups_types.h"
#include "ec_types.h"
#include "enumerated_ref_types.h"
#include "journal_types.h"
#include "keylist_types.h"
#include "quota_types.h"
#include "rebalance_types.h"
#include "recovery_passes_types.h"
#include "replicas_types.h"
#include "sb-members_types.h"
#include "subvolume_types.h"
#include "super_types.h"
#include "thread_with_file_types.h"
/* Number of nodes btree coalesce will try to coalesce at once */
#define GC_MERGE_NODES 4U

libbcachefs/btree_cache.c View File

@ -149,7 +149,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
b->data = kvmalloc(btree_buf_bytes(b), gfp);
if (!b->data)
return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
return bch_err_throw(c, ENOMEM_btree_node_mem_alloc);
#ifdef __KERNEL__
b->aux_data = kvmalloc(btree_aux_data_bytes(b), gfp);
#else
@ -162,7 +162,7 @@ static int btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
if (!b->aux_data) {
kvfree(b->data);
b->data = NULL;
return -BCH_ERR_ENOMEM_btree_node_mem_alloc;
return bch_err_throw(c, ENOMEM_btree_node_mem_alloc);
}
return 0;
@ -353,21 +353,21 @@ static int __btree_node_reclaim_checks(struct bch_fs *c, struct btree *b,
if (btree_node_noevict(b)) {
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_noevict]++;
return -BCH_ERR_ENOMEM_btree_node_reclaim;
return bch_err_throw(c, ENOMEM_btree_node_reclaim);
}
if (btree_node_write_blocked(b)) {
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_blocked]++;
return -BCH_ERR_ENOMEM_btree_node_reclaim;
return bch_err_throw(c, ENOMEM_btree_node_reclaim);
}
if (btree_node_will_make_reachable(b)) {
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_will_make_reachable]++;
return -BCH_ERR_ENOMEM_btree_node_reclaim;
return bch_err_throw(c, ENOMEM_btree_node_reclaim);
}
if (btree_node_dirty(b)) {
if (!flush) {
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_dirty]++;
return -BCH_ERR_ENOMEM_btree_node_reclaim;
return bch_err_throw(c, ENOMEM_btree_node_reclaim);
}
if (locked) {
@ -393,7 +393,7 @@ static int __btree_node_reclaim_checks(struct bch_fs *c, struct btree *b,
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_read_in_flight]++;
else if (btree_node_write_in_flight(b))
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_write_in_flight]++;
return -BCH_ERR_ENOMEM_btree_node_reclaim;
return bch_err_throw(c, ENOMEM_btree_node_reclaim);
}
if (locked)
@ -424,13 +424,13 @@ retry_unlocked:
if (!six_trylock_intent(&b->c.lock)) {
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_intent]++;
return -BCH_ERR_ENOMEM_btree_node_reclaim;
return bch_err_throw(c, ENOMEM_btree_node_reclaim);
}
if (!six_trylock_write(&b->c.lock)) {
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_lock_write]++;
six_unlock_intent(&b->c.lock);
return -BCH_ERR_ENOMEM_btree_node_reclaim;
return bch_err_throw(c, ENOMEM_btree_node_reclaim);
}
/* recheck under lock */
@ -682,7 +682,7 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
return 0;
err:
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
return bch_err_throw(c, ENOMEM_fs_btree_cache_init);
}
void bch2_fs_btree_cache_init_early(struct btree_cache *bc)
@ -727,7 +727,7 @@ int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure
if (!cl) {
trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
return -BCH_ERR_ENOMEM_btree_cache_cannibalize_lock;
return bch_err_throw(c, ENOMEM_btree_cache_cannibalize_lock);
}
closure_wait(&bc->alloc_wait, cl);
@ -741,7 +741,7 @@ int bch2_btree_cache_cannibalize_lock(struct btree_trans *trans, struct closure
}
trace_and_count(c, btree_cache_cannibalize_lock_fail, trans);
return -BCH_ERR_btree_cache_cannibalize_lock_blocked;
return bch_err_throw(c, btree_cache_cannibalize_lock_blocked);
success:
trace_and_count(c, btree_cache_cannibalize_lock, trans);

libbcachefs/btree_gc.c View File

@ -150,7 +150,7 @@ static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -BCH_ERR_ENOMEM_gc_repair_key;
return bch_err_throw(c, ENOMEM_gc_repair_key);
btree_ptr_to_v2(b, new);
b->data->min_key = new_min;
@ -190,7 +190,7 @@ static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
if (!new)
return -BCH_ERR_ENOMEM_gc_repair_key;
return bch_err_throw(c, ENOMEM_gc_repair_key);
btree_ptr_to_v2(b, new);
b->data->max_key = new_max;
@ -476,7 +476,8 @@ again:
if (ret)
goto err;
ret = bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan);
ret = lockrestart_do(trans,
bch2_btree_repair_topology_recurse(trans, cur, pulled_from_scan));
six_unlock_read(&cur->c.lock);
cur = NULL;
@ -522,46 +523,60 @@ fsck_err:
return ret;
}
static int bch2_check_root(struct btree_trans *trans, enum btree_id i,
bool *reconstructed_root)
{
struct bch_fs *c = trans->c;
struct btree_root *r = bch2_btree_id_root(c, i);
struct printbuf buf = PRINTBUF;
int ret = 0;
bch2_btree_id_to_text(&buf, i);
if (r->error) {
bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf);
r->alive = false;
r->error = 0;
if (!bch2_btree_has_scanned_nodes(c, i)) {
__fsck_err(trans,
FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0),
btree_root_unreadable_and_scan_found_nothing,
"no nodes found for btree %s, continue?", buf.buf);
bch2_btree_root_alloc_fake_trans(trans, i, 0);
} else {
bch2_btree_root_alloc_fake_trans(trans, i, 1);
bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
if (ret)
goto err;
}
*reconstructed_root = true;
}
err:
fsck_err:
printbuf_exit(&buf);
return ret;
}
int bch2_check_topology(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct bpos pulled_from_scan = POS_MIN;
struct printbuf buf = PRINTBUF;
int ret = 0;
bch2_trans_srcu_unlock(trans);
for (unsigned i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
struct btree_root *r = bch2_btree_id_root(c, i);
bool reconstructed_root = false;
recover:
ret = lockrestart_do(trans, bch2_check_root(trans, i, &reconstructed_root));
if (ret)
break;
printbuf_reset(&buf);
bch2_btree_id_to_text(&buf, i);
if (r->error) {
reconstruct_root:
bch_info(c, "btree root %s unreadable, must recover from scan", buf.buf);
r->alive = false;
r->error = 0;
if (!bch2_btree_has_scanned_nodes(c, i)) {
__fsck_err(trans,
FSCK_CAN_FIX|(!btree_id_important(i) ? FSCK_AUTOFIX : 0),
btree_root_unreadable_and_scan_found_nothing,
"no nodes found for btree %s, continue?", buf.buf);
bch2_btree_root_alloc_fake_trans(trans, i, 0);
} else {
bch2_btree_root_alloc_fake_trans(trans, i, 1);
bch2_shoot_down_journal_keys(c, i, 1, BTREE_MAX_DEPTH, POS_MIN, SPOS_MAX);
ret = bch2_get_scanned_nodes(c, i, 0, POS_MIN, SPOS_MAX);
if (ret)
break;
}
reconstructed_root = true;
}
struct btree_root *r = bch2_btree_id_root(c, i);
struct btree *b = r->b;
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
@ -575,17 +590,21 @@ reconstruct_root:
r->b = NULL;
if (!reconstructed_root)
goto reconstruct_root;
if (!reconstructed_root) {
r->error = -EIO;
goto recover;
}
struct printbuf buf = PRINTBUF;
bch2_btree_id_to_text(&buf, i);
bch_err(c, "empty btree root %s", buf.buf);
printbuf_exit(&buf);
bch2_btree_root_alloc_fake_trans(trans, i, 0);
r->alive = false;
ret = 0;
}
}
fsck_err:
printbuf_exit(&buf);
bch2_trans_put(trans);
return ret;
}
@ -935,7 +954,7 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
ret = genradix_prealloc(&ca->buckets_gc, ca->mi.nbuckets, GFP_KERNEL);
if (ret) {
bch2_dev_put(ca);
ret = -BCH_ERR_ENOMEM_gc_alloc_start;
ret = bch_err_throw(c, ENOMEM_gc_alloc_start);
break;
}
}
@ -1180,7 +1199,7 @@ int bch2_gc_gens(struct bch_fs *c)
ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
if (!ca->oldest_gen) {
bch2_dev_put(ca);
ret = -BCH_ERR_ENOMEM_gc_gens;
ret = bch_err_throw(c, ENOMEM_gc_gens);
goto err;
}

libbcachefs/btree_io.c View File

@ -557,7 +557,7 @@ static int __btree_err(int ret,
const char *fmt, ...)
{
if (c->recovery.curr_pass == BCH_RECOVERY_PASS_scan_for_btree_nodes)
return -BCH_ERR_fsck_fix;
return bch_err_throw(c, fsck_fix);
bool have_retry = false;
int ret2;
@ -572,9 +572,9 @@ static int __btree_err(int ret,
}
if (!have_retry && ret == -BCH_ERR_btree_node_read_err_want_retry)
ret = -BCH_ERR_btree_node_read_err_fixable;
ret = bch_err_throw(c, btree_node_read_err_fixable);
if (!have_retry && ret == -BCH_ERR_btree_node_read_err_must_retry)
ret = -BCH_ERR_btree_node_read_err_bad_node;
ret = bch_err_throw(c, btree_node_read_err_bad_node);
bch2_sb_error_count(c, err_type);
@ -609,7 +609,7 @@ static int __btree_err(int ret,
}
if (!have_retry)
ret = -BCH_ERR_fsck_fix;
ret = bch_err_throw(c, fsck_fix);
goto out;
case -BCH_ERR_btree_node_read_err_bad_node:
prt_str(&out, ", ");
@ -638,7 +638,7 @@ static int __btree_err(int ret,
}
if (!have_retry)
ret = -BCH_ERR_fsck_fix;
ret = bch_err_throw(c, fsck_fix);
goto out;
case -BCH_ERR_btree_node_read_err_bad_node:
prt_str(&out, ", ");
@ -1687,7 +1687,7 @@ static int btree_node_read_all_replicas(struct bch_fs *c, struct btree *b, bool
ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -BCH_ERR_ENOMEM_btree_node_read_all_replicas;
return bch_err_throw(c, ENOMEM_btree_node_read_all_replicas);
closure_init(&ra->cl, NULL);
ra->c = c;
@ -1869,7 +1869,7 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
bch2_btree_node_hash_remove(&c->btree_cache, b);
mutex_unlock(&c->btree_cache.lock);
ret = -BCH_ERR_btree_node_read_error;
ret = bch_err_throw(c, btree_node_read_error);
goto err;
}
@ -2019,7 +2019,7 @@ int bch2_btree_node_scrub(struct btree_trans *trans,
struct bch_fs *c = trans->c;
if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_node_scrub))
return -BCH_ERR_erofs_no_writes;
return bch_err_throw(c, erofs_no_writes);
struct extent_ptr_decoded pick;
int ret = bch2_bkey_pick_read_device(c, k, NULL, &pick, dev);
@ -2029,7 +2029,7 @@ int bch2_btree_node_scrub(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_get_ioref(c, pick.ptr.dev, READ,
BCH_DEV_READ_REF_btree_node_scrub);
if (!ca) {
ret = -BCH_ERR_device_offline;
ret = bch_err_throw(c, device_offline);
goto err;
}
@ -2166,7 +2166,7 @@ static void btree_node_write_work(struct work_struct *work)
bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));
if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key))) {
ret = -BCH_ERR_btree_node_write_all_failed;
ret = bch_err_throw(c, btree_node_write_all_failed);
goto err;
}

libbcachefs/btree_iter.c View File

@ -938,7 +938,7 @@ static noinline_for_stack int btree_node_missing_err(struct btree_trans *trans,
bch2_fs_fatal_error(c, "%s", buf.buf);
printbuf_exit(&buf);
return -BCH_ERR_btree_need_topology_repair;
return bch_err_throw(c, btree_need_topology_repair);
}
static __always_inline int btree_path_down(struct btree_trans *trans,
@ -1006,7 +1006,7 @@ static int bch2_btree_path_traverse_all(struct btree_trans *trans)
int ret = 0;
if (trans->in_traverse_all)
return -BCH_ERR_transaction_restart_in_traverse_all;
return bch_err_throw(c, transaction_restart_in_traverse_all);
trans->in_traverse_all = true;
retry_all:

libbcachefs/btree_iter.h View File

@ -963,16 +963,6 @@ struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_trans *,
_p; \
})
#define bch2_trans_run(_c, _do) \
({ \
struct btree_trans *trans = bch2_trans_get(_c); \
int _ret = (_do); \
bch2_trans_put(trans); \
_ret; \
})
#define bch2_trans_do(_c, _do) bch2_trans_run(_c, lockrestart_do(trans, _do))
struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
void bch2_trans_put(struct btree_trans *);
@ -990,6 +980,27 @@ unsigned bch2_trans_get_fn_idx(const char *);
__bch2_trans_get(_c, trans_fn_idx); \
})
/*
* We don't use DEFINE_CLASS() because using a function for the constructor
* breaks bch2_trans_get()'s use of __func__
*/
typedef struct btree_trans * class_btree_trans_t;
static inline void class_btree_trans_destructor(struct btree_trans **p)
{
struct btree_trans *trans = *p;
bch2_trans_put(trans);
}
#define class_btree_trans_constructor(_c) bch2_trans_get(_c)
#define bch2_trans_run(_c, _do) \
({ \
CLASS(btree_trans, trans)(_c); \
(_do); \
})
#define bch2_trans_do(_c, _do) bch2_trans_run(_c, lockrestart_do(trans, _do))
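
bch2_trans_run() now rides on the kernel's scope-based cleanup machinery: CLASS(btree_trans, trans)(_c) declares a variable whose destructor (bch2_trans_put()) runs automatically when the scope exits, and the constructor stays a macro so that __func__ inside bch2_trans_get() still names the caller. A userspace model of the same mechanism, built on the compiler cleanup attribute that linux/cleanup.h uses (names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    struct btree_trans { const char *fn; };

    static struct btree_trans *trans_get(const char *fn)
    {
            struct btree_trans *t = malloc(sizeof(*t));
            t->fn = fn;
            printf("get trans (%s)\n", fn);
            return t;
    }

    static void trans_put_cleanup(struct btree_trans **p)
    {
            printf("put trans (%s)\n", (*p)->fn);
            free(*p);
    }

    /* Like CLASS(btree_trans, var)(c): the constructor is a macro, not a
     * function, so __func__ still reports the function declaring it.
     */
    #define TRANS(var) \
            struct btree_trans *var __attribute__((cleanup(trans_put_cleanup))) = \
                    trans_get(__func__)

    static int do_work(void)
    {
            TRANS(trans);   /* put automatically on any return path */
            printf("working in %s\n", trans->fn);
            return 0;
    }

    int main(void) { return do_work(); }
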
void bch2_btree_trans_to_text(struct printbuf *, struct btree_trans *);
void bch2_fs_btree_iter_exit(struct bch_fs *);

libbcachefs/btree_journal_iter.c View File

@ -292,7 +292,7 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
if (!new_keys.data) {
bch_err(c, "%s: error allocating new key array (size %zu)",
__func__, new_keys.size);
return -BCH_ERR_ENOMEM_journal_key_insert;
return bch_err_throw(c, ENOMEM_journal_key_insert);
}
/* Since @keys was full, there was no gap: */
@ -331,7 +331,7 @@ int bch2_journal_key_insert(struct bch_fs *c, enum btree_id id,
n = kmalloc(bkey_bytes(&k->k), GFP_KERNEL);
if (!n)
return -BCH_ERR_ENOMEM_journal_key_insert;
return bch_err_throw(c, ENOMEM_journal_key_insert);
bkey_copy(n, k);
ret = bch2_journal_key_insert_take(c, id, level, n);
@ -736,7 +736,7 @@ int bch2_journal_keys_sort(struct bch_fs *c)
if (keys->nr * 8 > keys->size * 7) {
bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu",
keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq));
return -BCH_ERR_ENOMEM_journal_keys_sort;
return bch_err_throw(c, ENOMEM_journal_keys_sort);
}
BUG_ON(darray_push(keys, n));

libbcachefs/btree_key_cache.c View File

@ -238,7 +238,7 @@ static int btree_key_cache_create(struct btree_trans *trans,
if (unlikely(!ck)) {
bch_err(c, "error allocating memory for key cache item, btree %s",
bch2_btree_id_str(ck_path->btree_id));
return -BCH_ERR_ENOMEM_btree_key_cache_create;
return bch_err_throw(c, ENOMEM_btree_key_cache_create);
}
}
@ -256,7 +256,7 @@ static int btree_key_cache_create(struct btree_trans *trans,
if (unlikely(!new_k)) {
bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
bch2_btree_id_str(ck->key.btree_id), key_u64s);
ret = -BCH_ERR_ENOMEM_btree_key_cache_fill;
ret = bch_err_throw(c, ENOMEM_btree_key_cache_fill);
} else if (ret) {
kfree(new_k);
goto err;
@ -822,20 +822,20 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *bc)
bc->nr_pending = alloc_percpu(size_t);
if (!bc->nr_pending)
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
return bch_err_throw(c, ENOMEM_fs_btree_cache_init);
if (rcu_pending_init(&bc->pending[0], &c->btree_trans_barrier, __bkey_cached_free) ||
rcu_pending_init(&bc->pending[1], &c->btree_trans_barrier, __bkey_cached_free))
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
return bch_err_throw(c, ENOMEM_fs_btree_cache_init);
if (rhashtable_init(&bc->table, &bch2_btree_key_cache_params))
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
return bch_err_throw(c, ENOMEM_fs_btree_cache_init);
bc->table_init_done = true;
shrink = shrinker_alloc(0, "%s-btree_key_cache", c->name);
if (!shrink)
return -BCH_ERR_ENOMEM_fs_btree_cache_init;
return bch_err_throw(c, ENOMEM_fs_btree_cache_init);
bc->shrink = shrink;
shrink->count_objects = bch2_btree_key_cache_count;
shrink->scan_objects = bch2_btree_key_cache_scan;

libbcachefs/btree_node_scan.c View File

@ -122,55 +122,22 @@ static int found_btree_node_cmp_cookie(const void *_l, const void *_r)
* Given two found btree nodes, if their sequence numbers are equal, take the
* one that's readable:
*/
static int found_btree_node_cmp_time(struct bch_fs *c,
struct found_btree_node *l,
struct found_btree_node *r)
static int found_btree_node_cmp_time(const struct found_btree_node *l,
const struct found_btree_node *r)
{
int cmp1 = cmp_int(l->seq, r->seq);
int cmp2 = l->journal_seq && r->journal_seq
? cmp_int(l->journal_seq, r->journal_seq)
: 0;
int cmp = cmp2 ?: cmp1;
if (!cmp || (cmp1 && cmp2 && cmp1 != cmp2)) {
struct printbuf buf = PRINTBUF;
bch2_log_msg_start(c, &buf);
if (cmp)
prt_printf(&buf, "found btree nodes in scan where seq, journal seq disagree on node age\n");
else
prt_printf(&buf, "found btree nodes where we don't know which is newer\n");
found_btree_node_to_text(&buf, c, l);
found_btree_node_to_text(&buf, c, r);
bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
return cmp2 ?: cmp1;
}
static int found_btree_node_cmp_time_nowarn(const struct found_btree_node *l,
const struct found_btree_node *r)
{
int cmp1 = cmp_int(l->seq, r->seq);
int cmp2 = l->journal_seq && r->journal_seq
? cmp_int(l->journal_seq, r->journal_seq)
: 0;
return cmp2 ?: cmp1;
return cmp_int(l->seq, r->seq) ?:
cmp_int(l->journal_seq, r->journal_seq);
}
static int found_btree_node_cmp_pos(const void *_l, const void *_r)
{
const struct found_btree_node *l = (void *) _l;
const struct found_btree_node *r = (void *) _r;
const struct found_btree_node *l = _l;
const struct found_btree_node *r = _r;
return cmp_int(l->btree_id, r->btree_id) ?:
-cmp_int(l->level, r->level) ?:
bpos_cmp(l->min_key, r->min_key) ?:
-found_btree_node_cmp_time_nowarn(l, r);
-found_btree_node_cmp_time(l, r);
}
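
found_btree_node_cmp_pos() chains its tie-breakers with the GNU a ?: b extension: each cmp_int() yields -1, 0 or 1, the first nonzero comparison wins, and negated terms sort descending. A standalone illustration of the idiom (hypothetical struct):

    #include <stdio.h>

    #define cmp_int(a, b) (((a) > (b)) - ((a) < (b)))   /* -1, 0 or 1 */

    struct node { int btree_id, level; unsigned long seq; };

    /* btree_id ascending, level descending, newest (highest seq) first;
     * GNU's x ?: y evaluates to x when x is nonzero, else y.
     */
    static int node_cmp(const struct node *l, const struct node *r)
    {
            return cmp_int(l->btree_id, r->btree_id) ?:
                   -cmp_int(l->level, r->level) ?:
                   -cmp_int(l->seq, r->seq);
    }

    int main(void)
    {
            struct node a = { 1, 0, 5 }, b = { 1, 0, 7 };
            printf("%d\n", node_cmp(&a, &b));   /* 1: b (newer) sorts first */
            return 0;
    }
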
static inline bool found_btree_node_cmp_pos_less(const void *l, const void *r, void *arg)
@ -368,7 +335,7 @@ static int handle_overwrites(struct bch_fs *c,
while ((r = min_heap_peek(nodes_heap)) &&
nodes_overlap(l, r)) {
int cmp = found_btree_node_cmp_time(c, l, r);
int cmp = found_btree_node_cmp_time(l, r);
if (cmp > 0) {
if (bpos_cmp(l->max_key, r->max_key) >= 0)

libbcachefs/btree_trans_commit.c View File

@ -376,7 +376,7 @@ static inline int btree_key_can_insert(struct btree_trans *trans,
struct btree *b, unsigned u64s)
{
if (!bch2_btree_node_insert_fits(b, u64s))
return -BCH_ERR_btree_insert_btree_node_full;
return bch_err_throw(trans->c, btree_insert_btree_node_full);
return 0;
}
@ -394,9 +394,10 @@ btree_key_can_insert_cached_slowpath(struct btree_trans *trans, unsigned flags,
new_k = kmalloc(new_u64s * sizeof(u64), GFP_KERNEL);
if (!new_k) {
bch_err(trans->c, "error allocating memory for key cache key, btree %s u64s %u",
struct bch_fs *c = trans->c;
bch_err(c, "error allocating memory for key cache key, btree %s u64s %u",
bch2_btree_id_str(path->btree_id), new_u64s);
return -BCH_ERR_ENOMEM_btree_key_cache_insert;
return bch_err_throw(c, ENOMEM_btree_key_cache_insert);
}
ret = bch2_trans_relock(trans) ?:
@ -432,7 +433,7 @@ static int btree_key_can_insert_cached(struct btree_trans *trans, unsigned flags
if (watermark < BCH_WATERMARK_reclaim &&
!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
bch2_btree_key_cache_must_wait(c))
return -BCH_ERR_btree_insert_need_journal_reclaim;
return bch_err_throw(c, btree_insert_need_journal_reclaim);
/*
* bch2_varint_decode can read past the end of the buffer by at most 7
@ -894,7 +895,7 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
*/
if ((flags & BCH_TRANS_COMMIT_journal_reclaim) &&
watermark < BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
ret = bch_err_throw(c, journal_reclaim_would_deadlock);
goto out;
}
@ -1024,7 +1025,7 @@ int __bch2_trans_commit(struct btree_trans *trans, unsigned flags)
if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags)))
ret = do_bch2_trans_commit_to_journal_replay(trans);
else
ret = -BCH_ERR_erofs_trans_commit;
ret = bch_err_throw(c, erofs_trans_commit);
goto out_reset;
}
@ -1106,7 +1107,7 @@ err:
* restart:
*/
if (flags & BCH_TRANS_COMMIT_no_journal_res) {
ret = -BCH_ERR_transaction_restart_nested;
ret = bch_err_throw(c, transaction_restart_nested);
goto out;
}

libbcachefs/btree_update.c View File

@ -587,7 +587,7 @@ int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
BUG_ON(k.k->type != KEY_TYPE_deleted);
if (bkey_gt(k.k->p, end)) {
ret = -BCH_ERR_ENOSPC_btree_slot;
ret = bch_err_throw(trans->c, ENOSPC_btree_slot);
goto err;
}

libbcachefs/btree_update_interior.c View File

@ -684,12 +684,31 @@ static void btree_update_nodes_written(struct btree_update *as)
/*
* Wait for any in flight writes to finish before we free the old nodes
* on disk:
* on disk. But we haven't pinned those old nodes in the btree cache,
* they might have already been evicted.
*
* The update we're completing deleted references to those nodes from the
* btree, so we know if they've been evicted they can't be pulled back in.
* We just have to check if the nodes we have pointers to are still those
* old nodes, and haven't been reused.
*
This can't be done locklessly because the data buffers might have been
vmalloc allocated, and they aren't RCU freed. We also need the
* __no_kmsan_checks annotation because even with the btree node read
* lock, nothing tells us that the data buffer has been initialized (if
* the btree node has been reused for a different node, and the data
* buffer swapped for a new data buffer).
*/
for (i = 0; i < as->nr_old_nodes; i++) {
b = as->old_nodes[i];
if (btree_node_seq_matches(b, as->old_nodes_seq[i]))
bch2_trans_begin(trans);
btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
bool seq_matches = btree_node_seq_matches(b, as->old_nodes_seq[i]);
six_unlock_read(&b->c.lock);
bch2_trans_unlock_long(trans);
if (seq_matches)
wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
TASK_UNINTERRUPTIBLE);
}
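
The comment above describes a check-then-wait pattern: take the node's read lock just long enough to sample the sequence number that proves the cached pointer still refers to the old node, drop every lock, and only then sleep on the write bit. A userspace toy of that shape (a pthread mutex stands in for the six lock, a spin for wait_on_bit_io(); names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct node {
            pthread_mutex_t lock;
            unsigned long seq;              /* bumped whenever the slot is reused */
            volatile bool write_in_flight;
    };

    static bool node_seq_matches(struct node *n, unsigned long seq)
    {
            pthread_mutex_lock(&n->lock);   /* can't sample locklessly: see above */
            bool match = n->seq == seq;
            pthread_mutex_unlock(&n->lock);
            return match;
    }

    static void wait_for_old_node(struct node *n, unsigned long old_seq)
    {
            /* If the slot was reused, the write we cared about already
             * finished and the old node can't come back, so don't wait.
             */
            if (node_seq_matches(n, old_seq))
                    while (n->write_in_flight)
                            ;       /* the real code sleeps in wait_on_bit_io() */
    }

    int main(void)
    {
            struct node n = { PTHREAD_MUTEX_INITIALIZER, 1, false };
            wait_for_old_node(&n, 1);
            puts("done");
            return 0;
    }
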
@ -1244,7 +1263,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
if (bch2_err_matches(ret, ENOSPC) &&
(flags & BCH_TRANS_COMMIT_journal_reclaim) &&
watermark < BCH_WATERMARK_reclaim) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
ret = bch_err_throw(c, journal_reclaim_would_deadlock);
goto err;
}
@ -2177,7 +2196,7 @@ static int get_iter_to_node(struct btree_trans *trans, struct btree_iter *iter,
if (btree_iter_path(trans, iter)->l[b->c.level].b != b) {
/* node has been freed: */
BUG_ON(!btree_node_dying(b));
ret = -BCH_ERR_btree_node_dying;
ret = bch_err_throw(trans->c, btree_node_dying);
goto err;
}
@ -2791,16 +2810,16 @@ int bch2_fs_btree_interior_update_init(struct bch_fs *c)
c->btree_interior_update_worker =
alloc_workqueue("btree_update", WQ_UNBOUND|WQ_MEM_RECLAIM, 8);
if (!c->btree_interior_update_worker)
return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
return bch_err_throw(c, ENOMEM_btree_interior_update_worker_init);
c->btree_node_rewrite_worker =
alloc_ordered_workqueue("btree_node_rewrite", WQ_UNBOUND);
if (!c->btree_node_rewrite_worker)
return -BCH_ERR_ENOMEM_btree_interior_update_worker_init;
return bch_err_throw(c, ENOMEM_btree_interior_update_worker_init);
if (mempool_init_kmalloc_pool(&c->btree_interior_update_pool, 1,
sizeof(struct btree_update)))
return -BCH_ERR_ENOMEM_btree_interior_update_pool_init;
return bch_err_throw(c, ENOMEM_btree_interior_update_pool_init);
return 0;
}

libbcachefs/btree_write_buffer.c View File

@ -394,7 +394,7 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
bool accounting_accumulated = false;
do {
if (race_fault()) {
ret = -BCH_ERR_journal_reclaim_would_deadlock;
ret = bch_err_throw(c, journal_reclaim_would_deadlock);
break;
}
@ -633,7 +633,7 @@ int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
struct bch_fs *c = trans->c;
if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_btree_write_buffer))
return -BCH_ERR_erofs_no_writes;
return bch_err_throw(c, erofs_no_writes);
int ret = bch2_btree_write_buffer_flush_nocheck_rw(trans);
enumerated_ref_put(&c->writes, BCH_WRITE_REF_btree_write_buffer);
@ -676,7 +676,7 @@ int bch2_btree_write_buffer_maybe_flush(struct btree_trans *trans,
goto err;
bch2_bkey_buf_copy(last_flushed, c, tmp.k);
ret = -BCH_ERR_transaction_restart_write_buffer_flush;
ret = bch_err_throw(c, transaction_restart_write_buffer_flush);
}
err:
bch2_bkey_buf_exit(&tmp, c);

libbcachefs/buckets.c View File

@ -224,7 +224,7 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
switch (g->data_type) {
case BCH_DATA_sb:
bch_err(c, "btree and superblock in the same bucket - cannot repair");
ret = -BCH_ERR_fsck_repair_unimplemented;
ret = bch_err_throw(c, fsck_repair_unimplemented);
goto out;
case BCH_DATA_journal:
ret = bch2_dev_journal_bucket_delete(ca, PTR_BUCKET_NR(ca, &p.ptr));
@ -440,7 +440,7 @@ static int bucket_ref_update_err(struct btree_trans *trans, struct printbuf *buf
* us an error code for rewinding recovery
*/
if (!ret)
ret = -BCH_ERR_bucket_ref_update;
ret = bch_err_throw(c, bucket_ref_update);
} else {
/* Always ignore overwrite errors, so that deletion works */
ret = 0;
@ -632,7 +632,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_tryget(c, p.ptr.dev);
if (unlikely(!ca)) {
if (insert && p.ptr.dev != BCH_SB_MEMBER_INVALID)
ret = -BCH_ERR_trigger_pointer;
ret = bch_err_throw(c, trigger_pointer);
goto err;
}
@ -640,7 +640,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (!bucket_valid(ca, bucket.offset)) {
if (insert) {
bch2_dev_bucket_missing(ca, bucket.offset);
ret = -BCH_ERR_trigger_pointer;
ret = bch_err_throw(c, trigger_pointer);
}
goto err;
}
@ -662,7 +662,7 @@ static int bch2_trigger_pointer(struct btree_trans *trans,
if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n %s",
p.ptr.dev,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = -BCH_ERR_trigger_pointer;
ret = bch_err_throw(c, trigger_pointer);
goto err;
}
@ -688,6 +688,8 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
s64 sectors,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
if (flags & BTREE_TRIGGER_transactional) {
struct btree_iter iter;
struct bkey_i_stripe *s = bch2_bkey_get_mut_typed(trans, &iter,
@ -705,7 +707,7 @@ static int bch2_trigger_stripe_ptr(struct btree_trans *trans,
bch2_trans_inconsistent(trans,
"stripe pointer doesn't match stripe %llu",
(u64) p.ec.idx);
ret = -BCH_ERR_trigger_stripe_pointer;
ret = bch_err_throw(c, trigger_stripe_pointer);
goto err;
}
@ -725,13 +727,11 @@ err:
}
if (flags & BTREE_TRIGGER_gc) {
struct bch_fs *c = trans->c;
struct gc_stripe *m = genradix_ptr_alloc(&c->gc_stripes, p.ec.idx, GFP_KERNEL);
if (!m) {
bch_err(c, "error allocating memory for gc_stripes, idx %llu",
(u64) p.ec.idx);
return -BCH_ERR_ENOMEM_mark_stripe_ptr;
return bch_err_throw(c, ENOMEM_mark_stripe_ptr);
}
gc_stripe_lock(m);
@ -746,7 +746,7 @@ err:
__bch2_inconsistent_error(c, &buf);
bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
return -BCH_ERR_trigger_stripe_pointer;
return bch_err_throw(c, trigger_stripe_pointer);
}
m->block_sectors[p.ec.block] += sectors;
@ -1014,7 +1014,7 @@ static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
if (!ret)
ret = -BCH_ERR_metadata_bucket_inconsistency;
ret = bch_err_throw(c, metadata_bucket_inconsistency);
goto err;
}
@ -1067,7 +1067,7 @@ static int bch2_mark_metadata_bucket(struct btree_trans *trans, struct bch_dev *
err_unlock:
bucket_unlock(g);
err:
return -BCH_ERR_metadata_bucket_inconsistency;
return bch_err_throw(c, metadata_bucket_inconsistency);
}
int bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
@ -1282,7 +1282,7 @@ recalculate:
ret = 0;
} else {
atomic64_set(&c->sectors_available, sectors_available);
ret = -BCH_ERR_ENOSPC_disk_reservation;
ret = bch_err_throw(c, ENOSPC_disk_reservation);
}
mutex_unlock(&c->sectors_available_lock);
@ -1311,7 +1311,7 @@ int bch2_buckets_nouse_alloc(struct bch_fs *c)
GFP_KERNEL|__GFP_ZERO);
if (!ca->buckets_nouse) {
bch2_dev_put(ca);
return -BCH_ERR_ENOMEM_buckets_nouse;
return bch_err_throw(c, ENOMEM_buckets_nouse);
}
}
@ -1336,12 +1336,12 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
lockdep_assert_held(&c->state_lock);
if (resize && ca->buckets_nouse)
return -BCH_ERR_no_resize_with_buckets_nouse;
return bch_err_throw(c, no_resize_with_buckets_nouse);
bucket_gens = bch2_kvmalloc(struct_size(bucket_gens, b, nbuckets),
GFP_KERNEL|__GFP_ZERO);
if (!bucket_gens) {
ret = -BCH_ERR_ENOMEM_bucket_gens;
ret = bch_err_throw(c, ENOMEM_bucket_gens);
goto err;
}
@ -1360,9 +1360,9 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
sizeof(bucket_gens->b[0]) * copy);
}
ret = bch2_bucket_bitmap_resize(&ca->bucket_backpointer_mismatch,
ret = bch2_bucket_bitmap_resize(ca, &ca->bucket_backpointer_mismatch,
ca->mi.nbuckets, nbuckets) ?:
bch2_bucket_bitmap_resize(&ca->bucket_backpointer_empty,
bch2_bucket_bitmap_resize(ca, &ca->bucket_backpointer_empty,
ca->mi.nbuckets, nbuckets);
rcu_assign_pointer(ca->bucket_gens, bucket_gens);
@ -1389,7 +1389,7 @@ int bch2_dev_buckets_alloc(struct bch_fs *c, struct bch_dev *ca)
{
ca->usage = alloc_percpu(struct bch_dev_usage_full);
if (!ca->usage)
return -BCH_ERR_ENOMEM_usage_init;
return bch_err_throw(c, ENOMEM_usage_init);
return bch2_dev_buckets_resize(c, ca, ca->mi.nbuckets);
}

libbcachefs/buckets_waiting_for_journal.c View File

@ -108,7 +108,8 @@ int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
realloc:
n = kvmalloc(sizeof(*n) + (sizeof(n->d[0]) << new_bits), GFP_KERNEL);
if (!n) {
ret = -BCH_ERR_ENOMEM_buckets_waiting_for_journal_set;
struct bch_fs *c = container_of(b, struct bch_fs, buckets_waiting_for_journal);
ret = bch_err_throw(c, ENOMEM_buckets_waiting_for_journal_set);
goto out;
}

libbcachefs/chardev.c View File

@ -618,7 +618,7 @@ static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
if (ca->dev == dev)
return ca->dev_idx;
return -BCH_ERR_ENOENT_dev_idx_not_found;
return bch_err_throw(c, ENOENT_dev_idx_not_found);
}
static long bch2_ioctl_disk_resize(struct bch_fs *c,

libbcachefs/checksum.c View File

@ -173,7 +173,7 @@ int bch2_encrypt(struct bch_fs *c, unsigned type,
if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
return bch_err_throw(c, no_encryption_key);
bch2_chacha20(&c->chacha20_key, nonce, data, len);
return 0;
@ -262,7 +262,7 @@ int __bch2_encrypt_bio(struct bch_fs *c, unsigned type,
if (bch2_fs_inconsistent_on(!c->chacha20_key_set,
c, "attempting to encrypt without encryption key"))
return -BCH_ERR_no_encryption_key;
return bch_err_throw(c, no_encryption_key);
bch2_chacha20_init(chacha_state, &c->chacha20_key, nonce);
@ -375,7 +375,7 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
prt_str(&buf, ")");
WARN_RATELIMIT(1, "%s", buf.buf);
printbuf_exit(&buf);
return -BCH_ERR_recompute_checksum;
return bch_err_throw(c, recompute_checksum);
}
for (i = splits; i < splits + ARRAY_SIZE(splits); i++) {
@ -659,7 +659,7 @@ int bch2_enable_encryption(struct bch_fs *c, bool keyed)
crypt = bch2_sb_field_resize(&c->disk_sb, crypt,
sizeof(*crypt) / sizeof(u64));
if (!crypt) {
ret = -BCH_ERR_ENOSPC_sb_crypt;
ret = bch_err_throw(c, ENOSPC_sb_crypt);
goto err;
}

libbcachefs/compress.c View File

@ -187,7 +187,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
__bch2_compression_types[crc.compression_type]))
ret = bch2_check_set_has_compressed_data(c, opt);
else
ret = -BCH_ERR_compression_workspace_not_initialized;
ret = bch_err_throw(c, compression_workspace_not_initialized);
if (ret)
goto err;
}
@ -200,7 +200,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
ret2 = LZ4_decompress_safe_partial(src_data.b, dst_data,
src_len, dst_len, dst_len);
if (ret2 != dst_len)
ret = -BCH_ERR_decompress_lz4;
ret = bch_err_throw(c, decompress_lz4);
break;
case BCH_COMPRESSION_TYPE_gzip: {
z_stream strm = {
@ -219,7 +219,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
mempool_free(workspace, workspace_pool);
if (ret2 != Z_STREAM_END)
ret = -BCH_ERR_decompress_gzip;
ret = bch_err_throw(c, decompress_gzip);
break;
}
case BCH_COMPRESSION_TYPE_zstd: {
@ -227,7 +227,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
size_t real_src_len = le32_to_cpup(src_data.b);
if (real_src_len > src_len - 4) {
ret = -BCH_ERR_decompress_zstd_src_len_bad;
ret = bch_err_throw(c, decompress_zstd_src_len_bad);
goto err;
}
@ -241,7 +241,7 @@ static int __bio_uncompress(struct bch_fs *c, struct bio *src,
mempool_free(workspace, workspace_pool);
if (ret2 != dst_len)
ret = -BCH_ERR_decompress_zstd;
ret = bch_err_throw(c, decompress_zstd);
break;
}
default:
@ -270,7 +270,7 @@ int bch2_bio_uncompress_inplace(struct bch_write_op *op,
bch2_write_op_error(op, op->pos.offset,
"extent too big to decompress (%u > %u)",
crc->uncompressed_size << 9, c->opts.encoded_extent_max);
return -BCH_ERR_decompress_exceeded_max_encoded_extent;
return bch_err_throw(c, decompress_exceeded_max_encoded_extent);
}
data = __bounce_alloc(c, dst_len, WRITE);
@ -314,7 +314,7 @@ int bch2_bio_uncompress(struct bch_fs *c, struct bio *src,
if (crc.uncompressed_size << 9 > c->opts.encoded_extent_max ||
crc.compressed_size << 9 > c->opts.encoded_extent_max)
return -BCH_ERR_decompress_exceeded_max_encoded_extent;
return bch_err_throw(c, decompress_exceeded_max_encoded_extent);
dst_data = dst_len == dst_iter.bi_size
? __bio_map_or_bounce(c, dst, dst_iter, WRITE)
@ -656,12 +656,12 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
if (!mempool_initialized(&c->compression_bounce[READ]) &&
mempool_init_kvmalloc_pool(&c->compression_bounce[READ],
1, c->opts.encoded_extent_max))
return -BCH_ERR_ENOMEM_compression_bounce_read_init;
return bch_err_throw(c, ENOMEM_compression_bounce_read_init);
if (!mempool_initialized(&c->compression_bounce[WRITE]) &&
mempool_init_kvmalloc_pool(&c->compression_bounce[WRITE],
1, c->opts.encoded_extent_max))
return -BCH_ERR_ENOMEM_compression_bounce_write_init;
return bch_err_throw(c, ENOMEM_compression_bounce_write_init);
for (i = compression_types;
i < compression_types + ARRAY_SIZE(compression_types);
@ -675,7 +675,7 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
if (mempool_init_kvmalloc_pool(
&c->compress_workspace[i->type],
1, i->compress_workspace))
return -BCH_ERR_ENOMEM_compression_workspace_init;
return bch_err_throw(c, ENOMEM_compression_workspace_init);
}
return 0;

libbcachefs/data_update.c View File

@ -255,7 +255,7 @@ static int data_update_invalid_bkey(struct data_update *m,
bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
return -BCH_ERR_invalid_bkey;
return bch_err_throw(c, invalid_bkey);
}
static int __bch2_data_update_index_update(struct btree_trans *trans,
@ -772,7 +772,7 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
{
if ((m->op.flags & BCH_WRITE_alloc_nowait) &&
unlikely(c->open_buckets_nr_free <= bch2_open_buckets_reserved(m->op.watermark)))
return -BCH_ERR_data_update_done_would_block;
return bch_err_throw(c, data_update_done_would_block);
unsigned target = m->op.flags & BCH_WRITE_only_specified_devs
? m->op.target
@ -802,9 +802,9 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
}
if (!nr_replicas)
return -BCH_ERR_data_update_done_no_rw_devs;
return bch_err_throw(c, data_update_done_no_rw_devs);
if (nr_replicas < m->op.nr_replicas)
return -BCH_ERR_insufficient_devices;
return bch_err_throw(c, insufficient_devices);
return 0;
}
@ -822,21 +822,16 @@ int bch2_data_update_init(struct btree_trans *trans,
int ret = 0;
if (k.k->p.snapshot) {
/*
* We'll go ERO if we see a key for a missing snapshot, and if
* we're still in recovery we want to give that a chance to
* repair:
*/
if (unlikely(test_bit(BCH_FS_in_recovery, &c->flags) &&
bch2_snapshot_id_state(c, k.k->p.snapshot) == SNAPSHOT_ID_empty))
return -BCH_ERR_data_update_done_no_snapshot;
ret = bch2_check_key_has_snapshot(trans, iter, k);
if (bch2_err_matches(ret, BCH_ERR_recovery_will_run)) {
/* Can't repair yet, waiting on other recovery passes */
return bch_err_throw(c, data_update_done_no_snapshot);
}
if (ret < 0)
return ret;
if (ret) /* key was deleted */
return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
-BCH_ERR_data_update_done_no_snapshot;
bch_err_throw(c, data_update_done_no_snapshot);
ret = 0;
}
@ -943,7 +938,7 @@ int bch2_data_update_init(struct btree_trans *trans,
if (iter)
ret = bch2_extent_drop_ptrs(trans, iter, k, io_opts, &m->data_opts);
if (!ret)
ret = -BCH_ERR_data_update_done_no_writes_needed;
ret = bch_err_throw(c, data_update_done_no_writes_needed);
goto out_bkey_buf_exit;
}
@ -974,19 +969,19 @@ int bch2_data_update_init(struct btree_trans *trans,
}
if (!bkey_get_dev_refs(c, k)) {
ret = -BCH_ERR_data_update_done_no_dev_refs;
ret = bch_err_throw(c, data_update_done_no_dev_refs);
goto out_put_disk_res;
}
if (c->opts.nocow_enabled &&
!bkey_nocow_lock(c, ctxt, ptrs)) {
ret = -BCH_ERR_nocow_lock_blocked;
ret = bch_err_throw(c, nocow_lock_blocked);
goto out_put_dev_refs;
}
if (unwritten) {
ret = bch2_update_unwritten_extent(trans, m) ?:
-BCH_ERR_data_update_done_unwritten;
bch_err_throw(c, data_update_done_unwritten);
goto out_nocow_unlock;
}

libbcachefs/dirent.c View File

@ -231,23 +231,73 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
prt_printf(out, " type %s", bch2_d_type_str(d.v->d_type));
}
static struct bkey_i_dirent *dirent_alloc_key(struct btree_trans *trans,
int bch2_dirent_init_name(struct bkey_i_dirent *dirent,
const struct bch_hash_info *hash_info,
const struct qstr *name,
const struct qstr *cf_name)
{
EBUG_ON(hash_info->cf_encoding == NULL && cf_name);
int cf_len = 0;
if (name->len > BCH_NAME_MAX)
return -ENAMETOOLONG;
dirent->v.d_casefold = hash_info->cf_encoding != NULL;
if (!dirent->v.d_casefold) {
memcpy(&dirent->v.d_name[0], name->name, name->len);
memset(&dirent->v.d_name[name->len], 0,
bkey_val_bytes(&dirent->k) -
offsetof(struct bch_dirent, d_name) -
name->len);
} else {
memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
char *cf_out = &dirent->v.d_cf_name_block.d_names[name->len];
if (cf_name) {
cf_len = cf_name->len;
memcpy(cf_out, cf_name->name, cf_name->len);
} else {
cf_len = utf8_casefold(hash_info->cf_encoding, name,
cf_out,
bkey_val_end(bkey_i_to_s(&dirent->k_i)) - (void *) cf_out);
if (cf_len <= 0)
return cf_len;
}
memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_len], 0,
bkey_val_bytes(&dirent->k) -
offsetof(struct bch_dirent, d_cf_name_block.d_names) -
name->len + cf_len);
dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_len);
EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_len);
}
unsigned u64s = dirent_val_u64s(name->len, cf_len);
BUG_ON(u64s > bkey_val_u64s(&dirent->k));
set_bkey_val_u64s(&dirent->k, u64s);
return 0;
}
struct bkey_i_dirent *bch2_dirent_create_key(struct btree_trans *trans,
const struct bch_hash_info *hash_info,
subvol_inum dir,
u8 type,
int name_len, int cf_name_len,
const struct qstr *name,
const struct qstr *cf_name,
u64 dst)
{
struct bkey_i_dirent *dirent;
unsigned u64s = BKEY_U64s + dirent_val_u64s(name_len, cf_name_len);
BUG_ON(u64s > U8_MAX);
dirent = bch2_trans_kmalloc(trans, u64s * sizeof(u64));
struct bkey_i_dirent *dirent = bch2_trans_kmalloc(trans, BKEY_U64s_MAX * sizeof(u64));
if (IS_ERR(dirent))
return dirent;
bkey_dirent_init(&dirent->k_i);
dirent->k.u64s = u64s;
dirent->k.u64s = BKEY_U64s_MAX;
if (type != DT_SUBVOL) {
dirent->v.d_inum = cpu_to_le64(dst);
@ -258,75 +308,12 @@ static struct bkey_i_dirent *dirent_alloc_key(struct btree_trans *trans,
dirent->v.d_type = type;
dirent->v.d_unused = 0;
dirent->v.d_casefold = cf_name_len ? 1 : 0;
return dirent;
}
static void dirent_init_regular_name(struct bkey_i_dirent *dirent,
const struct qstr *name)
{
EBUG_ON(dirent->v.d_casefold);
memcpy(&dirent->v.d_name[0], name->name, name->len);
memset(&dirent->v.d_name[name->len], 0,
bkey_val_bytes(&dirent->k) -
offsetof(struct bch_dirent, d_name) -
name->len);
}
static void dirent_init_casefolded_name(struct bkey_i_dirent *dirent,
const struct qstr *name,
const struct qstr *cf_name)
{
EBUG_ON(!dirent->v.d_casefold);
EBUG_ON(!cf_name->len);
dirent->v.d_cf_name_block.d_name_len = cpu_to_le16(name->len);
dirent->v.d_cf_name_block.d_cf_name_len = cpu_to_le16(cf_name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
memcpy(&dirent->v.d_cf_name_block.d_names[name->len], cf_name->name, cf_name->len);
memset(&dirent->v.d_cf_name_block.d_names[name->len + cf_name->len], 0,
bkey_val_bytes(&dirent->k) -
offsetof(struct bch_dirent, d_cf_name_block.d_names) -
name->len + cf_name->len);
EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_name->len);
}
static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
const struct bch_hash_info *hash_info,
subvol_inum dir,
u8 type,
const struct qstr *name,
const struct qstr *cf_name,
u64 dst)
{
struct bkey_i_dirent *dirent;
struct qstr _cf_name;
if (name->len > BCH_NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
if (hash_info->cf_encoding && !cf_name) {
int ret = bch2_casefold(trans, hash_info, name, &_cf_name);
if (ret)
return ERR_PTR(ret);
cf_name = &_cf_name;
}
dirent = dirent_alloc_key(trans, dir, type, name->len, cf_name ? cf_name->len : 0, dst);
if (IS_ERR(dirent))
return dirent;
if (cf_name)
dirent_init_casefolded_name(dirent, name, cf_name);
else
dirent_init_regular_name(dirent, name);
int ret = bch2_dirent_init_name(dirent, hash_info, name, cf_name);
if (ret)
return ERR_PTR(ret);
EBUG_ON(bch2_dirent_get_name(dirent_i_to_s_c(dirent)).len != name->len);
return dirent;
}
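
The refactor allocates the dirent at the largest possible key size (BKEY_U64s_MAX), lets bch2_dirent_init_name() write the name, or its casefolded form, straight into the value, and only then shrinks the key to the space actually used via set_bkey_val_u64s(). A toy model of that allocate-max-then-shrink pattern (sizes and names are illustrative):

    #include <stdio.h>
    #include <string.h>

    #define VAL_U64S_MAX 32

    struct key {
            unsigned u64s;                  /* size of the value, in u64 units */
            char name[VAL_U64S_MAX * 8];
    };

    /* Fill the name into a maximally-sized key, then shrink it to the
     * whole u64s actually used, mirroring set_bkey_val_u64s().
     */
    static int key_init_name(struct key *k, const char *name)
    {
            size_t len = strlen(name);

            if (len > sizeof(k->name))
                    return -1;              /* -ENAMETOOLONG in the real code */

            k->u64s = VAL_U64S_MAX;         /* allocated at max size up front */
            memset(k->name, 0, sizeof(k->name));
            memcpy(k->name, name, len);
            k->u64s = (len + 7) / 8;        /* shrink to fit */
            return 0;
    }

    int main(void)
    {
            struct key k;

            if (!key_init_name(&k, "example"))
                    printf("key uses %u u64s\n", k.u64s);   /* 1 */
            return 0;
    }
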
@ -341,7 +328,7 @@ int bch2_dirent_create_snapshot(struct btree_trans *trans,
struct bkey_i_dirent *dirent;
int ret;
dirent = dirent_create_key(trans, hash_info, dir_inum, type, name, NULL, dst_inum);
dirent = bch2_dirent_create_key(trans, hash_info, dir_inum, type, name, NULL, dst_inum);
ret = PTR_ERR_OR_ZERO(dirent);
if (ret)
return ret;
@ -365,7 +352,7 @@ int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
struct bkey_i_dirent *dirent;
int ret;
dirent = dirent_create_key(trans, hash_info, dir, type, name, NULL, dst_inum);
dirent = bch2_dirent_create_key(trans, hash_info, dir, type, name, NULL, dst_inum);
ret = PTR_ERR_OR_ZERO(dirent);
if (ret)
return ret;
@ -470,8 +457,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
*src_offset = dst_iter.pos.offset;
/* Create new dst key: */
new_dst = dirent_create_key(trans, dst_hash, dst_dir, 0, dst_name,
dst_hash->cf_encoding ? &dst_name_lookup : NULL, 0);
new_dst = bch2_dirent_create_key(trans, dst_hash, dst_dir, 0, dst_name,
dst_hash->cf_encoding ? &dst_name_lookup : NULL, 0);
ret = PTR_ERR_OR_ZERO(new_dst);
if (ret)
goto out;
@ -481,8 +468,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
/* Create new src key: */
if (mode == BCH_RENAME_EXCHANGE) {
new_src = dirent_create_key(trans, src_hash, src_dir, 0, src_name,
src_hash->cf_encoding ? &src_name_lookup : NULL, 0);
new_src = bch2_dirent_create_key(trans, src_hash, src_dir, 0, src_name,
src_hash->cf_encoding ? &src_name_lookup : NULL, 0);
ret = PTR_ERR_OR_ZERO(new_src);
if (ret)
goto out;
@ -648,7 +635,7 @@ int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
if (d.v->d_type == DT_SUBVOL && le32_to_cpu(d.v->d_parent_subvol) != subvol)
continue;
ret = -BCH_ERR_ENOTEMPTY_dir_not_empty;
ret = bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty);
break;
}
bch2_trans_iter_exit(trans, &iter);
@ -705,8 +692,9 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum,
subvol_inum target;
bool need_second_pass = false;
int ret2 = bch2_str_hash_check_key(trans, NULL, &bch2_dirent_hash_desc,
hash_info, &iter, k) ?:
hash_info, &iter, k, &need_second_pass) ?:
bch2_dirent_read_target(trans, inum, dirent, &target);
if (ret2 > 0)
continue;
@ -737,7 +725,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
ret = bch2_inode_unpack(k, inode);
goto found;
}
ret = -BCH_ERR_ENOENT_inode;
ret = bch_err_throw(trans->c, ENOENT_inode);
found:
bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
bch2_trans_iter_exit(trans, &iter);

libbcachefs/dirent.h View File

@ -38,7 +38,7 @@ static inline int bch2_maybe_casefold(struct btree_trans *trans,
}
}
struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent d);
struct qstr bch2_dirent_get_name(struct bkey_s_c_dirent);
static inline unsigned dirent_val_u64s(unsigned len, unsigned cf_len)
{
@ -59,6 +59,14 @@ static inline void dirent_copy_target(struct bkey_i_dirent *dst,
dst->v.d_type = src.v->d_type;
}
int bch2_dirent_init_name(struct bkey_i_dirent *,
const struct bch_hash_info *,
const struct qstr *,
const struct qstr *);
struct bkey_i_dirent *bch2_dirent_create_key(struct btree_trans *,
const struct bch_hash_info *, subvol_inum, u8,
const struct qstr *, const struct qstr *, u64);
int bch2_dirent_create_snapshot(struct btree_trans *, u32, u64, u32,
const struct bch_hash_info *, u8,
const struct qstr *, u64, u64 *,

libbcachefs/disk_accounting.c View File

@ -390,7 +390,7 @@ static int __bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accoun
err:
free_percpu(n.v[1]);
free_percpu(n.v[0]);
return -BCH_ERR_ENOMEM_disk_accounting;
return bch_err_throw(c, ENOMEM_disk_accounting);
}
int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a,
@ -401,7 +401,7 @@ int bch2_accounting_mem_insert(struct bch_fs *c, struct bkey_s_c_accounting a,
if (mode != BCH_ACCOUNTING_read &&
accounting_to_replicas(&r.e, a.k->p) &&
!bch2_replicas_marked_locked(c, &r.e))
return -BCH_ERR_btree_insert_need_mark_replicas;
return bch_err_throw(c, btree_insert_need_mark_replicas);
percpu_up_read(&c->mark_lock);
percpu_down_write(&c->mark_lock);
@ -419,7 +419,7 @@ int bch2_accounting_mem_insert_locked(struct bch_fs *c, struct bkey_s_c_accounti
if (mode != BCH_ACCOUNTING_read &&
accounting_to_replicas(&r.e, a.k->p) &&
!bch2_replicas_marked_locked(c, &r.e))
return -BCH_ERR_btree_insert_need_mark_replicas;
return bch_err_throw(c, btree_insert_need_mark_replicas);
return __bch2_accounting_mem_insert(c, a);
}
@ -559,7 +559,7 @@ int bch2_gc_accounting_start(struct bch_fs *c)
sizeof(u64), GFP_KERNEL);
if (!e->v[1]) {
bch2_accounting_free_counters(acc, true);
ret = -BCH_ERR_ENOMEM_disk_accounting;
ret = bch_err_throw(c, ENOMEM_disk_accounting);
break;
}
}
@ -737,7 +737,7 @@ invalid_device:
bch2_disk_accounting_mod(trans, acc, v, nr, false)) ?:
-BCH_ERR_remove_disk_accounting_entry;
} else {
ret = -BCH_ERR_remove_disk_accounting_entry;
ret = bch_err_throw(c, remove_disk_accounting_entry);
}
goto fsck_err;
}

libbcachefs/disk_groups.c View File

@ -130,7 +130,7 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
cpu_g = kzalloc(struct_size(cpu_g, entries, nr_groups), GFP_KERNEL);
if (!cpu_g)
return -BCH_ERR_ENOMEM_disk_groups_to_cpu;
return bch_err_throw(c, ENOMEM_disk_groups_to_cpu);
cpu_g->nr = nr_groups;

libbcachefs/ec.c View File

@ -213,7 +213,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->stripe, s.k->p.offset,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -BCH_ERR_mark_stripe;
ret = bch_err_throw(c, mark_stripe);
goto err;
}
@ -224,7 +224,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->cached_sectors,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -BCH_ERR_mark_stripe;
ret = bch_err_throw(c, mark_stripe);
goto err;
}
} else {
@ -234,7 +234,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
bucket.inode, bucket.offset, a->gen,
a->stripe,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -BCH_ERR_mark_stripe;
ret = bch_err_throw(c, mark_stripe);
goto err;
}
@ -244,7 +244,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
bch2_data_type_str(a->data_type),
bch2_data_type_str(data_type),
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -BCH_ERR_mark_stripe;
ret = bch_err_throw(c, mark_stripe);
goto err;
}
@ -256,7 +256,7 @@ static int __mark_stripe_bucket(struct btree_trans *trans,
a->dirty_sectors,
a->cached_sectors,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -BCH_ERR_mark_stripe;
ret = bch_err_throw(c, mark_stripe);
goto err;
}
}
@ -295,7 +295,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
if (unlikely(!ca)) {
if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
ret = -BCH_ERR_mark_stripe;
ret = bch_err_throw(c, mark_stripe);
goto err;
}
@ -325,7 +325,7 @@ static int mark_stripe_bucket(struct btree_trans *trans,
if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n%s",
ptr->dev,
(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
ret = -BCH_ERR_mark_stripe;
ret = bch_err_throw(c, mark_stripe);
goto err;
}
@ -428,7 +428,7 @@ int bch2_trigger_stripe(struct btree_trans *trans,
gc = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
if (!gc) {
bch_err(c, "error allocating memory for gc_stripes, idx %llu", idx);
return -BCH_ERR_ENOMEM_mark_stripe;
return bch_err_throw(c, ENOMEM_mark_stripe);
}
/*
@ -536,7 +536,8 @@ static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
}
/* XXX: this is a non-mempoolified memory allocation: */
static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
static int ec_stripe_buf_init(struct bch_fs *c,
struct ec_stripe_buf *buf,
unsigned offset, unsigned size)
{
struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
@ -564,7 +565,7 @@ static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
return 0;
err:
ec_stripe_buf_exit(buf);
return -BCH_ERR_ENOMEM_stripe_buf;
return bch_err_throw(c, ENOMEM_stripe_buf);
}
/* Checksumming: */
@ -840,7 +841,7 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
buf = kzalloc(sizeof(*buf), GFP_NOFS);
if (!buf)
return -BCH_ERR_ENOMEM_ec_read_extent;
return bch_err_throw(c, ENOMEM_ec_read_extent);
ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
if (ret) {
@ -861,7 +862,7 @@ int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
goto err;
}
ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
ret = ec_stripe_buf_init(c, buf, offset, bio_sectors(&rbio->bio));
if (ret) {
msg = "-ENOMEM";
goto err;
@ -894,7 +895,7 @@ err:
bch_err_ratelimited(c,
"error doing reconstruct read: %s\n %s", msg, msgbuf.buf);
printbuf_exit(&msgbuf);
ret = -BCH_ERR_stripe_reconstruct;
ret = bch_err_throw(c, stripe_reconstruct);
goto out;
}
@ -904,7 +905,7 @@ static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
{
if (c->gc_pos.phase != GC_PHASE_not_running &&
!genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
return bch_err_throw(c, ENOMEM_ec_stripe_mem_alloc);
return 0;
}
@ -1129,7 +1130,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
bch2_fs_inconsistent(c, "%s", buf.buf);
printbuf_exit(&buf);
return -BCH_ERR_erasure_coding_found_btree_node;
return bch_err_throw(c, erasure_coding_found_btree_node);
}
k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, last_flushed);
@ -1195,7 +1196,7 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
if (!ca)
return -BCH_ERR_ENOENT_dev_not_found;
return bch_err_throw(c, ENOENT_dev_not_found);
struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
@ -1256,7 +1257,7 @@ static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE,
BCH_DEV_WRITE_REF_ec_bucket_zero);
if (!ca) {
s->err = -BCH_ERR_erofs_no_writes;
s->err = bch_err_throw(c, erofs_no_writes);
return;
}
@ -1320,7 +1321,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
if (ec_do_recov(c, &s->existing_stripe)) {
bch_err(c, "error creating stripe: error reading existing stripe");
ret = -BCH_ERR_ec_block_read;
ret = bch_err_throw(c, ec_block_read);
goto err;
}
@ -1346,7 +1347,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
if (ec_nr_failed(&s->new_stripe)) {
bch_err(c, "error creating stripe: error writing redundancy buckets");
ret = -BCH_ERR_ec_block_write;
ret = bch_err_throw(c, ec_block_write);
goto err;
}
@ -1865,7 +1866,7 @@ static int init_new_stripe_from_existing(struct bch_fs *c, struct ec_stripe_new
s->nr_data = existing_v->nr_blocks -
existing_v->nr_redundant;
int ret = ec_stripe_buf_init(&s->existing_stripe, 0, le16_to_cpu(existing_v->sectors));
int ret = ec_stripe_buf_init(c, &s->existing_stripe, 0, le16_to_cpu(existing_v->sectors));
if (ret) {
bch2_stripe_close(c, s);
return ret;
@ -1925,7 +1926,7 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
}
bch2_trans_iter_exit(trans, &lru_iter);
if (!ret)
ret = -BCH_ERR_stripe_alloc_blocked;
ret = bch_err_throw(c, stripe_alloc_blocked);
if (ret == 1)
ret = 0;
if (ret)
@ -1966,7 +1967,7 @@ static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_st
continue;
}
ret = -BCH_ERR_ENOSPC_stripe_create;
ret = bch_err_throw(c, ENOSPC_stripe_create);
break;
}
@ -2024,7 +2025,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
if (!h->s) {
h->s = ec_new_stripe_alloc(c, h);
if (!h->s) {
ret = -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
ret = bch_err_throw(c, ENOMEM_ec_new_stripe_alloc);
bch_err(c, "failed to allocate new stripe");
goto err;
}
@ -2089,7 +2090,7 @@ alloc_existing:
goto err;
allocate_buf:
ret = ec_stripe_buf_init(&s->new_stripe, 0, h->blocksize);
ret = ec_stripe_buf_init(c, &s->new_stripe, 0, h->blocksize);
if (ret)
goto err;
@ -2115,6 +2116,7 @@ int bch2_invalidate_stripe_to_dev(struct btree_trans *trans,
if (k.k->type != KEY_TYPE_stripe)
return 0;
struct bch_fs *c = trans->c;
struct bkey_i_stripe *s =
bch2_bkey_make_mut_typed(trans, iter, &k, 0, stripe);
int ret = PTR_ERR_OR_ZERO(s);
@ -2146,17 +2148,17 @@ int bch2_invalidate_stripe_to_dev(struct btree_trans *trans,
if (ptr->dev == dev_idx)
ptr->dev = BCH_SB_MEMBER_INVALID;
struct bch_dev *ca = bch2_dev_rcu(trans->c, ptr->dev);
struct bch_dev *ca = bch2_dev_rcu(c, ptr->dev);
nr_good += ca && ca->mi.state != BCH_MEMBER_STATE_failed;
}
if (nr_good < s->v.nr_blocks && !(flags & BCH_FORCE_IF_DATA_DEGRADED))
return -BCH_ERR_remove_would_lose_data;
return bch_err_throw(c, remove_would_lose_data);
unsigned nr_data = s->v.nr_blocks - s->v.nr_redundant;
if (nr_good < nr_data && !(flags & BCH_FORCE_IF_DATA_LOST))
return -BCH_ERR_remove_would_lose_data;
return bch_err_throw(c, remove_would_lose_data);
sectors = -sectors;
@ -2177,14 +2179,15 @@ static int bch2_invalidate_stripe_to_dev_from_alloc(struct btree_trans *trans, s
return 0;
if (a->stripe_sectors) {
bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
return -BCH_ERR_invalidate_stripe_to_dev;
struct bch_fs *c = trans->c;
bch_err(c, "trying to invalidate device in stripe when bucket has stripe data");
return bch_err_throw(c, invalidate_stripe_to_dev);
}
struct btree_iter iter;
struct bkey_s_c_stripe s =
bch2_bkey_get_iter_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
BTREE_ITER_slots, stripe);
int ret = bkey_err(s);
if (ret)
return ret;


@ -182,9 +182,12 @@
x(BCH_ERR_fsck, fsck_errors_not_fixed) \
x(BCH_ERR_fsck, fsck_repair_unimplemented) \
x(BCH_ERR_fsck, fsck_repair_impossible) \
x(EINVAL, restart_recovery) \
x(EINVAL, cannot_rewind_recovery) \
x(EINVAL, recovery_will_run) \
x(BCH_ERR_recovery_will_run, restart_recovery) \
x(BCH_ERR_recovery_will_run, cannot_rewind_recovery) \
x(BCH_ERR_recovery_will_run, recovery_pass_will_run) \
x(0, data_update_done) \
x(0, bkey_was_deleted) \
x(BCH_ERR_data_update_done, data_update_done_would_block) \
x(BCH_ERR_data_update_done, data_update_done_unwritten) \
x(BCH_ERR_data_update_done, data_update_done_no_writes_needed) \
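
In the table above, the first column of each x() entry is the code's parent class: restart_recovery and cannot_rewind_recovery are reparented from plain EINVAL to the new recovery_will_run code, alongside the new recovery_pass_will_run. A simplified sketch of how such an X-macro table typically expands, assuming the same scheme as the real errcode.h/errcode.c (abbreviated here):

enum bch_errcode {
	BCH_ERR_START = 2048,
#define x(class, err) BCH_ERR_##err,
	BCH_ERRCODES()
#undef x
	BCH_ERR_MAX
};

/* each code's parent; the chain terminates at a plain errno like EINVAL: */
static const int bch2_errcode_parents[] = {
#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = class,
	BCH_ERRCODES()
#undef x
};

/* walk the chain, so restart_recovery matches BCH_ERR_recovery_will_run: */
bool __bch2_err_matches(int err, int class)
{
	err = abs(err);

	while (err >= BCH_ERR_START && err != class)
		err = bch2_errcode_parents[err - BCH_ERR_START];

	return err == class;
}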


@ -100,10 +100,10 @@ int __bch2_topology_error(struct bch_fs *c, struct printbuf *out)
set_bit(BCH_FS_topology_error, &c->flags);
if (!test_bit(BCH_FS_in_recovery, &c->flags)) {
__bch2_inconsistent_error(c, out);
return -BCH_ERR_btree_need_topology_repair;
return bch_err_throw(c, btree_need_topology_repair);
} else {
return bch2_run_explicit_recovery_pass(c, out, BCH_RECOVERY_PASS_check_topology, 0) ?:
-BCH_ERR_btree_node_read_validate_error;
bch_err_throw(c, btree_node_read_validate_error);
}
}
@ -403,23 +403,23 @@ int bch2_fsck_err_opt(struct bch_fs *c,
if (test_bit(BCH_FS_in_fsck, &c->flags)) {
if (!(flags & (FSCK_CAN_FIX|FSCK_CAN_IGNORE)))
return -BCH_ERR_fsck_repair_unimplemented;
return bch_err_throw(c, fsck_repair_unimplemented);
switch (c->opts.fix_errors) {
case FSCK_FIX_exit:
return -BCH_ERR_fsck_errors_not_fixed;
return bch_err_throw(c, fsck_errors_not_fixed);
case FSCK_FIX_yes:
if (flags & FSCK_CAN_FIX)
return -BCH_ERR_fsck_fix;
return bch_err_throw(c, fsck_fix);
fallthrough;
case FSCK_FIX_no:
if (flags & FSCK_CAN_IGNORE)
return -BCH_ERR_fsck_ignore;
return -BCH_ERR_fsck_errors_not_fixed;
return bch_err_throw(c, fsck_ignore);
return bch_err_throw(c, fsck_errors_not_fixed);
case FSCK_FIX_ask:
if (flags & FSCK_AUTOFIX)
return -BCH_ERR_fsck_fix;
return -BCH_ERR_fsck_ask;
return bch_err_throw(c, fsck_fix);
return bch_err_throw(c, fsck_ask);
default:
BUG();
}
@ -427,12 +427,12 @@ int bch2_fsck_err_opt(struct bch_fs *c,
if ((flags & FSCK_AUTOFIX) &&
(c->opts.errors == BCH_ON_ERROR_continue ||
c->opts.errors == BCH_ON_ERROR_fix_safe))
return -BCH_ERR_fsck_fix;
return bch_err_throw(c, fsck_fix);
if (c->opts.errors == BCH_ON_ERROR_continue &&
(flags & FSCK_CAN_IGNORE))
return -BCH_ERR_fsck_ignore;
return -BCH_ERR_fsck_errors_not_fixed;
return bch_err_throw(c, fsck_ignore);
return bch_err_throw(c, fsck_errors_not_fixed);
}
}
@ -474,8 +474,8 @@ int __bch2_fsck_err(struct bch_fs *c,
if (test_bit(err, c->sb.errors_silent))
return flags & FSCK_CAN_FIX
? -BCH_ERR_fsck_fix
: -BCH_ERR_fsck_ignore;
? bch_err_throw(c, fsck_fix)
: bch_err_throw(c, fsck_ignore);
printbuf_indent_add_nextline(out, 2);
@ -517,10 +517,10 @@ int __bch2_fsck_err(struct bch_fs *c,
prt_str(out, ", ");
if (flags & FSCK_CAN_FIX) {
prt_actioning(out, action);
ret = -BCH_ERR_fsck_fix;
ret = bch_err_throw(c, fsck_fix);
} else {
prt_str(out, ", continuing");
ret = -BCH_ERR_fsck_ignore;
ret = bch_err_throw(c, fsck_ignore);
}
goto print;
@ -532,18 +532,18 @@ int __bch2_fsck_err(struct bch_fs *c,
"run fsck, and forward to devs so error can be marked for self-healing");
inconsistent = true;
print = true;
ret = -BCH_ERR_fsck_errors_not_fixed;
ret = bch_err_throw(c, fsck_errors_not_fixed);
} else if (flags & FSCK_CAN_FIX) {
prt_str(out, ", ");
prt_actioning(out, action);
ret = -BCH_ERR_fsck_fix;
ret = bch_err_throw(c, fsck_fix);
} else {
prt_str(out, ", continuing");
ret = -BCH_ERR_fsck_ignore;
ret = bch_err_throw(c, fsck_ignore);
}
} else if (c->opts.fix_errors == FSCK_FIX_exit) {
prt_str(out, ", exiting");
ret = -BCH_ERR_fsck_errors_not_fixed;
ret = bch_err_throw(c, fsck_errors_not_fixed);
} else if (flags & FSCK_CAN_FIX) {
int fix = s && s->fix
? s->fix
@ -562,33 +562,33 @@ int __bch2_fsck_err(struct bch_fs *c,
: FSCK_FIX_yes;
ret = ret & 1
? -BCH_ERR_fsck_fix
: -BCH_ERR_fsck_ignore;
? bch_err_throw(c, fsck_fix)
: bch_err_throw(c, fsck_ignore);
} else if (fix == FSCK_FIX_yes ||
(c->opts.nochanges &&
!(flags & FSCK_CAN_IGNORE))) {
prt_str(out, ", ");
prt_actioning(out, action);
ret = -BCH_ERR_fsck_fix;
ret = bch_err_throw(c, fsck_fix);
} else {
prt_str(out, ", not ");
prt_actioning(out, action);
ret = -BCH_ERR_fsck_ignore;
ret = bch_err_throw(c, fsck_ignore);
}
} else {
if (flags & FSCK_CAN_IGNORE) {
prt_str(out, ", continuing");
ret = -BCH_ERR_fsck_ignore;
ret = bch_err_throw(c, fsck_ignore);
} else {
prt_str(out, " (repair unimplemented)");
ret = -BCH_ERR_fsck_repair_unimplemented;
ret = bch_err_throw(c, fsck_repair_unimplemented);
}
}
if (bch2_err_matches(ret, BCH_ERR_fsck_ignore) &&
(c->opts.fix_errors == FSCK_FIX_exit ||
!(flags & FSCK_CAN_IGNORE)))
ret = -BCH_ERR_fsck_errors_not_fixed;
ret = bch_err_throw(c, fsck_errors_not_fixed);
if (test_bit(BCH_FS_in_fsck, &c->flags) &&
(!bch2_err_matches(ret, BCH_ERR_fsck_fix) &&
@ -620,6 +620,9 @@ print:
if (s)
s->ret = ret;
if (trans)
ret = bch2_trans_log_str(trans, bch2_sb_error_strs[err]) ?: ret;
err_unlock:
mutex_unlock(&c->fsck_error_msgs_lock);
err:
@ -657,12 +660,12 @@ int __bch2_bkey_fsck_err(struct bch_fs *c,
const char *fmt, ...)
{
if (from.flags & BCH_VALIDATE_silent)
return -BCH_ERR_fsck_delete_bkey;
return bch_err_throw(c, fsck_delete_bkey);
unsigned fsck_flags = 0;
if (!(from.flags & (BCH_VALIDATE_write|BCH_VALIDATE_commit))) {
if (test_bit(err, c->sb.errors_silent))
return -BCH_ERR_fsck_delete_bkey;
return bch_err_throw(c, fsck_delete_bkey);
fsck_flags |= FSCK_AUTOFIX|FSCK_CAN_FIX;
}


@ -173,7 +173,7 @@ do { \
if (!bch2_err_matches(_ret, BCH_ERR_fsck_fix) && \
!bch2_err_matches(_ret, BCH_ERR_fsck_ignore)) \
ret = _ret; \
ret = -BCH_ERR_fsck_delete_bkey; \
ret = bch_err_throw(c, fsck_delete_bkey); \
goto fsck_err; \
} while (0)


@ -193,7 +193,7 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
bool have_dirty_ptrs = false, have_pick = false;
if (k.k->type == KEY_TYPE_error)
return -BCH_ERR_key_type_error;
return bch_err_throw(c, key_type_error);
rcu_read_lock();
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
@ -286,17 +286,17 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
if (!have_dirty_ptrs)
return 0;
if (have_missing_devs)
return -BCH_ERR_no_device_to_read_from;
return bch_err_throw(c, no_device_to_read_from);
if (have_csum_errors)
return -BCH_ERR_data_read_csum_err;
return bch_err_throw(c, data_read_csum_err);
if (have_io_errors)
return -BCH_ERR_data_read_io_err;
return bch_err_throw(c, data_read_io_err);
/*
* If we get here, we have pointers (bkey_ptrs_validate() ensures that),
* but they don't point to valid devices:
*/
return -BCH_ERR_no_devices_valid;
return bch_err_throw(c, no_devices_valid);
}
/* KEY_TYPE_btree_ptr: */
@ -1515,7 +1515,7 @@ int bch2_bkey_ptrs_validate(struct bch_fs *c, struct bkey_s_c k,
struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
prt_printf(err, "invalid compression opt %u:%u",
opt.type, opt.level);
return -BCH_ERR_invalid_bkey;
return bch_err_throw(c, invalid_bkey);
}
#endif
break;


@ -447,7 +447,7 @@ static int __bch2_folio_reservation_get(struct bch_fs *c,
if (!reserved) {
bch2_disk_reservation_put(c, &disk_res);
return -BCH_ERR_ENOSPC_disk_reservation;
return bch_err_throw(c, ENOSPC_disk_reservation);
}
break;
}


@ -268,13 +268,13 @@ static long bch2_ioctl_subvolume_create(struct bch_fs *c, struct file *filp,
}
if (dst_dentry->d_inode) {
error = -BCH_ERR_EEXIST_subvolume_create;
error = bch_err_throw(c, EEXIST_subvolume_create);
goto err3;
}
dir = dst_path.dentry->d_inode;
if (IS_DEADDIR(dir)) {
error = -BCH_ERR_ENOENT_directory_dead;
error = bch_err_throw(c, ENOENT_directory_dead);
goto err3;
}


@ -2007,14 +2007,14 @@ retry:
goto err;
if (k.k->type != KEY_TYPE_dirent) {
ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
ret = bch_err_throw(c, ENOENT_dirent_doesnt_match_inode);
goto err;
}
d = bkey_s_c_to_dirent(k);
ret = bch2_dirent_read_target(trans, inode_inum(dir), d, &target);
if (ret > 0)
ret = -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
ret = bch_err_throw(c, ENOENT_dirent_doesnt_match_inode);
if (ret)
goto err;


@ -23,14 +23,15 @@
#include <linux/bsearch.h>
#include <linux/dcache.h> /* struct qstr */
static int dirent_points_to_inode_nowarn(struct bkey_s_c_dirent d,
static int dirent_points_to_inode_nowarn(struct bch_fs *c,
struct bkey_s_c_dirent d,
struct bch_inode_unpacked *inode)
{
if (d.v->d_type == DT_SUBVOL
? le32_to_cpu(d.v->d_child_subvol) == inode->bi_subvol
: le64_to_cpu(d.v->d_inum) == inode->bi_inum)
return 0;
return -BCH_ERR_ENOENT_dirent_doesnt_match_inode;
return bch_err_throw(c, ENOENT_dirent_doesnt_match_inode);
}
static void dirent_inode_mismatch_msg(struct printbuf *out,
@ -49,7 +50,7 @@ static int dirent_points_to_inode(struct bch_fs *c,
struct bkey_s_c_dirent dirent,
struct bch_inode_unpacked *inode)
{
int ret = dirent_points_to_inode_nowarn(dirent, inode);
int ret = dirent_points_to_inode_nowarn(c, dirent, inode);
if (ret) {
struct printbuf buf = PRINTBUF;
dirent_inode_mismatch_msg(&buf, c, dirent, inode);
@ -152,7 +153,7 @@ static int find_snapshot_tree_subvol(struct btree_trans *trans,
goto found;
}
}
ret = -BCH_ERR_ENOENT_no_snapshot_tree_subvol;
ret = bch_err_throw(trans->c, ENOENT_no_snapshot_tree_subvol);
found:
bch2_trans_iter_exit(trans, &iter);
return ret;
@ -229,7 +230,7 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
if (d_type != DT_DIR) {
bch_err(c, "error looking up lost+found: not a directory");
return -BCH_ERR_ENOENT_not_directory;
return bch_err_throw(c, ENOENT_not_directory);
}
/*
@ -531,7 +532,7 @@ static int reconstruct_subvol(struct btree_trans *trans, u32 snapshotid, u32 sub
if (!bch2_snapshot_is_leaf(c, snapshotid)) {
bch_err(c, "need to reconstruct subvol, but have interior node snapshot");
return -BCH_ERR_fsck_repair_unimplemented;
return bch_err_throw(c, fsck_repair_unimplemented);
}
/*
@ -939,7 +940,7 @@ lookup_inode_for_snapshot(struct btree_trans *trans, struct inode_walker *w, str
if (ret)
goto fsck_err;
ret = -BCH_ERR_transaction_restart_nested;
ret = bch_err_throw(c, transaction_restart_nested);
goto fsck_err;
}
@ -1041,7 +1042,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans,
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
if ((ret || dirent_points_to_inode_nowarn(d, inode)) &&
if ((ret || dirent_points_to_inode_nowarn(c, d, inode)) &&
inode->bi_subvol &&
(inode->bi_flags & BCH_INODE_has_child_snapshot)) {
/* Older version of a renamed subvolume root: we won't have a
@ -1062,7 +1063,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans,
trans, inode_points_to_missing_dirent,
"inode points to missing dirent\n%s",
(bch2_inode_unpacked_to_text(&buf, inode), buf.buf)) ||
fsck_err_on(!ret && dirent_points_to_inode_nowarn(d, inode),
fsck_err_on(!ret && dirent_points_to_inode_nowarn(c, d, inode),
trans, inode_points_to_wrong_dirent,
"%s",
(printbuf_reset(&buf),
@ -1453,7 +1454,7 @@ static int check_key_has_inode(struct btree_trans *trans,
goto err;
inode->last_pos.inode--;
ret = -BCH_ERR_transaction_restart_nested;
ret = bch_err_throw(c, transaction_restart_nested);
goto err;
}
@ -1570,7 +1571,7 @@ static int extent_ends_at(struct bch_fs *c,
sizeof(seen->ids.data[0]) * seen->ids.size,
GFP_KERNEL);
if (!n.seen.ids.data)
return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
return bch_err_throw(c, ENOMEM_fsck_extent_ends_at);
__darray_for_each(extent_ends->e, i) {
if (i->snapshot == k.k->p.snapshot) {
@ -1620,7 +1621,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
bch_err(c, "%s: error finding first overlapping extent when repairing, got%s",
__func__, buf.buf);
ret = -BCH_ERR_internal_fsck_err;
ret = bch_err_throw(c, internal_fsck_err);
goto err;
}
@ -1645,7 +1646,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
pos2.size != k2.k->size) {
bch_err(c, "%s: error finding second overlapping extent when repairing%s",
__func__, buf.buf);
ret = -BCH_ERR_internal_fsck_err;
ret = bch_err_throw(c, internal_fsck_err);
goto err;
}
@ -1693,7 +1694,7 @@ static int overlapping_extents_found(struct btree_trans *trans,
* We overwrote the second extent - restart
* check_extent() from the top:
*/
ret = -BCH_ERR_transaction_restart_nested;
ret = bch_err_throw(c, transaction_restart_nested);
}
}
fsck_err:
@ -2046,7 +2047,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
(bch2_bkey_val_to_text(&buf, c, d.s_c), buf.buf))) {
if (!new_parent_subvol) {
bch_err(c, "could not find a subvol for snapshot %u", d.k->p.snapshot);
return -BCH_ERR_fsck_repair_unimplemented;
return bch_err_throw(c, fsck_repair_unimplemented);
}
struct bkey_i_dirent *new_dirent = bch2_bkey_make_mut_typed(trans, iter, &d.s_c, 0, dirent);
@ -2108,7 +2109,7 @@ static int check_dirent_to_subvol(struct btree_trans *trans, struct btree_iter *
if (ret) {
bch_err(c, "subvol %u points to missing inode root %llu", target_subvol, target_inum);
ret = -BCH_ERR_fsck_repair_unimplemented;
ret = bch_err_throw(c, fsck_repair_unimplemented);
goto err;
}
@ -2140,7 +2141,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
struct bch_hash_info *hash_info,
struct inode_walker *dir,
struct inode_walker *target,
struct snapshots_seen *s)
struct snapshots_seen *s,
bool *need_second_pass)
{
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
@ -2182,7 +2184,10 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
*hash_info = bch2_hash_info_init(c, &i->inode);
dir->first_this_inode = false;
ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info, iter, k);
hash_info->cf_encoding = bch2_inode_casefold(c, &i->inode) ? c->cf_encoding : NULL;
ret = bch2_str_hash_check_key(trans, s, &bch2_dirent_hash_desc, hash_info,
iter, k, need_second_pass);
if (ret < 0)
goto err;
if (ret) {
@ -2203,31 +2208,34 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k),
buf.buf))) {
struct qstr name = bch2_dirent_get_name(d);
u32 subvol = d.v->d_type == DT_SUBVOL
? le32_to_cpu(d.v->d_parent_subvol)
: 0;
subvol_inum dir_inum = { .subvol = d.v->d_type == DT_SUBVOL
? le32_to_cpu(d.v->d_parent_subvol)
: 0,
};
u64 target = d.v->d_type == DT_SUBVOL
? le32_to_cpu(d.v->d_child_subvol)
: le64_to_cpu(d.v->d_inum);
u64 dir_offset;
struct qstr name = bch2_dirent_get_name(d);
ret = bch2_hash_delete_at(trans,
struct bkey_i_dirent *new_d =
bch2_dirent_create_key(trans, hash_info, dir_inum,
d.v->d_type, &name, NULL, target);
ret = PTR_ERR_OR_ZERO(new_d);
if (ret)
goto out;
new_d->k.p.inode = d.k->p.inode;
new_d->k.p.snapshot = d.k->p.snapshot;
struct btree_iter dup_iter = {};
ret = bch2_hash_delete_at(trans,
bch2_dirent_hash_desc, hash_info, iter,
BTREE_UPDATE_internal_snapshot_node) ?:
bch2_dirent_create_snapshot(trans, subvol,
d.k->p.inode, d.k->p.snapshot,
hash_info,
d.v->d_type,
&name,
target,
&dir_offset,
BTREE_ITER_with_updates|
BTREE_UPDATE_internal_snapshot_node|
STR_HASH_must_create) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
/* might need another check_dirents pass */
bch2_str_hash_repair_key(trans, s,
&bch2_dirent_hash_desc, hash_info,
iter, bkey_i_to_s_c(&new_d->k_i),
&dup_iter, bkey_s_c_null,
need_second_pass);
goto out;
}
@ -2295,7 +2303,6 @@ out:
err:
fsck_err:
printbuf_exit(&buf);
bch_err_fn(c, ret);
return ret;
}
@ -2309,17 +2316,30 @@ int bch2_check_dirents(struct bch_fs *c)
struct inode_walker target = inode_walker_init();
struct snapshots_seen s;
struct bch_hash_info hash_info;
bool need_second_pass = false, did_second_pass = false;
snapshots_seen_init(&s);
again:
int ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s)) ?:
check_dirent(trans, &iter, k, &hash_info, &dir, &target, &s,
&need_second_pass)) ?:
check_subdir_count_notnested(trans, &dir));
if (!ret && need_second_pass && !did_second_pass) {
bch_info(c, "check_dirents requires second pass");
swap(did_second_pass, need_second_pass);
goto again;
}
if (!ret && need_second_pass) {
bch_err(c, "dirents not repairing");
ret = -EINVAL;
}
snapshots_seen_exit(&s);
inode_walker_exit(&dir);
inode_walker_exit(&target);
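
check_dirents can now require a second pass: repairing a hash-table collision may insert the corrected key before the iterator's current position (the need_second_pass plumbing above), so the whole walk is restarted once, and only once. The control flow, compressed into one hypothetical helper where run_dirents_pass() stands in for the for_each_btree_key_commit() loop:

static int run_dirents_pass(struct bch_fs *, bool *need_second_pass);

static int check_dirents_flow(struct bch_fs *c)
{
	bool need_second_pass = false, did_second_pass = false;
	int ret;
again:
	ret = run_dirents_pass(c, &need_second_pass);

	if (!ret && need_second_pass && !did_second_pass) {
		/* repairs landed behind the iterator; walk the btree again */
		swap(did_second_pass, need_second_pass);
		goto again;
	}

	/* a second pass that still generates work means repair isn't converging */
	if (!ret && need_second_pass)
		ret = -EINVAL;

	return ret;
}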
@ -2333,16 +2353,14 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
struct inode_walker *inode)
{
struct bch_fs *c = trans->c;
struct inode_walker_entry *i;
int ret;
ret = bch2_check_key_has_snapshot(trans, iter, k);
int ret = bch2_check_key_has_snapshot(trans, iter, k);
if (ret < 0)
return ret;
if (ret)
return 0;
i = walk_inode(trans, inode, k);
struct inode_walker_entry *i = walk_inode(trans, inode, k);
ret = PTR_ERR_OR_ZERO(i);
if (ret)
return ret;
@ -2358,9 +2376,9 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
*hash_info = bch2_hash_info_init(c, &i->inode);
inode->first_this_inode = false;
ret = bch2_str_hash_check_key(trans, NULL, &bch2_xattr_hash_desc, hash_info, iter, k);
bch_err_fn(c, ret);
return ret;
bool need_second_pass = false;
return bch2_str_hash_check_key(trans, NULL, &bch2_xattr_hash_desc, hash_info,
iter, k, &need_second_pass);
}
/*
@ -2749,7 +2767,7 @@ static int add_nlink(struct bch_fs *c, struct nlink_table *t,
if (!d) {
bch_err(c, "fsck: error allocating memory for nlink_table, size %zu",
new_size);
return -BCH_ERR_ENOMEM_fsck_add_nlink;
return bch_err_throw(c, ENOMEM_fsck_add_nlink);
}
if (t->d)


@ -1041,7 +1041,7 @@ again:
goto found_slot;
if (!ret && start == min)
ret = -BCH_ERR_ENOSPC_inode_create;
ret = bch_err_throw(trans->c, ENOSPC_inode_create);
if (ret) {
bch2_trans_iter_exit(trans, iter);
@ -1161,7 +1161,7 @@ retry:
bch2_fs_inconsistent(c,
"inode %llu:%u not found when deleting",
inum.inum, snapshot);
ret = -BCH_ERR_ENOENT_inode;
ret = bch_err_throw(c, ENOENT_inode);
goto err;
}
@ -1328,7 +1328,7 @@ retry:
bch2_fs_inconsistent(c,
"inode %llu:%u not found when deleting",
inum, snapshot);
ret = -BCH_ERR_ENOENT_inode;
ret = bch_err_throw(c, ENOENT_inode);
goto err;
}


@ -91,7 +91,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
opts.data_replicas,
BCH_WATERMARK_normal, 0, &cl, &wp);
if (bch2_err_matches(ret, BCH_ERR_operation_blocked))
ret = -BCH_ERR_transaction_restart_nested;
ret = bch_err_throw(c, transaction_restart_nested);
if (ret)
goto err;


@ -137,21 +137,21 @@ static inline int should_promote(struct bch_fs *c, struct bkey_s_c k,
BUG_ON(!opts.promote_target);
if (!(flags & BCH_READ_may_promote))
return -BCH_ERR_nopromote_may_not;
return bch_err_throw(c, nopromote_may_not);
if (bch2_bkey_has_target(c, k, opts.promote_target))
return -BCH_ERR_nopromote_already_promoted;
return bch_err_throw(c, nopromote_already_promoted);
if (bkey_extent_is_unwritten(k))
return -BCH_ERR_nopromote_unwritten;
return bch_err_throw(c, nopromote_unwritten);
if (bch2_target_congested(c, opts.promote_target))
return -BCH_ERR_nopromote_congested;
return bch_err_throw(c, nopromote_congested);
}
if (rhashtable_lookup_fast(&c->promote_table, &pos,
bch_promote_params))
return -BCH_ERR_nopromote_in_flight;
return bch_err_throw(c, nopromote_in_flight);
return 0;
}
@ -239,7 +239,7 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
struct promote_op *op = kzalloc(sizeof(*op), GFP_KERNEL);
if (!op) {
ret = -BCH_ERR_nopromote_enomem;
ret = bch_err_throw(c, nopromote_enomem);
goto err_put;
}
@ -248,7 +248,7 @@ static struct bch_read_bio *__promote_alloc(struct btree_trans *trans,
if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
bch_promote_params)) {
ret = -BCH_ERR_nopromote_in_flight;
ret = bch_err_throw(c, nopromote_in_flight);
goto err;
}
@ -544,7 +544,7 @@ retry:
if (!bkey_and_val_eq(k, bkey_i_to_s_c(u->k.k))) {
/* extent we wanted to read no longer exists: */
rbio->ret = -BCH_ERR_data_read_key_overwritten;
rbio->ret = bch_err_throw(trans->c, data_read_key_overwritten);
goto err;
}
@ -1035,7 +1035,7 @@ int __bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *orig,
if ((bch2_bkey_extent_flags(k) & BIT_ULL(BCH_EXTENT_FLAG_poisoned)) &&
!orig->data_update)
return -BCH_ERR_extent_poisoned;
return bch_err_throw(c, extent_poisoned);
retry_pick:
ret = bch2_bkey_pick_read_device(c, k, failed, &pick, dev);
@ -1073,7 +1073,7 @@ retry_pick:
bch_err_ratelimited(c, "%s", buf.buf);
printbuf_exit(&buf);
ret = -BCH_ERR_data_read_no_encryption_key;
ret = bch_err_throw(c, data_read_no_encryption_key);
goto err;
}
@ -1127,7 +1127,7 @@ retry_pick:
if (ca)
enumerated_ref_put(&ca->io_ref[READ],
BCH_DEV_READ_REF_io_read);
rbio->ret = -BCH_ERR_data_read_buffer_too_small;
rbio->ret = bch_err_throw(c, data_read_buffer_too_small);
goto out_read_done;
}
@ -1332,7 +1332,7 @@ hole:
* have to signal that:
*/
if (u)
orig->ret = -BCH_ERR_data_read_key_overwritten;
orig->ret = bch_err_throw(c, data_read_key_overwritten);
zero_fill_bio_iter(&orig->bio, iter);
out_read_done:
@ -1509,18 +1509,18 @@ int bch2_fs_io_read_init(struct bch_fs *c)
c->opts.btree_node_size,
c->opts.encoded_extent_max) /
PAGE_SIZE, 0))
return -BCH_ERR_ENOMEM_bio_bounce_pages_init;
return bch_err_throw(c, ENOMEM_bio_bounce_pages_init);
if (bioset_init(&c->bio_read, 1, offsetof(struct bch_read_bio, bio),
BIOSET_NEED_BVECS))
return -BCH_ERR_ENOMEM_bio_read_init;
return bch_err_throw(c, ENOMEM_bio_read_init);
if (bioset_init(&c->bio_read_split, 1, offsetof(struct bch_read_bio, bio),
BIOSET_NEED_BVECS))
return -BCH_ERR_ENOMEM_bio_read_split_init;
return bch_err_throw(c, ENOMEM_bio_read_split_init);
if (rhashtable_init(&c->promote_table, &bch_promote_params))
return -BCH_ERR_ENOMEM_promote_table_init;
return bch_err_throw(c, ENOMEM_promote_table_init);
return 0;
}


@ -91,6 +91,8 @@ static inline int bch2_read_indirect_extent(struct btree_trans *trans,
return 0;
*data_btree = BTREE_ID_reflink;
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter,
offset_into_extent,
@ -102,10 +104,10 @@ static inline int bch2_read_indirect_extent(struct btree_trans *trans,
if (bkey_deleted(k.k)) {
bch2_trans_iter_exit(trans, &iter);
return -BCH_ERR_missing_indirect_extent;
return bch_err_throw(c, missing_indirect_extent);
}
bch2_bkey_buf_reassemble(extent, trans->c, k);
bch2_bkey_buf_reassemble(extent, c, k);
bch2_trans_iter_exit(trans, &iter);
return 0;
}


@ -558,6 +558,7 @@ static void bch2_write_done(struct closure *cl)
static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct keylist *keys = &op->insert_keys;
struct bkey_i *src, *dst = keys->keys, *n;
@ -569,7 +570,7 @@ static noinline int bch2_write_drop_io_error_ptrs(struct bch_write_op *op)
test_bit(ptr->dev, op->failed.d));
if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(src)))
return -BCH_ERR_data_write_io;
return bch_err_throw(c, data_write_io);
}
if (dst != src)
@ -976,7 +977,7 @@ csum_err:
op->crc.csum_type < BCH_CSUM_NR
? __bch2_csum_types[op->crc.csum_type]
: "(unknown)");
return -BCH_ERR_data_write_csum;
return bch_err_throw(c, data_write_csum);
}
static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
@ -1287,7 +1288,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
static void __bch2_nocow_write_done(struct bch_write_op *op)
{
if (unlikely(op->flags & BCH_WRITE_io_error)) {
op->error = -BCH_ERR_data_write_io;
op->error = bch_err_throw(op->c, data_write_io);
} else if (unlikely(op->flags & BCH_WRITE_convert_unwritten))
bch2_nocow_write_convert_unwritten(op);
}
@ -1480,10 +1481,10 @@ err_bucket_stale:
"pointer to invalid bucket in nocow path on device %llu\n %s",
stale_at->b.inode,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = -BCH_ERR_data_write_invalid_ptr;
ret = bch_err_throw(c, data_write_invalid_ptr);
} else {
/* We can retry this: */
ret = -BCH_ERR_transaction_restart;
ret = bch_err_throw(c, transaction_restart);
}
printbuf_exit(&buf);
@ -1690,18 +1691,18 @@ CLOSURE_CALLBACK(bch2_write)
if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
bch2_write_op_error(op, op->pos.offset, "misaligned write");
op->error = -BCH_ERR_data_write_misaligned;
op->error = bch_err_throw(c, data_write_misaligned);
goto err;
}
if (c->opts.nochanges) {
op->error = -BCH_ERR_erofs_no_writes;
op->error = bch_err_throw(c, erofs_no_writes);
goto err;
}
if (!(op->flags & BCH_WRITE_move) &&
!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_write)) {
op->error = -BCH_ERR_erofs_no_writes;
op->error = bch_err_throw(c, erofs_no_writes);
goto err;
}
@ -1773,7 +1774,7 @@ int bch2_fs_io_write_init(struct bch_fs *c)
{
if (bioset_init(&c->bio_write, 1, offsetof(struct bch_write_bio, bio), BIOSET_NEED_BVECS) ||
bioset_init(&c->replica_set, 4, offsetof(struct bch_write_bio, bio), 0))
return -BCH_ERR_ENOMEM_bio_write_init;
return bch_err_throw(c, ENOMEM_bio_write_init);
return 0;
}


@ -397,7 +397,7 @@ static int journal_entry_open(struct journal *j)
BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
if (j->blocked)
return -BCH_ERR_journal_blocked;
return bch_err_throw(c, journal_blocked);
if (j->cur_entry_error)
return j->cur_entry_error;
@ -407,23 +407,23 @@ static int journal_entry_open(struct journal *j)
return ret;
if (!fifo_free(&j->pin))
return -BCH_ERR_journal_pin_full;
return bch_err_throw(c, journal_pin_full);
if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf))
return -BCH_ERR_journal_max_in_flight;
return bch_err_throw(c, journal_max_in_flight);
if (atomic64_read(&j->seq) - j->seq_write_started == JOURNAL_STATE_BUF_NR)
return -BCH_ERR_journal_max_open;
return bch_err_throw(c, journal_max_open);
if (unlikely(journal_cur_seq(j) >= JOURNAL_SEQ_MAX)) {
bch_err(c, "cannot start: journal seq overflow");
if (bch2_fs_emergency_read_only_locked(c))
bch_err(c, "fatal error - emergency read only");
return -BCH_ERR_journal_shutdown;
return bch_err_throw(c, journal_shutdown);
}
if (!j->free_buf && !buf->data)
return -BCH_ERR_journal_buf_enomem; /* will retry after write completion frees up a buf */
return bch_err_throw(c, journal_buf_enomem); /* will retry after write completion frees up a buf */
BUG_ON(!j->cur_entry_sectors);
@ -447,7 +447,7 @@ static int journal_entry_open(struct journal *j)
u64s = clamp_t(int, u64s, 0, JOURNAL_ENTRY_CLOSED_VAL - 1);
if (u64s <= (ssize_t) j->early_journal_entries.nr)
return -BCH_ERR_journal_full;
return bch_err_throw(c, journal_full);
if (fifo_empty(&j->pin) && j->reclaim_thread)
wake_up_process(j->reclaim_thread);
@ -464,7 +464,7 @@ static int journal_entry_open(struct journal *j)
journal_cur_seq(j));
if (bch2_fs_emergency_read_only_locked(c))
bch_err(c, "fatal error - emergency read only");
return -BCH_ERR_journal_shutdown;
return bch_err_throw(c, journal_shutdown);
}
BUG_ON(j->pin.back - 1 != atomic64_read(&j->seq));
@ -597,16 +597,16 @@ retry:
return ret;
if (j->blocked)
return -BCH_ERR_journal_blocked;
return bch_err_throw(c, journal_blocked);
if ((flags & BCH_WATERMARK_MASK) < j->watermark) {
ret = -BCH_ERR_journal_full;
ret = bch_err_throw(c, journal_full);
can_discard = j->can_discard;
goto out;
}
if (nr_unwritten_journal_entries(j) == ARRAY_SIZE(j->buf) && !journal_entry_is_open(j)) {
ret = -BCH_ERR_journal_max_in_flight;
ret = bch_err_throw(c, journal_max_in_flight);
goto out;
}
@ -647,7 +647,7 @@ out:
goto retry;
if (journal_error_check_stuck(j, ret, flags))
ret = -BCH_ERR_journal_stuck;
ret = bch_err_throw(c, journal_stuck);
if (ret == -BCH_ERR_journal_max_in_flight &&
track_event_change(&c->times[BCH_TIME_blocked_journal_max_in_flight], true) &&
@ -812,6 +812,7 @@ out:
int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
struct closure *parent)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct journal_buf *buf;
int ret = 0;
@ -827,7 +828,7 @@ int bch2_journal_flush_seq_async(struct journal *j, u64 seq,
/* Recheck under lock: */
if (j->err_seq && seq >= j->err_seq) {
ret = -BCH_ERR_journal_flush_err;
ret = bch_err_throw(c, journal_flush_err);
goto out;
}
@ -998,7 +999,7 @@ int bch2_journal_meta(struct journal *j)
struct bch_fs *c = container_of(j, struct bch_fs, journal);
if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_journal))
return -BCH_ERR_erofs_no_writes;
return bch_err_throw(c, erofs_no_writes);
int ret = __bch2_journal_meta(j);
enumerated_ref_put(&c->writes, BCH_WRITE_REF_journal);
@ -1131,7 +1132,7 @@ static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr,
new_buckets = kcalloc(nr, sizeof(u64), GFP_KERNEL);
new_bucket_seq = kcalloc(nr, sizeof(u64), GFP_KERNEL);
if (!bu || !ob || !new_buckets || !new_bucket_seq) {
ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
ret = bch_err_throw(c, ENOMEM_set_nr_journal_buckets);
goto err_free;
}
@ -1322,7 +1323,7 @@ int bch2_dev_journal_bucket_delete(struct bch_dev *ca, u64 b)
u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!new_buckets)
return -BCH_ERR_ENOMEM_set_nr_journal_buckets;
return bch_err_throw(c, ENOMEM_set_nr_journal_buckets);
memcpy(new_buckets, ja->buckets, ja->nr * sizeof(u64));
memmove(&new_buckets[pos],
@ -1372,14 +1373,14 @@ int bch2_dev_journal_alloc(struct bch_dev *ca, bool new_fs)
if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) {
bch_err(c, "cannot allocate journal, filesystem is an unresized image file");
return -BCH_ERR_erofs_filesystem_full;
return bch_err_throw(c, erofs_filesystem_full);
}
unsigned nr;
int ret;
if (dynamic_fault("bcachefs:add:journal_alloc")) {
ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
ret = bch_err_throw(c, ENOMEM_set_nr_journal_buckets);
goto err;
}
@ -1518,7 +1519,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
init_fifo(&j->pin, roundup_pow_of_two(nr), GFP_KERNEL);
if (!j->pin.data) {
bch_err(c, "error reallocating journal fifo (%llu open entries)", nr);
return -BCH_ERR_ENOMEM_journal_pin_fifo;
return bch_err_throw(c, ENOMEM_journal_pin_fifo);
}
j->replay_journal_seq = last_seq;
@ -1606,6 +1607,7 @@ void bch2_dev_journal_exit(struct bch_dev *ca)
int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
{
struct bch_fs *c = ca->fs;
struct journal_device *ja = &ca->journal;
struct bch_sb_field_journal *journal_buckets =
bch2_sb_field_get(sb, journal);
@ -1625,7 +1627,7 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
ja->bucket_seq = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!ja->bucket_seq)
return -BCH_ERR_ENOMEM_dev_journal_init;
return bch_err_throw(c, ENOMEM_dev_journal_init);
unsigned nr_bvecs = DIV_ROUND_UP(JOURNAL_ENTRY_SIZE_MAX, PAGE_SIZE);
@ -1633,7 +1635,7 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
ja->bio[i] = kzalloc(struct_size(ja->bio[i], bio.bi_inline_vecs,
nr_bvecs), GFP_KERNEL);
if (!ja->bio[i])
return -BCH_ERR_ENOMEM_dev_journal_init;
return bch_err_throw(c, ENOMEM_dev_journal_init);
ja->bio[i]->ca = ca;
ja->bio[i]->buf_idx = i;
@ -1642,7 +1644,7 @@ int bch2_dev_journal_init(struct bch_dev *ca, struct bch_sb *sb)
ja->buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!ja->buckets)
return -BCH_ERR_ENOMEM_dev_journal_init;
return bch_err_throw(c, ENOMEM_dev_journal_init);
if (journal_buckets_v2) {
unsigned nr = bch2_sb_field_journal_v2_nr_entries(journal_buckets_v2);
@ -1696,10 +1698,12 @@ void bch2_fs_journal_init_early(struct journal *j)
int bch2_fs_journal_init(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
j->free_buf_size = j->buf_size_want = JOURNAL_ENTRY_SIZE_MIN;
j->free_buf = kvmalloc(j->free_buf_size, GFP_KERNEL);
if (!j->free_buf)
return -BCH_ERR_ENOMEM_journal_buf;
return bch_err_throw(c, ENOMEM_journal_buf);
for (unsigned i = 0; i < ARRAY_SIZE(j->buf); i++)
j->buf[i].idx = i;
@ -1707,7 +1711,7 @@ int bch2_fs_journal_init(struct journal *j)
j->wq = alloc_workqueue("bcachefs_journal",
WQ_HIGHPRI|WQ_FREEZABLE|WQ_UNBOUND|WQ_MEM_RECLAIM, 512);
if (!j->wq)
return -BCH_ERR_ENOMEM_fs_other_alloc;
return bch_err_throw(c, ENOMEM_fs_other_alloc);
return 0;
}


@ -199,7 +199,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
GFP_KERNEL);
if (!_i)
return -BCH_ERR_ENOMEM_journal_entry_add;
return bch_err_throw(c, ENOMEM_journal_entry_add);
/*
* Duplicate journal entries? If so we want the one that didn't have a
@ -242,7 +242,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
replace:
i = kvmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
if (!i)
return -BCH_ERR_ENOMEM_journal_entry_add;
return bch_err_throw(c, ENOMEM_journal_entry_add);
darray_init(&i->ptrs);
i->csum_good = entry_ptr.csum_good;
@ -322,7 +322,7 @@ static void journal_entry_err_msg(struct printbuf *out,
bch2_sb_error_count(c, BCH_FSCK_ERR_##_err); \
if (bch2_fs_inconsistent(c, \
"corrupt metadata before write: %s\n", _buf.buf)) {\
ret = -BCH_ERR_fsck_errors_not_fixed; \
ret = bch_err_throw(c, fsck_errors_not_fixed); \
goto fsck_err; \
} \
break; \
@ -1020,19 +1020,19 @@ struct journal_read_buf {
size_t size;
};
static int journal_read_buf_realloc(struct journal_read_buf *b,
static int journal_read_buf_realloc(struct bch_fs *c, struct journal_read_buf *b,
size_t new_size)
{
void *n;
/* the bios are sized for this many pages, max: */
if (new_size > JOURNAL_ENTRY_SIZE_MAX)
return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
return bch_err_throw(c, ENOMEM_journal_read_buf_realloc);
new_size = roundup_pow_of_two(new_size);
n = kvmalloc(new_size, GFP_KERNEL);
if (!n)
return -BCH_ERR_ENOMEM_journal_read_buf_realloc;
return bch_err_throw(c, ENOMEM_journal_read_buf_realloc);
kvfree(b->data);
b->data = n;
@ -1067,7 +1067,7 @@ reread:
bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
if (!bio)
return -BCH_ERR_ENOMEM_journal_read_bucket;
return bch_err_throw(c, ENOMEM_journal_read_bucket);
bio_init(bio, ca->disk_sb.bdev, bio->bi_inline_vecs, nr_bvecs, REQ_OP_READ);
bio->bi_iter.bi_sector = offset;
@ -1078,7 +1078,7 @@ reread:
kfree(bio);
if (!ret && bch2_meta_read_fault("journal"))
ret = -BCH_ERR_EIO_fault_injected;
ret = bch_err_throw(c, EIO_fault_injected);
bch2_account_io_completion(ca, BCH_MEMBER_ERROR_read,
submit_time, !ret);
@ -1106,7 +1106,7 @@ reread:
break;
case JOURNAL_ENTRY_REREAD:
if (vstruct_bytes(j) > buf->size) {
ret = journal_read_buf_realloc(buf,
ret = journal_read_buf_realloc(c, buf,
vstruct_bytes(j));
if (ret)
return ret;
@ -1206,7 +1206,7 @@ static CLOSURE_CALLBACK(bch2_journal_read_device)
if (!ja->nr)
goto out;
ret = journal_read_buf_realloc(&buf, PAGE_SIZE);
ret = journal_read_buf_realloc(c, &buf, PAGE_SIZE);
if (ret)
goto err;
@ -1691,7 +1691,7 @@ static CLOSURE_CALLBACK(journal_write_done)
: j->noflush_write_time, j->write_start_time);
if (!w->devs_written.nr) {
err = -BCH_ERR_journal_write_err;
err = bch_err_throw(c, journal_write_err);
} else {
bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
w->devs_written);


@ -226,7 +226,7 @@ void bch2_journal_space_available(struct journal *j)
bch_err(c, "%s", buf.buf);
printbuf_exit(&buf);
}
ret = -BCH_ERR_insufficient_journal_devices;
ret = bch_err_throw(c, insufficient_journal_devices);
goto out;
}
@ -240,7 +240,7 @@ void bch2_journal_space_available(struct journal *j)
total = j->space[journal_space_total].total;
if (!j->space[journal_space_discarded].next_entry)
ret = -BCH_ERR_journal_full;
ret = bch_err_throw(c, journal_full);
if ((j->space[journal_space_clean_ondisk].next_entry <
j->space[journal_space_clean_ondisk].total) &&


@ -210,7 +210,7 @@ int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca,
j = bch2_sb_field_resize(&ca->disk_sb, journal_v2,
(sizeof(*j) + sizeof(j->d[0]) * nr_compacted) / sizeof(u64));
if (!j)
return -BCH_ERR_ENOSPC_sb_journal;
return bch_err_throw(c, ENOSPC_sb_journal);
bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);


@ -78,7 +78,7 @@ int bch2_journal_seq_blacklist_add(struct bch_fs *c, u64 start, u64 end)
bl = bch2_sb_field_resize(&c->disk_sb, journal_seq_blacklist,
sb_blacklist_u64s(nr + 1));
if (!bl) {
ret = -BCH_ERR_ENOSPC_sb_journal_seq_blacklist;
ret = bch_err_throw(c, ENOSPC_sb_journal_seq_blacklist);
goto out;
}
@ -152,7 +152,7 @@ int bch2_blacklist_table_initialize(struct bch_fs *c)
t = kzalloc(struct_size(t, entries, nr), GFP_KERNEL);
if (!t)
return -BCH_ERR_ENOMEM_blacklist_table_init;
return bch_err_throw(c, ENOMEM_blacklist_table_init);
t->nr = nr;


@ -35,7 +35,7 @@ static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s k,
nr_good = bch2_bkey_durability(c, k.s_c);
if ((!nr_good && !(flags & lost)) ||
(nr_good < replicas && !(flags & degraded)))
return -BCH_ERR_remove_would_lose_data;
return bch_err_throw(c, remove_would_lose_data);
return 0;
}
@ -156,7 +156,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c,
/* don't handle this yet: */
if (flags & BCH_FORCE_IF_METADATA_LOST)
return -BCH_ERR_remove_with_metadata_missing_unimplemented;
return bch_err_throw(c, remove_with_metadata_missing_unimplemented);
trans = bch2_trans_get(c);
bch2_bkey_buf_init(&k);


@ -971,7 +971,7 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
if (data_opts.scrub &&
!bch2_dev_idx_is_online(c, data_opts.read_dev)) {
bch2_trans_iter_exit(trans, &iter);
ret = -BCH_ERR_device_offline;
ret = bch_err_throw(c, device_offline);
break;
}


@ -287,7 +287,7 @@ int bch2_unlink_trans(struct btree_trans *trans,
}
if (deleting_subvol && !inode_u->bi_subvol) {
ret = -BCH_ERR_ENOENT_not_subvol;
ret = bch_err_throw(c, ENOENT_not_subvol);
goto err;
}
@ -633,7 +633,7 @@ static int __bch2_inum_to_path(struct btree_trans *trans,
break;
if (!inode.bi_dir && !inode.bi_dir_offset) {
ret = -BCH_ERR_ENOENT_inode_no_backpointer;
ret = bch_err_throw(trans->c, ENOENT_inode_no_backpointer);
goto disconnected;
}


@ -527,7 +527,7 @@ int bch2_fs_quota_read(struct bch_fs *c)
struct bch_sb_field_quota *sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
if (!sb_quota) {
mutex_unlock(&c->sb_lock);
return -BCH_ERR_ENOSPC_sb_quota;
return bch_err_throw(c, ENOSPC_sb_quota);
}
bch2_sb_quota_read(c);
@ -572,7 +572,7 @@ static int bch2_quota_enable(struct super_block *sb, unsigned uflags)
mutex_lock(&c->sb_lock);
sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
if (!sb_quota) {
ret = -BCH_ERR_ENOSPC_sb_quota;
ret = bch_err_throw(c, ENOSPC_sb_quota);
goto unlock;
}
@ -726,7 +726,7 @@ static int bch2_quota_set_info(struct super_block *sb, int type,
mutex_lock(&c->sb_lock);
sb_quota = bch2_sb_get_or_create_quota(&c->disk_sb);
if (!sb_quota) {
ret = -BCH_ERR_ENOSPC_sb_quota;
ret = bch_err_throw(c, ENOSPC_sb_quota);
goto unlock;
}


@ -443,7 +443,7 @@ static int do_rebalance_extent(struct moving_context *ctxt,
if (bch2_err_matches(ret, ENOMEM)) {
/* memory allocation failure, wait for some IO to finish */
bch2_move_ctxt_wait_for_io(ctxt);
ret = -BCH_ERR_transaction_restart_nested;
ret = bch_err_throw(c, transaction_restart_nested);
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
@ -795,7 +795,7 @@ static int check_rebalance_work_one(struct btree_trans *trans,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_prefetch|
BTREE_ITER_all_snapshots);
return -BCH_ERR_transaction_restart_nested;
return bch_err_throw(c, transaction_restart_nested);
}
if (!extent_k.k && !rebalance_k.k)

View File

@ -879,7 +879,7 @@ int bch2_fs_recovery(struct bch_fs *c)
use_clean:
if (!clean) {
bch_err(c, "no superblock clean section found");
ret = -BCH_ERR_fsck_repair_impossible;
ret = bch_err_throw(c, fsck_repair_impossible);
goto err;
}


@ -103,20 +103,20 @@ static void bch2_sb_recovery_passes_to_text(struct printbuf *out,
prt_tab(out);
bch2_pr_time_units(out, le32_to_cpu(i->last_runtime) * NSEC_PER_SEC);
if (BCH_RECOVERY_PASS_NO_RATELIMIT(i))
prt_str(out, " (no ratelimit)");
prt_newline(out);
}
}
static void bch2_sb_recovery_pass_complete(struct bch_fs *c,
enum bch_recovery_pass pass,
s64 start_time)
static struct recovery_pass_entry *bch2_sb_recovery_pass_entry(struct bch_fs *c,
enum bch_recovery_pass pass)
{
enum bch_recovery_pass_stable stable = bch2_recovery_pass_to_stable(pass);
s64 end_time = ktime_get_real_seconds();
mutex_lock(&c->sb_lock);
struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
__clear_bit_le64(stable, ext->recovery_passes_required);
lockdep_assert_held(&c->sb_lock);
struct bch_sb_field_recovery_passes *r =
bch2_sb_field_get(c->disk_sb.sb, recovery_passes);
@ -127,15 +127,43 @@ static void bch2_sb_recovery_pass_complete(struct bch_fs *c,
r = bch2_sb_field_resize(&c->disk_sb, recovery_passes, u64s);
if (!r) {
bch_err(c, "error creating recovery_passes sb section");
goto out;
return NULL;
}
}
r->start[stable].last_run = cpu_to_le64(end_time);
r->start[stable].last_runtime = cpu_to_le32(max(0, end_time - start_time));
out:
return r->start + stable;
}
static void bch2_sb_recovery_pass_complete(struct bch_fs *c,
enum bch_recovery_pass pass,
s64 start_time)
{
guard(mutex)(&c->sb_lock);
struct bch_sb_field_ext *ext = bch2_sb_field_get(c->disk_sb.sb, ext);
__clear_bit_le64(bch2_recovery_pass_to_stable(pass),
ext->recovery_passes_required);
struct recovery_pass_entry *e = bch2_sb_recovery_pass_entry(c, pass);
if (e) {
s64 end_time = ktime_get_real_seconds();
e->last_run = cpu_to_le64(end_time);
e->last_runtime = cpu_to_le32(max(0, end_time - start_time));
SET_BCH_RECOVERY_PASS_NO_RATELIMIT(e, false);
}
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
void bch2_recovery_pass_set_no_ratelimit(struct bch_fs *c,
enum bch_recovery_pass pass)
{
guard(mutex)(&c->sb_lock);
struct recovery_pass_entry *e = bch2_sb_recovery_pass_entry(c, pass);
if (e && !BCH_RECOVERY_PASS_NO_RATELIMIT(e)) {
SET_BCH_RECOVERY_PASS_NO_RATELIMIT(e, true);
bch2_write_super(c);
}
}
static bool bch2_recovery_pass_want_ratelimit(struct bch_fs *c, enum bch_recovery_pass pass)
@ -157,6 +185,9 @@ static bool bch2_recovery_pass_want_ratelimit(struct bch_fs *c, enum bch_recover
*/
ret = (u64) le32_to_cpu(i->last_runtime) * 100 >
ktime_get_real_seconds() - le64_to_cpu(i->last_run);
if (BCH_RECOVERY_PASS_NO_RATELIMIT(i))
ret = false;
}
return ret;
@ -329,7 +360,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
(!in_recovery || r->curr_pass >= BCH_RECOVERY_PASS_set_may_go_rw)) {
prt_printf(out, "need recovery pass %s (%u), but already rw\n",
bch2_recovery_passes[pass], pass);
ret = -BCH_ERR_cannot_rewind_recovery;
ret = bch_err_throw(c, cannot_rewind_recovery);
goto out;
}
@ -349,7 +380,7 @@ int __bch2_run_explicit_recovery_pass(struct bch_fs *c,
if (rewind) {
r->next_pass = pass;
r->passes_complete &= (1ULL << pass) >> 1;
ret = -BCH_ERR_restart_recovery;
ret = bch_err_throw(c, restart_recovery);
}
} else {
prt_printf(out, "scheduling recovery pass %s (%u)%s\n",
@ -384,6 +415,35 @@ int bch2_run_explicit_recovery_pass(struct bch_fs *c,
return ret;
}
/*
* Returns 0 if @pass has run recently, otherwise one of
* -BCH_ERR_restart_recovery
* -BCH_ERR_recovery_pass_will_run
*/
int bch2_require_recovery_pass(struct bch_fs *c,
struct printbuf *out,
enum bch_recovery_pass pass)
{
if (test_bit(BCH_FS_in_recovery, &c->flags) &&
c->recovery.passes_complete & BIT_ULL(pass))
return 0;
guard(mutex)(&c->sb_lock);
if (bch2_recovery_pass_want_ratelimit(c, pass))
return 0;
enum bch_run_recovery_pass_flags flags = 0;
int ret = 0;
if (recovery_pass_needs_set(c, pass, &flags)) {
ret = __bch2_run_explicit_recovery_pass(c, out, pass, flags);
bch2_write_super(c);
}
return ret ?: bch_err_throw(c, recovery_pass_will_run);
}
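
Because the errcode table reparents restart_recovery and recovery_pass_will_run under BCH_ERR_recovery_will_run, a caller needs only one class check to tell "a pass will redo this" apart from a hard error. A hedged sketch of the intended pattern; maybe_repair() and the choice of pass are illustrative, and the real caller is __bch2_check_key_has_snapshot() further down:

static int maybe_repair(struct bch_fs *c, struct printbuf *out)
{
	int ret = bch2_require_recovery_pass(c, out,
				BCH_RECOVERY_PASS_check_snapshots);

	if (bch2_err_matches(ret, BCH_ERR_recovery_will_run))
		return 0;	/* pass is scheduled; it will redo this check */
	if (ret)
		return ret;

	/* pass ran recently enough: metadata is trustworthy, repair here */
	return 0;
}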
int bch2_run_print_explicit_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
{
enum bch_run_recovery_pass_flags flags = RUN_RECOVERY_PASS_nopersistent;


@ -10,6 +10,8 @@ u64 bch2_recovery_passes_from_stable(u64 v);
u64 bch2_fsck_recovery_passes(void);
void bch2_recovery_pass_set_no_ratelimit(struct bch_fs *, enum bch_recovery_pass);
enum bch_run_recovery_pass_flags {
RUN_RECOVERY_PASS_nopersistent = BIT(0),
RUN_RECOVERY_PASS_ratelimit = BIT(1),
@ -24,6 +26,9 @@ int bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *,
enum bch_recovery_pass,
enum bch_run_recovery_pass_flags);
int bch2_require_recovery_pass(struct bch_fs *, struct printbuf *,
enum bch_recovery_pass);
int bch2_run_online_recovery_passes(struct bch_fs *, u64);
int bch2_run_recovery_passes(struct bch_fs *, enum bch_recovery_pass);


@ -87,6 +87,8 @@ struct recovery_pass_entry {
__le32 flags;
};
LE32_BITMASK(BCH_RECOVERY_PASS_NO_RATELIMIT, struct recovery_pass_entry, flags, 0, 1)
struct bch_sb_field_recovery_passes {
struct bch_sb_field field;
struct recovery_pass_entry start[];


@ -312,7 +312,7 @@ static int trans_trigger_reflink_p_segment(struct btree_trans *trans,
if (!bkey_refcount_c(k)) {
if (!(flags & BTREE_TRIGGER_overwrite))
ret = -BCH_ERR_missing_indirect_extent;
ret = bch_err_throw(c, missing_indirect_extent);
goto next;
}
@ -612,7 +612,7 @@ s64 bch2_remap_range(struct bch_fs *c,
int ret = 0, ret2 = 0;
if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_reflink))
return -BCH_ERR_erofs_no_writes;
return bch_err_throw(c, erofs_no_writes);
bch2_check_set_feature(c, BCH_FEATURE_reflink);
@ -848,7 +848,7 @@ int bch2_gc_reflink_start(struct bch_fs *c)
struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
c->reflink_gc_nr++, GFP_KERNEL);
if (!r) {
ret = -BCH_ERR_ENOMEM_gc_reflink_start;
ret = bch_err_throw(c, ENOMEM_gc_reflink_start);
break;
}


@ -119,7 +119,7 @@ int bch2_replicas_entry_validate(struct bch_replicas_entry_v1 *r,
return 0;
bad:
bch2_replicas_entry_to_text(err, r);
return -BCH_ERR_invalid_replicas_entry;
return bch_err_throw(c, invalid_replicas_entry);
}
void bch2_cpu_replicas_to_text(struct printbuf *out,
@ -311,7 +311,7 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
!__replicas_has_entry(&c->replicas_gc, new_entry)) {
new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry);
if (!new_gc.entries) {
ret = -BCH_ERR_ENOMEM_cpu_replicas;
ret = bch_err_throw(c, ENOMEM_cpu_replicas);
goto err;
}
}
@ -319,7 +319,7 @@ static int bch2_mark_replicas_slowpath(struct bch_fs *c,
if (!__replicas_has_entry(&c->replicas, new_entry)) {
new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
if (!new_r.entries) {
ret = -BCH_ERR_ENOMEM_cpu_replicas;
ret = bch_err_throw(c, ENOMEM_cpu_replicas);
goto err;
}
@ -422,7 +422,7 @@ int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
if (!c->replicas_gc.entries) {
mutex_unlock(&c->sb_lock);
bch_err(c, "error allocating c->replicas_gc");
return -BCH_ERR_ENOMEM_replicas_gc;
return bch_err_throw(c, ENOMEM_replicas_gc);
}
for_each_cpu_replicas_entry(&c->replicas, e)
@ -458,7 +458,7 @@ retry:
new.entries = kcalloc(nr, new.entry_size, GFP_KERNEL);
if (!new.entries) {
bch_err(c, "error allocating c->replicas_gc");
return -BCH_ERR_ENOMEM_replicas_gc;
return bch_err_throw(c, ENOMEM_replicas_gc);
}
mutex_lock(&c->sb_lock);
@ -622,7 +622,7 @@ static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
DIV_ROUND_UP(bytes, sizeof(u64)));
if (!sb_r)
return -BCH_ERR_ENOSPC_sb_replicas;
return bch_err_throw(c, ENOSPC_sb_replicas);
bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas);
sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas_v0);
@ -667,7 +667,7 @@ static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
sb_r = bch2_sb_field_resize(&c->disk_sb, replicas,
DIV_ROUND_UP(bytes, sizeof(u64)));
if (!sb_r)
return -BCH_ERR_ENOSPC_sb_replicas;
return bch_err_throw(c, ENOSPC_sb_replicas);
bch2_sb_field_delete(&c->disk_sb, BCH_SB_FIELD_replicas_v0);
sb_r = bch2_sb_field_get(c->disk_sb.sb, replicas);


@ -417,7 +417,7 @@ int bch2_sb_downgrade_update(struct bch_fs *c)
d = bch2_sb_field_resize(&c->disk_sb, downgrade, sb_u64s);
if (!d) {
ret = -BCH_ERR_ENOSPC_sb_downgrade;
ret = bch_err_throw(c, ENOSPC_sb_downgrade);
goto out;
}


@ -101,7 +101,7 @@ static int sb_members_v2_resize_entries(struct bch_fs *c)
mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
if (!mi)
return -BCH_ERR_ENOSPC_sb_members_v2;
return bch_err_throw(c, ENOSPC_sb_members_v2);
for (int i = c->disk_sb.sb->nr_devices - 1; i >= 0; --i) {
void *dst = (void *) mi->_members + (i * sizeof(struct bch_member));


@ -54,7 +54,7 @@ int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
BTREE_ITER_with_updates, snapshot_tree, s);
if (bch2_err_matches(ret, ENOENT))
ret = -BCH_ERR_ENOENT_snapshot_tree;
ret = bch_err_throw(trans->c, ENOENT_snapshot_tree);
return ret;
}
@ -67,7 +67,7 @@ __bch2_snapshot_tree_create(struct btree_trans *trans)
struct bkey_i_snapshot_tree *s_t;
if (ret == -BCH_ERR_ENOSPC_btree_slot)
ret = -BCH_ERR_ENOSPC_snapshot_tree;
ret = bch_err_throw(trans->c, ENOSPC_snapshot_tree);
if (ret)
return ERR_PTR(ret);
@ -285,7 +285,7 @@ static int bch2_snapshot_table_make_room(struct bch_fs *c, u32 id)
mutex_lock(&c->snapshot_table_lock);
int ret = snapshot_t_mut(c, id)
? 0
: -BCH_ERR_ENOMEM_mark_snapshot;
: bch_err_throw(c, ENOMEM_mark_snapshot);
mutex_unlock(&c->snapshot_table_lock);
return ret;
}
@ -304,7 +304,7 @@ static int __bch2_mark_snapshot(struct btree_trans *trans,
t = snapshot_t_mut(c, id);
if (!t) {
ret = -BCH_ERR_ENOMEM_mark_snapshot;
ret = bch_err_throw(c, ENOMEM_mark_snapshot);
goto err;
}
@ -1006,7 +1006,7 @@ int bch2_reconstruct_snapshots(struct bch_fs *c)
"snapshot node %u from tree %s missing, recreate?", *id, buf.buf)) {
if (t->nr > 1) {
bch_err(c, "cannot reconstruct snapshot trees with multiple nodes");
ret = -BCH_ERR_fsck_repair_unimplemented;
ret = bch_err_throw(c, fsck_repair_unimplemented);
goto err;
}
@ -1045,19 +1045,39 @@ int __bch2_check_key_has_snapshot(struct btree_trans *trans,
ret = bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_internal_snapshot_node) ?: 1;
/*
* Snapshot missing: we should have caught this with btree_lost_data and
* kicked off reconstruct_snapshots, so if we end up here we have no
* idea what happened:
*/
if (fsck_err_on(state == SNAPSHOT_ID_empty,
trans, bkey_in_missing_snapshot,
"key in missing snapshot %s, delete?",
(bch2_btree_id_to_text(&buf, iter->btree_id),
prt_char(&buf, ' '),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
ret = bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_internal_snapshot_node) ?: 1;
if (state == SNAPSHOT_ID_empty) {
/*
* Snapshot missing: we should have caught this with btree_lost_data and
* kicked off reconstruct_snapshots, so if we end up here we have no
* idea what happened.
*
* Do not delete unless we know that subvolumes and snapshots
* are consistent:
*
* XXX:
*
* We could be smarter here, and instead of using the generic
* recovery pass ratelimiting, track if there have been any
* changes to the snapshots or inodes btrees since those passes
* last ran.
*/
ret = bch2_require_recovery_pass(c, &buf, BCH_RECOVERY_PASS_check_snapshots) ?: ret;
ret = bch2_require_recovery_pass(c, &buf, BCH_RECOVERY_PASS_check_subvols) ?: ret;
if (c->sb.btrees_lost_data & BIT_ULL(BTREE_ID_snapshots))
ret = bch2_require_recovery_pass(c, &buf, BCH_RECOVERY_PASS_reconstruct_snapshots) ?: ret;
unsigned repair_flags = FSCK_CAN_IGNORE | (!ret ? FSCK_CAN_FIX : 0);
if (__fsck_err(trans, repair_flags, bkey_in_missing_snapshot,
"key in missing snapshot %s, delete?",
(bch2_btree_id_to_text(&buf, iter->btree_id),
prt_char(&buf, ' '),
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter,
BTREE_UPDATE_internal_snapshot_node) ?: 1;
}
}
fsck_err:
printbuf_exit(&buf);
return ret;
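
The ret = bch2_require_recovery_pass(...) ?: ret chains above rely on GNU C's binary ?: operator: the left operand is the result when nonzero, so a newly required pass's error wins while a zero result preserves whatever was already in ret. A standalone illustration, assuming a GNU-compatible compiler:

#include <stdio.h>

/* a ?: b evaluates a once; the result is a when nonzero, else b */
static int first_nonzero(int a, int b)
{
	return a ?: b;
}

int main(void)
{
	printf("%d\n", first_nonzero(-5, 0));	/* -5: new error wins */
	printf("%d\n", first_nonzero(0, -7));	/* -7: earlier ret kept */
	return 0;
}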
@ -1276,7 +1296,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
goto err;
if (!k.k || !k.k->p.offset) {
ret = -BCH_ERR_ENOSPC_snapshot_create;
ret = bch_err_throw(c, ENOSPC_snapshot_create);
goto err;
}
@ -1878,6 +1898,8 @@ err:
d->running = false;
mutex_unlock(&d->progress_lock);
bch2_trans_put(trans);
bch2_recovery_pass_set_no_ratelimit(c, BCH_RECOVERY_PASS_check_snapshots);
out_unlock:
mutex_unlock(&d->lock);
if (!bch2_err_matches(ret, EROFS))
@ -1913,7 +1935,7 @@ void bch2_delete_dead_snapshots_async(struct bch_fs *c)
BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags));
if (!queue_work(c->write_ref_wq, &c->snapshot_delete.work))
if (!queue_work(system_long_wq, &c->snapshot_delete.work))
enumerated_ref_put(&c->writes, BCH_WRITE_REF_delete_dead_snapshots);
}

View File: libbcachefs/str_hash.c

@ -31,14 +31,15 @@ static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dir
}
}
static noinline int fsck_rename_dirent(struct btree_trans *trans,
struct snapshots_seen *s,
const struct bch_hash_desc desc,
struct bch_hash_info *hash_info,
struct bkey_s_c_dirent old)
static int bch2_fsck_rename_dirent(struct btree_trans *trans,
struct snapshots_seen *s,
const struct bch_hash_desc desc,
struct bch_hash_info *hash_info,
struct bkey_s_c_dirent old,
bool *updated_before_k_pos)
{
struct qstr old_name = bch2_dirent_get_name(old);
struct bkey_i_dirent *new = bch2_trans_kmalloc(trans, bkey_bytes(old.k) + 32);
struct bkey_i_dirent *new = bch2_trans_kmalloc(trans, BKEY_U64s_MAX * sizeof(u64));
int ret = PTR_ERR_OR_ZERO(new);
if (ret)
return ret;
@ -47,31 +48,37 @@ static noinline int fsck_rename_dirent(struct btree_trans *trans,
dirent_copy_target(new, old);
new->k.p = old.k->p;
char *renamed_buf = bch2_trans_kmalloc(trans, old_name.len + 20);
ret = PTR_ERR_OR_ZERO(renamed_buf);
if (ret)
return ret;
for (unsigned i = 0; i < 1000; i++) {
unsigned len = sprintf(new->v.d_name, "%.*s.fsck_renamed-%u",
old_name.len, old_name.name, i);
unsigned u64s = BKEY_U64s + dirent_val_u64s(len, 0);
new->k.u64s = BKEY_U64s_MAX;
if (u64s > U8_MAX)
return -EINVAL;
struct qstr renamed_name = (struct qstr) QSTR_INIT(renamed_buf,
sprintf(renamed_buf, "%.*s.fsck_renamed-%u",
old_name.len, old_name.name, i));
new->k.u64s = u64s;
ret = bch2_dirent_init_name(new, hash_info, &renamed_name, NULL);
if (ret)
return ret;
ret = bch2_hash_set_in_snapshot(trans, bch2_dirent_hash_desc, hash_info,
(subvol_inum) { 0, old.k->p.inode },
old.k->p.snapshot, &new->k_i,
BTREE_UPDATE_internal_snapshot_node);
BTREE_UPDATE_internal_snapshot_node|
STR_HASH_must_create);
if (ret && !bch2_err_matches(ret, EEXIST))
goto err;
if (!ret)
break;
if (!ret) {
if (bpos_lt(new->k.p, old.k->p))
*updated_before_k_pos = true;
break;
}
}
if (ret)
goto err;
ret = bch2_fsck_update_backpointers(trans, s, desc, hash_info, &new->k_i);
err:
ret = ret ?: bch2_fsck_update_backpointers(trans, s, desc, hash_info, &new->k_i);
bch_err_fn(trans->c, ret);
return ret;
}
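
The loop above tries up to 1000 candidate names of the form <old>.fsck_renamed-<i>, repeating while the STR_HASH_must_create insert reports EEXIST. A standalone sketch of just the name generation, with buffer sizing mirroring the renamed_buf allocation (old_name.len + 20); all values here are hypothetical:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *old_name = "dirent";
	/* ".fsck_renamed-" is 14 bytes; 3 digits plus NUL fit in the +20 */
	char buf[256 + 20];

	for (unsigned i = 0; i < 3; i++) {
		int len = snprintf(buf, sizeof(buf), "%.*s.fsck_renamed-%u",
				   (int) strlen(old_name), old_name, i);
		printf("candidate %u: %s (len %d)\n", i, buf, len);
	}
	return 0;
}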
@ -191,7 +198,7 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans,
#endif
bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
ret = -BCH_ERR_fsck_repair_unimplemented;
ret = bch_err_throw(c, fsck_repair_unimplemented);
goto err;
}
@ -226,53 +233,20 @@ static noinline int check_inode_hash_info_matches_root(struct btree_trans *trans
return ret;
}
int __bch2_str_hash_check_key(struct btree_trans *trans,
struct snapshots_seen *s,
const struct bch_hash_desc *desc,
struct bch_hash_info *hash_info,
struct btree_iter *k_iter, struct bkey_s_c hash_k)
/* Put a str_hash key in its proper location, checking for duplicates */
int bch2_str_hash_repair_key(struct btree_trans *trans,
struct snapshots_seen *s,
const struct bch_hash_desc *desc,
struct bch_hash_info *hash_info,
struct btree_iter *k_iter, struct bkey_s_c k,
struct btree_iter *dup_iter, struct bkey_s_c dup_k,
bool *updated_before_k_pos)
{
struct bch_fs *c = trans->c;
struct btree_iter iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c k;
bool free_snapshots_seen = false;
int ret = 0;
u64 hash = desc->hash_bkey(hash_info, hash_k);
if (hash_k.k->p.offset < hash)
goto bad_hash;
for_each_btree_key_norestart(trans, iter, desc->btree_id,
SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
BTREE_ITER_slots|
BTREE_ITER_with_updates, k, ret) {
if (bkey_eq(k.k->p, hash_k.k->p))
break;
if (k.k->type == desc->key_type &&
!desc->cmp_bkey(k, hash_k))
goto duplicate_entries;
if (bkey_deleted(k.k)) {
bch2_trans_iter_exit(trans, &iter);
goto bad_hash;
}
}
out:
bch2_trans_iter_exit(trans, &iter);
printbuf_exit(&buf);
if (free_snapshots_seen)
darray_exit(&s->ids);
return ret;
bad_hash:
/*
* Before doing any repair, check hash_info itself:
*/
ret = check_inode_hash_info_matches_root(trans, hash_k.k->p.inode, hash_info);
if (ret)
goto out;
if (!s) {
s = bch2_trans_kmalloc(trans, sizeof(*s));
ret = PTR_ERR_OR_ZERO(s);
@ -289,27 +263,27 @@ bad_hash:
free_snapshots_seen = true;
}
if (fsck_err(trans, hash_table_key_wrong_offset,
"hash table key at wrong offset: btree %s inode %llu offset %llu, hashed to %llu\n%s",
bch2_btree_id_str(desc->btree_id), hash_k.k->p.inode, hash_k.k->p.offset, hash,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf))) {
struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, hash_k);
if (IS_ERR(new))
return PTR_ERR(new);
if (!dup_k.k) {
struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
goto out;
k = bch2_hash_set_or_get_in_snapshot(trans, &iter, *desc, hash_info,
(subvol_inum) { 0, hash_k.k->p.inode },
hash_k.k->p.snapshot, new,
dup_k = bch2_hash_set_or_get_in_snapshot(trans, dup_iter, *desc, hash_info,
(subvol_inum) { 0, new->k.p.inode },
new->k.p.snapshot, new,
STR_HASH_must_create|
BTREE_ITER_with_updates|
BTREE_UPDATE_internal_snapshot_node);
ret = bkey_err(k);
ret = bkey_err(dup_k);
if (ret)
goto out;
if (k.k)
if (dup_k.k)
goto duplicate_entries;
if (bpos_lt(new->k.p, k.k->p))
*updated_before_k_pos = true;
ret = bch2_insert_snapshot_whiteouts(trans, desc->btree_id,
k_iter->pos, new->k.p) ?:
bch2_hash_delete_at(trans, *desc, hash_info, k_iter,
@ -318,39 +292,108 @@ bad_hash:
bch2_fsck_update_backpointers(trans, s, *desc, hash_info, new) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
-BCH_ERR_transaction_restart_commit;
goto out;
}
fsck_err:
goto out;
} else {
duplicate_entries:
ret = hash_pick_winner(trans, *desc, hash_info, hash_k, k);
if (ret < 0)
goto out;
ret = hash_pick_winner(trans, *desc, hash_info, k, dup_k);
if (ret < 0)
goto out;
if (!fsck_err(trans, hash_table_key_duplicate,
"duplicate hash table keys%s:\n%s",
ret != 2 ? "" : ", both point to valid inodes",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, hash_k),
prt_newline(&buf),
bch2_bkey_val_to_text(&buf, c, k),
buf.buf)))
goto out;
if (!fsck_err(trans, hash_table_key_duplicate,
"duplicate hash table keys%s:\n%s",
ret != 2 ? "" : ", both point to valid inodes",
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k),
prt_newline(&buf),
bch2_bkey_val_to_text(&buf, c, dup_k),
buf.buf)))
goto out;
switch (ret) {
case 0:
ret = bch2_hash_delete_at(trans, *desc, hash_info, k_iter, 0);
break;
case 1:
ret = bch2_hash_delete_at(trans, *desc, hash_info, &iter, 0);
break;
case 2:
ret = fsck_rename_dirent(trans, s, *desc, hash_info, bkey_s_c_to_dirent(hash_k)) ?:
bch2_hash_delete_at(trans, *desc, hash_info, k_iter, 0);
goto out;
switch (ret) {
case 0:
ret = bch2_hash_delete_at(trans, *desc, hash_info, k_iter, 0);
break;
case 1:
ret = bch2_hash_delete_at(trans, *desc, hash_info, dup_iter, 0);
break;
case 2:
ret = bch2_fsck_rename_dirent(trans, s, *desc, hash_info,
bkey_s_c_to_dirent(k),
updated_before_k_pos) ?:
bch2_hash_delete_at(trans, *desc, hash_info, k_iter,
BTREE_ITER_with_updates);
goto out;
}
ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
-BCH_ERR_transaction_restart_commit;
}
out:
fsck_err:
bch2_trans_iter_exit(trans, dup_iter);
printbuf_exit(&buf);
if (free_snapshots_seen)
darray_exit(&s->ids);
return ret;
}
ret = bch2_trans_commit(trans, NULL, NULL, 0) ?:
-BCH_ERR_transaction_restart_commit;
int __bch2_str_hash_check_key(struct btree_trans *trans,
struct snapshots_seen *s,
const struct bch_hash_desc *desc,
struct bch_hash_info *hash_info,
struct btree_iter *k_iter, struct bkey_s_c hash_k,
bool *updated_before_k_pos)
{
struct bch_fs *c = trans->c;
struct btree_iter iter = {};
struct printbuf buf = PRINTBUF;
struct bkey_s_c k;
int ret = 0;
u64 hash = desc->hash_bkey(hash_info, hash_k);
if (hash_k.k->p.offset < hash)
goto bad_hash;
for_each_btree_key_norestart(trans, iter, desc->btree_id,
SPOS(hash_k.k->p.inode, hash, hash_k.k->p.snapshot),
BTREE_ITER_slots|
BTREE_ITER_with_updates, k, ret) {
if (bkey_eq(k.k->p, hash_k.k->p))
break;
if (k.k->type == desc->key_type &&
!desc->cmp_bkey(k, hash_k)) {
ret = check_inode_hash_info_matches_root(trans, hash_k.k->p.inode,
hash_info) ?:
bch2_str_hash_repair_key(trans, s, desc, hash_info,
k_iter, hash_k,
&iter, k, updated_before_k_pos);
break;
}
if (bkey_deleted(k.k))
goto bad_hash;
}
bch2_trans_iter_exit(trans, &iter);
out:
fsck_err:
printbuf_exit(&buf);
return ret;
bad_hash:
bch2_trans_iter_exit(trans, &iter);
/*
* Before doing any repair, check hash_info itself:
*/
ret = check_inode_hash_info_matches_root(trans, hash_k.k->p.inode, hash_info);
if (ret)
goto out;
if (fsck_err(trans, hash_table_key_wrong_offset,
"hash table key at wrong offset: should be at %llu\n%s",
hash,
(bch2_bkey_val_to_text(&buf, c, hash_k), buf.buf)))
ret = bch2_str_hash_repair_key(trans, s, desc, hash_info,
k_iter, hash_k,
&iter, bkey_s_c_null,
updated_before_k_pos);
goto out;
}
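
For orientation: these btrees implement an open-addressed hash table, so a dirent or xattr key's offset should equal the hash of its name, with collisions spilling into the slots that follow. That is why the walk starts at SPOS(inode, hash, snapshot) and treats an offset below the hash, or an empty slot reached before the key, as a misplaced entry. A toy model of the probing idea, hypothetical and greatly simplified, with no relation to the real hash functions:

#include <stdio.h>
#include <string.h>

#define NSLOTS 16

struct slot { const char *name; };	/* NULL name = empty slot */

static unsigned toy_hash(const char *s)
{
	unsigned h = 0;
	while (*s)
		h = h * 31 + (unsigned char) *s++;
	return h % NSLOTS;
}

/* Probe forward from the hash position, as the check above does:
 * stop at a match, or at an empty slot (key definitely absent). */
static int toy_lookup(const struct slot *t, const char *name)
{
	for (unsigned i = toy_hash(name); i < NSLOTS; i++) {
		if (!t[i].name)
			return -1;
		if (!strcmp(t[i].name, name))
			return (int) i;
	}
	return -1;
}

int main(void)
{
	struct slot table[NSLOTS] = {0};

	table[toy_hash("foo")].name = "foo";
	printf("foo -> slot %d (hash %u)\n",
	       toy_lookup(table, "foo"), toy_hash("foo"));
	return 0;
}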

View File: libbcachefs/str_hash.h

@ -261,6 +261,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
struct bkey_i *insert,
enum btree_iter_update_trigger_flags flags)
{
struct bch_fs *c = trans->c;
struct btree_iter slot = {};
struct bkey_s_c k;
bool found = false;
@ -288,7 +289,7 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
}
if (!ret)
ret = -BCH_ERR_ENOSPC_str_hash_create;
ret = bch_err_throw(c, ENOSPC_str_hash_create);
out:
bch2_trans_iter_exit(trans, &slot);
bch2_trans_iter_exit(trans, iter);
@ -300,7 +301,7 @@ not_found:
bch2_trans_iter_exit(trans, &slot);
return k;
} else if (!found && (flags & STR_HASH_must_replace)) {
ret = -BCH_ERR_ENOENT_str_hash_set_must_replace;
ret = bch_err_throw(c, ENOENT_str_hash_set_must_replace);
} else {
if (!found && slot.path)
swap(*iter, slot);
@ -328,7 +329,7 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
return ret;
if (k.k) {
bch2_trans_iter_exit(trans, &iter);
return -BCH_ERR_EEXIST_str_hash_set;
return bch_err_throw(trans->c, EEXIST_str_hash_set);
}
return 0;
@ -397,17 +398,27 @@ int bch2_hash_delete(struct btree_trans *trans,
int bch2_repair_inode_hash_info(struct btree_trans *, struct bch_inode_unpacked *);
struct snapshots_seen;
int bch2_str_hash_repair_key(struct btree_trans *,
struct snapshots_seen *,
const struct bch_hash_desc *,
struct bch_hash_info *,
struct btree_iter *, struct bkey_s_c,
struct btree_iter *, struct bkey_s_c,
bool *);
int __bch2_str_hash_check_key(struct btree_trans *,
struct snapshots_seen *,
const struct bch_hash_desc *,
struct bch_hash_info *,
struct btree_iter *, struct bkey_s_c);
struct btree_iter *, struct bkey_s_c,
bool *);
static inline int bch2_str_hash_check_key(struct btree_trans *trans,
struct snapshots_seen *s,
const struct bch_hash_desc *desc,
struct bch_hash_info *hash_info,
struct btree_iter *k_iter, struct bkey_s_c hash_k)
struct btree_iter *k_iter, struct bkey_s_c hash_k,
bool *updated_before_k_pos)
{
if (hash_k.k->type != desc->key_type)
return 0;
@ -415,7 +426,8 @@ static inline int bch2_str_hash_check_key(struct btree_trans *trans,
if (likely(desc->hash_bkey(hash_info, hash_k) == hash_k.k->p.offset))
return 0;
return __bch2_str_hash_check_key(trans, s, desc, hash_info, k_iter, hash_k);
return __bch2_str_hash_check_key(trans, s, desc, hash_info, k_iter, hash_k,
updated_before_k_pos);
}
#endif /* _BCACHEFS_STR_HASH_H */

View File: libbcachefs/subvolume.c

@ -255,6 +255,13 @@ void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
prt_printf(out, " creation_parent %u", le32_to_cpu(s.v->creation_parent));
prt_printf(out, " fs_parent %u", le32_to_cpu(s.v->fs_path_parent));
}
if (BCH_SUBVOLUME_RO(s.v))
prt_printf(out, " ro");
if (BCH_SUBVOLUME_SNAP(s.v))
prt_printf(out, " snapshot");
if (BCH_SUBVOLUME_UNLINKED(s.v))
prt_printf(out, " unlinked");
}
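
With these flags a subvolume's state is now visible in its rendered line; for example, a read-only snapshot subvolume would gain " ro snapshot" after the fields printed above, something like (all values hypothetical):

creation_parent 1 fs_parent 1 ro snapshot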
static int subvolume_children_mod(struct btree_trans *trans, struct bpos pos, bool set)
@ -482,9 +489,12 @@ err:
static int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid)
{
return bch2_subvolumes_reparent(trans, subvolid) ?:
int ret = bch2_subvolumes_reparent(trans, subvolid) ?:
commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
__bch2_subvolume_delete(trans, subvolid));
bch2_recovery_pass_set_no_ratelimit(trans->c, BCH_RECOVERY_PASS_check_subvols);
return ret;
}
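
Clearing the ratelimit on check_subvols after a deletion mirrors what the snapshot-deletion path above does for check_snapshots: once the subvolumes btree has been mutated, a subsequently required pass should not be deferred by the generic recovery-pass ratelimiting noted in the earlier XXX comment.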
static void bch2_subvolume_wait_for_pagecache_and_delete(struct work_struct *work)
@ -593,7 +603,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,
ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
BTREE_ID_subvolumes, POS(0, U32_MAX));
if (ret == -BCH_ERR_ENOSPC_btree_slot)
ret = -BCH_ERR_ENOSPC_subvolume_create;
ret = bch_err_throw(c, ENOSPC_subvolume_create);
if (ret)
return ret;
@ -699,8 +709,9 @@ static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
return ret;
if (!bkey_is_inode(k.k)) {
bch_err(trans->c, "root inode not found");
ret = -BCH_ERR_ENOENT_inode;
struct bch_fs *c = trans->c;
bch_err(c, "root inode not found");
ret = bch_err_throw(c, ENOENT_inode);
goto err;
}

View File: libbcachefs/super-io.c

@ -1112,7 +1112,7 @@ int bch2_write_super(struct bch_fs *c)
prt_str(&buf, ")");
bch2_fs_fatal_error(c, ": %s", buf.buf);
printbuf_exit(&buf);
ret = -BCH_ERR_sb_not_downgraded;
ret = bch_err_throw(c, sb_not_downgraded);
goto out;
}
@ -1142,7 +1142,7 @@ int bch2_write_super(struct bch_fs *c)
if (c->opts.errors != BCH_ON_ERROR_continue &&
c->opts.errors != BCH_ON_ERROR_fix_safe) {
ret = -BCH_ERR_erofs_sb_err;
ret = bch_err_throw(c, erofs_sb_err);
bch2_fs_fatal_error(c, "%s", buf.buf);
} else {
bch_err(c, "%s", buf.buf);
@ -1161,7 +1161,7 @@ int bch2_write_super(struct bch_fs *c)
ca->disk_sb.seq);
bch2_fs_fatal_error(c, "%s", buf.buf);
printbuf_exit(&buf);
ret = -BCH_ERR_erofs_sb_err;
ret = bch_err_throw(c, erofs_sb_err);
}
}
@ -1215,7 +1215,7 @@ int bch2_write_super(struct bch_fs *c)
!can_mount_with_written), c,
": Unable to write superblock to sufficient devices (from %ps)",
(void *) _RET_IP_))
ret = -BCH_ERR_erofs_sb_err;
ret = bch_err_throw(c, erofs_sb_err);
out:
/* Make new options visible after they're persistent: */
bch2_sb_update(c);

View File: libbcachefs/super.c

@ -474,16 +474,16 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
BUG_ON(!test_bit(BCH_FS_may_go_rw, &c->flags));
if (WARN_ON(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)))
return -BCH_ERR_erofs_no_alloc_info;
return bch_err_throw(c, erofs_no_alloc_info);
if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
bch_err(c, "cannot go rw, unfixed btree errors");
return -BCH_ERR_erofs_unfixed_errors;
return bch_err_throw(c, erofs_unfixed_errors);
}
if (c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) {
bch_err(c, "cannot go rw, filesystem is an unresized image file");
return -BCH_ERR_erofs_filesystem_full;
return bch_err_throw(c, erofs_filesystem_full);
}
if (test_bit(BCH_FS_rw, &c->flags))
@ -564,13 +564,13 @@ int bch2_fs_read_write(struct bch_fs *c)
{
if (c->opts.recovery_pass_last &&
c->opts.recovery_pass_last < BCH_RECOVERY_PASS_journal_replay)
return -BCH_ERR_erofs_norecovery;
return bch_err_throw(c, erofs_norecovery);
if (c->opts.nochanges)
return -BCH_ERR_erofs_nochanges;
return bch_err_throw(c, erofs_nochanges);
if (c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))
return -BCH_ERR_erofs_no_alloc_info;
return bch_err_throw(c, erofs_no_alloc_info);
return __bch2_fs_read_write(c, false);
}
@ -755,7 +755,7 @@ static int bch2_fs_online(struct bch_fs *c)
if (c->sb.multi_device &&
__bch2_uuid_to_fs(c->sb.uuid)) {
bch_err(c, "filesystem UUID already open");
return -BCH_ERR_filesystem_uuid_already_open;
return bch_err_throw(c, filesystem_uuid_already_open);
}
ret = bch2_fs_chardev_init(c);
@ -814,7 +814,7 @@ static int bch2_fs_init_rw(struct bch_fs *c)
WQ_HIGHPRI|WQ_FREEZABLE|WQ_MEM_RECLAIM, 1)) ||
!(c->write_ref_wq = alloc_workqueue("bcachefs_write_ref",
WQ_FREEZABLE, 0)))
return -BCH_ERR_ENOMEM_fs_other_alloc;
return bch_err_throw(c, ENOMEM_fs_other_alloc);
int ret = bch2_fs_btree_interior_update_init(c) ?:
bch2_fs_btree_write_buffer_init(c) ?:
@ -995,7 +995,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
c->opts.btree_node_size) ||
mempool_init_kmalloc_pool(&c->large_bkey_pool, 1, 2048)) {
ret = -BCH_ERR_ENOMEM_fs_other_alloc;
ret = bch_err_throw(c, ENOMEM_fs_other_alloc);
goto err;
}
@ -1155,7 +1155,7 @@ int bch2_fs_start(struct bch_fs *c)
unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
if (!bch2_fs_may_start(c))
return -BCH_ERR_insufficient_devices_to_start;
return bch_err_throw(c, insufficient_devices_to_start);
down_write(&c->state_lock);
mutex_lock(&c->sb_lock);
@ -1166,7 +1166,7 @@ int bch2_fs_start(struct bch_fs *c)
sizeof(struct bch_sb_field_ext) / sizeof(u64))) {
mutex_unlock(&c->sb_lock);
up_write(&c->state_lock);
ret = -BCH_ERR_ENOSPC_sb;
ret = bch_err_throw(c, ENOSPC_sb);
goto err;
}
@ -1208,7 +1208,7 @@ int bch2_fs_start(struct bch_fs *c)
goto err;
if (bch2_fs_init_fault("fs_start")) {
ret = -BCH_ERR_injected_fs_start;
ret = bch_err_throw(c, injected_fs_start);
goto err;
}
@ -1235,11 +1235,11 @@ static int bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
struct bch_member m = bch2_sb_member_get(sb, sb->dev_idx);
if (le16_to_cpu(sb->block_size) != block_sectors(c))
return -BCH_ERR_mismatched_block_size;
return bch_err_throw(c, mismatched_block_size);
if (le16_to_cpu(m.bucket_size) <
BCH_SB_BTREE_NODE_SIZE(c->disk_sb.sb))
return -BCH_ERR_bucket_size_too_small;
return bch_err_throw(c, bucket_size_too_small);
return 0;
}
@ -1550,7 +1550,7 @@ static int bch2_dev_alloc(struct bch_fs *c, unsigned dev_idx)
bch2_dev_attach(c, ca, dev_idx);
return 0;
err:
return -BCH_ERR_ENOMEM_dev_alloc;
return bch_err_throw(c, ENOMEM_dev_alloc);
}
static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
@ -1560,13 +1560,13 @@ static int __bch2_dev_attach_bdev(struct bch_dev *ca, struct bch_sb_handle *sb)
if (bch2_dev_is_online(ca)) {
bch_err(ca, "already have device online in slot %u",
sb->sb->dev_idx);
return -BCH_ERR_device_already_online;
return bch_err_throw(ca->fs, device_already_online);
}
if (get_capacity(sb->bdev->bd_disk) <
ca->mi.bucket_size * ca->mi.nbuckets) {
bch_err(ca, "cannot online: device too small");
return -BCH_ERR_device_size_too_small;
return bch_err_throw(ca->fs, device_size_too_small);
}
BUG_ON(!enumerated_ref_is_zero(&ca->io_ref[READ]));
@ -1718,7 +1718,7 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
return 0;
if (!bch2_dev_state_allowed(c, ca, new_state, flags))
return -BCH_ERR_device_state_not_allowed;
return bch_err_throw(c, device_state_not_allowed);
if (new_state != BCH_MEMBER_STATE_rw)
__bch2_dev_read_only(c, ca);
@ -1771,7 +1771,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot remove without losing data");
ret = -BCH_ERR_device_state_not_allowed;
ret = bch_err_throw(c, device_state_not_allowed);
goto err;
}
@ -1907,7 +1907,7 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
if (list_empty(&c->list)) {
mutex_lock(&bch_fs_list_lock);
if (__bch2_uuid_to_fs(c->sb.uuid))
ret = -BCH_ERR_filesystem_uuid_already_open;
ret = bch_err_throw(c, filesystem_uuid_already_open);
else
list_add(&c->list, &bch_fs_list);
mutex_unlock(&bch_fs_list_lock);
@ -2094,7 +2094,7 @@ int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot offline required disk");
up_write(&c->state_lock);
return -BCH_ERR_device_state_not_allowed;
return bch_err_throw(c, device_state_not_allowed);
}
__bch2_dev_offline(c, ca);
@ -2133,7 +2133,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
if (nbuckets > BCH_MEMBER_NBUCKETS_MAX) {
bch_err(ca, "New device size too big (%llu greater than max %u)",
nbuckets, BCH_MEMBER_NBUCKETS_MAX);
ret = -BCH_ERR_device_size_too_big;
ret = bch_err_throw(c, device_size_too_big);
goto err;
}
@ -2141,7 +2141,7 @@ int bch2_dev_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
get_capacity(ca->disk_sb.bdev->bd_disk) <
ca->mi.bucket_size * nbuckets) {
bch_err(ca, "New size larger than device");
ret = -BCH_ERR_device_size_too_small;
ret = bch_err_throw(c, device_size_too_small);
goto err;
}
@ -2376,7 +2376,7 @@ struct bch_fs *bch2_fs_open(darray_const_str *devices,
}
if (opts->nochanges && !opts->read_only) {
ret = -BCH_ERR_erofs_nochanges;
ret = bch_err_throw(c, erofs_nochanges);
goto err_print;
}

View File: libbcachefs/trace.h

@ -199,6 +199,50 @@ DECLARE_EVENT_CLASS(bio,
(unsigned long long)__entry->sector, __entry->nr_sector)
);
/* errors */
TRACE_EVENT(error_throw,
TP_PROTO(struct bch_fs *c, int bch_err, unsigned long ip),
TP_ARGS(c, bch_err, ip),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(int, err )
__array(char, err_str, 32 )
__array(char, ip, 32 )
),
TP_fast_assign(
__entry->dev = c->dev;
__entry->err = bch_err;
strscpy(__entry->err_str, bch2_err_str(bch_err), sizeof(__entry->err_str));
snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
),
TP_printk("%d,%d %s ret %s", MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->ip, __entry->err_str)
);
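
Unlike error_downcast below, error_throw also records the filesystem's dev_t, so throws can be filtered per device; with the event enabled in tracefs (events/bcachefs/error_throw, assuming the usual trace-system name) the %ps-formatted ip identifies the call site bch_err_throw() passed in.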
TRACE_EVENT(error_downcast,
TP_PROTO(int bch_err, int std_err, unsigned long ip),
TP_ARGS(bch_err, std_err, ip),
TP_STRUCT__entry(
__array(char, bch_err, 32 )
__array(char, std_err, 32 )
__array(char, ip, 32 )
),
TP_fast_assign(
strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
),
TP_printk("%s ret %s -> %s %s", __entry->ip,
__entry->bch_err, __entry->std_err, __entry->ip)
);
/* disk_accounting.c */
TRACE_EVENT(accounting_mem_insert,
@ -1446,25 +1490,6 @@ DEFINE_EVENT(fs_str, io_move_evacuate_bucket,
TP_ARGS(c, str)
);
TRACE_EVENT(error_downcast,
TP_PROTO(int bch_err, int std_err, unsigned long ip),
TP_ARGS(bch_err, std_err, ip),
TP_STRUCT__entry(
__array(char, bch_err, 32 )
__array(char, std_err, 32 )
__array(char, ip, 32 )
),
TP_fast_assign(
strscpy(__entry->bch_err, bch2_err_str(bch_err), sizeof(__entry->bch_err));
strscpy(__entry->std_err, bch2_err_str(std_err), sizeof(__entry->std_err));
snprintf(__entry->ip, sizeof(__entry->ip), "%ps", (void *) ip);
),
TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
);
#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
TRACE_EVENT(update_by_path,