Update bcachefs sources to 20342b5217ca bcachefs: Plumb trans_kmalloc ip to trans_log_msg

Kent Overstreet 2025-06-19 13:12:31 -04:00
parent 35c7f2b4e3
commit 10ed83353b
24 changed files with 138 additions and 103 deletions


@@ -1 +1 @@
4af8a1ac90dcd9028d5a53b4487aa0d3a47f3de4
20342b5217ca8d5148ac9637ae6820030e0bcbf6


@@ -1406,6 +1406,9 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite
: BCH_DATA_free;
struct printbuf buf = PRINTBUF;
unsigned fsck_flags = (async_repair ? FSCK_ERR_NO_LOG : 0)|
FSCK_CAN_FIX|FSCK_CAN_IGNORE;
struct bpos bucket = iter->pos;
bucket.offset &= ~(~0ULL << 56);
u64 genbits = iter->pos.offset & (~0ULL << 56);
@@ -1419,9 +1422,10 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite
return ret;
if (!bch2_dev_bucket_exists(c, bucket)) {
if (fsck_err(trans, need_discard_freespace_key_to_invalid_dev_bucket,
"entry in %s btree for nonexistant dev:bucket %llu:%llu",
bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
if (__fsck_err(trans, fsck_flags,
need_discard_freespace_key_to_invalid_dev_bucket,
"entry in %s btree for nonexistant dev:bucket %llu:%llu",
bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
goto delete;
ret = 1;
goto out;
@@ -1433,7 +1437,8 @@ int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_ite
if (a->data_type != state ||
(state == BCH_DATA_free &&
genbits != alloc_freespace_genbits(*a))) {
if (fsck_err(trans, need_discard_freespace_key_bad,
if (__fsck_err(trans, fsck_flags,
need_discard_freespace_key_bad,
"%s\nincorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
bch2_btree_id_str(iter->btree_id),


@@ -863,7 +863,7 @@ struct bch_fs {
DARRAY(enum bcachefs_metadata_version)
incompat_versions_requested;
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
struct unicode_map *cf_encoding;
#endif


@@ -508,6 +508,7 @@ again:
* to handle a transaction restart - this code needs to be rewritten
* when we start doing online topology repair
*/
bch2_trans_unlock_long(trans);
if (mustfix_fsck_err_on(!have_child,
c, btree_node_topology_interior_node_empty,
"empty interior btree node at %s", buf.buf))


@@ -3194,6 +3194,10 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long
if (WARN_ON_ONCE(new_bytes > BTREE_TRANS_MEM_MAX)) {
#ifdef CONFIG_BCACHEFS_TRANS_KMALLOC_TRACE
struct printbuf buf = PRINTBUF;
bch2_log_msg_start(c, &buf);
prt_printf(&buf, "bump allocator exceeded BTREE_TRANS_MEM_MAX (%u)\n",
BTREE_TRANS_MEM_MAX);
bch2_trans_kmalloc_trace_to_text(&buf, &trans->trans_kmalloc_trace);
bch2_print_str(c, KERN_ERR, buf.buf);
printbuf_exit(&buf);
@@ -3319,7 +3323,7 @@ u32 bch2_trans_begin(struct btree_trans *trans)
trans->restart_count++;
trans->mem_top = 0;
if (trans->restarted == BCH_ERR_transaction_restart_mem_realloced) {
if (unlikely(trans->restarted == BCH_ERR_transaction_restart_mem_realloced)) {
EBUG_ON(!trans->mem || !trans->mem_bytes);
unsigned new_bytes = trans->realloc_bytes_required;
void *new_mem = krealloc(trans->mem, new_bytes, GFP_NOWAIT|__GFP_NOWARN);


@@ -546,23 +546,29 @@ int bch2_btree_insert_clone_trans(struct btree_trans *trans,
void *__bch2_trans_subbuf_alloc(struct btree_trans *trans,
struct btree_trans_subbuf *buf,
unsigned u64s)
unsigned u64s, ulong ip)
{
unsigned new_top = buf->u64s + u64s;
unsigned old_size = buf->size;
unsigned new_size = buf->size;
if (new_top > buf->size)
buf->size = roundup_pow_of_two(new_top);
BUG_ON(roundup_pow_of_two(new_top) > U16_MAX);
void *n = bch2_trans_kmalloc_nomemzero(trans, buf->size * sizeof(u64));
if (new_top > new_size)
new_size = roundup_pow_of_two(new_top);
void *n = bch2_trans_kmalloc_nomemzero_ip(trans, new_size * sizeof(u64), ip);
if (IS_ERR(n))
return n;
unsigned offset = (u64 *) n - (u64 *) trans->mem;
BUG_ON(offset > U16_MAX);
if (buf->u64s)
memcpy(n,
btree_trans_subbuf_base(trans, buf),
old_size * sizeof(u64));
buf->size * sizeof(u64));
buf->base = (u64 *) n - (u64 *) trans->mem;
buf->size = new_size;
void *p = btree_trans_subbuf_top(trans, buf);
buf->u64s = new_top;
@@ -807,11 +813,11 @@ int bch2_btree_bit_mod_buffered(struct btree_trans *trans, enum btree_id btree,
return bch2_trans_update_buffered(trans, btree, &k);
}
static int __bch2_trans_log_str(struct btree_trans *trans, const char *str, unsigned len)
static int __bch2_trans_log_str(struct btree_trans *trans, const char *str, unsigned len, ulong ip)
{
unsigned u64s = DIV_ROUND_UP(len, sizeof(u64));
struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(u64s));
struct jset_entry *e = bch2_trans_jset_entry_alloc_ip(trans, jset_u64s(u64s), ip);
int ret = PTR_ERR_OR_ZERO(e);
if (ret)
return ret;
@@ -824,7 +830,7 @@ static int __bch2_trans_log_str(struct btree_trans *trans, const char *str, unsi
int bch2_trans_log_str(struct btree_trans *trans, const char *str)
{
return __bch2_trans_log_str(trans, str, strlen(str));
return __bch2_trans_log_str(trans, str, strlen(str), _RET_IP_);
}
int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf)
@@ -833,13 +839,14 @@ int bch2_trans_log_msg(struct btree_trans *trans, struct printbuf *buf)
if (ret)
return ret;
return __bch2_trans_log_str(trans, buf->buf, buf->pos);
return __bch2_trans_log_str(trans, buf->buf, buf->pos, _RET_IP_);
}
int bch2_trans_log_bkey(struct btree_trans *trans, enum btree_id btree,
unsigned level, struct bkey_i *k)
{
struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(k->k.u64s));
struct jset_entry *e = bch2_trans_jset_entry_alloc_ip(trans,
jset_u64s(k->k.u64s), _RET_IP_);
int ret = PTR_ERR_OR_ZERO(e);
if (ret)
return ret;


@@ -137,19 +137,27 @@ static inline void *btree_trans_subbuf_top(struct btree_trans *trans,
void *__bch2_trans_subbuf_alloc(struct btree_trans *,
struct btree_trans_subbuf *,
unsigned);
unsigned, ulong);
static inline void *
bch2_trans_subbuf_alloc_ip(struct btree_trans *trans,
struct btree_trans_subbuf *buf,
unsigned u64s, ulong ip)
{
if (buf->u64s + u64s > buf->size)
return __bch2_trans_subbuf_alloc(trans, buf, u64s, ip);
void *p = btree_trans_subbuf_top(trans, buf);
buf->u64s += u64s;
return p;
}
static inline void *
bch2_trans_subbuf_alloc(struct btree_trans *trans,
struct btree_trans_subbuf *buf,
unsigned u64s)
{
if (buf->u64s + u64s > buf->size)
return __bch2_trans_subbuf_alloc(trans, buf, u64s);
void *p = btree_trans_subbuf_top(trans, buf);
buf->u64s += u64s;
return p;
return bch2_trans_subbuf_alloc_ip(trans, buf, u64s, _THIS_IP_);
}
static inline struct jset_entry *btree_trans_journal_entries_start(struct btree_trans *trans)
@@ -162,10 +170,16 @@ static inline struct jset_entry *btree_trans_journal_entries_top(struct btree_tr
return btree_trans_subbuf_top(trans, &trans->journal_entries);
}
static inline struct jset_entry *
bch2_trans_jset_entry_alloc_ip(struct btree_trans *trans, unsigned u64s, ulong ip)
{
return bch2_trans_subbuf_alloc_ip(trans, &trans->journal_entries, u64s, ip);
}
static inline struct jset_entry *
bch2_trans_jset_entry_alloc(struct btree_trans *trans, unsigned u64s)
{
return bch2_trans_subbuf_alloc(trans, &trans->journal_entries, u64s);
return bch2_trans_jset_entry_alloc_ip(trans, u64s, _THIS_IP_);
}
int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bkey_i *);


@@ -290,8 +290,6 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
struct bch_fs *c = trans->c;
struct write_point *wp;
struct btree *b;
BKEY_PADDED_ONSTACK(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
struct open_buckets obs = { .nr = 0 };
struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
enum bch_watermark watermark = flags & BCH_WATERMARK_MASK;
unsigned nr_reserve = watermark < BCH_WATERMARK_reclaim
@@ -310,8 +308,8 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
struct btree_alloc *a =
&c->btree_reserve_cache[--c->btree_reserve_cache_nr];
obs = a->ob;
bkey_copy(&tmp.k, &a->k);
bkey_copy(&b->key, &a->k);
b->ob = a->ob;
mutex_unlock(&c->btree_reserve_cache_lock);
goto out;
}
@@ -345,14 +343,12 @@ retry:
goto retry;
}
bkey_btree_ptr_v2_init(&tmp.k);
bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c), false);
bkey_btree_ptr_v2_init(&b->key);
bch2_alloc_sectors_append_ptrs(c, wp, &b->key, btree_sectors(c), false);
bch2_open_bucket_get(c, wp, &obs);
bch2_open_bucket_get(c, wp, &b->ob);
bch2_alloc_sectors_done(c, wp);
out:
bkey_copy(&b->key, &tmp.k);
b->ob = obs;
six_unlock_write(&b->c.lock);
six_unlock_intent(&b->c.lock);
@@ -513,30 +509,25 @@ static int bch2_btree_reserve_get(struct btree_trans *trans,
unsigned flags,
struct closure *cl)
{
struct btree *b;
unsigned interior;
int ret = 0;
BUG_ON(nr_nodes[0] + nr_nodes[1] > BTREE_RESERVE_MAX);
/*
* Protects reaping from the btree node cache and using the btree node
* open bucket reserve:
*/
ret = bch2_btree_cache_cannibalize_lock(trans, cl);
int ret = bch2_btree_cache_cannibalize_lock(trans, cl);
if (ret)
return ret;
for (interior = 0; interior < 2; interior++) {
for (unsigned interior = 0; interior < 2; interior++) {
struct prealloc_nodes *p = as->prealloc_nodes + interior;
while (p->nr < nr_nodes[interior]) {
b = __bch2_btree_node_alloc(trans, &as->disk_res, cl,
interior, target, flags);
if (IS_ERR(b)) {
ret = PTR_ERR(b);
struct btree *b = __bch2_btree_node_alloc(trans, &as->disk_res,
cl, interior, target, flags);
ret = PTR_ERR_OR_ZERO(b);
if (ret)
goto err;
}
p->b[p->nr++] = b;
}


@@ -249,6 +249,7 @@ static int data_update_invalid_bkey(struct data_update *m,
bch2_bkey_val_to_text(&buf, c, k);
prt_str(&buf, "\nnew: ");
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
prt_newline(&buf);
bch2_fs_emergency_read_only2(c, &buf);


@@ -18,7 +18,7 @@ int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
{
*out_cf = (struct qstr) QSTR_INIT(NULL, 0);
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
unsigned char *buf = bch2_trans_kmalloc(trans, BCH_NAME_MAX + 1);
int ret = PTR_ERR_OR_ZERO(buf);
if (ret)
@@ -31,7 +31,7 @@ int bch2_casefold(struct btree_trans *trans, const struct bch_hash_info *info,
*out_cf = (struct qstr) QSTR_INIT(buf, ret);
return 0;
#else
return -EOPNOTSUPP;
return bch_err_throw(trans->c, no_casefolding_without_utf8);
#endif
}
@@ -231,7 +231,8 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
prt_printf(out, " type %s", bch2_d_type_str(d.v->d_type));
}
int bch2_dirent_init_name(struct bkey_i_dirent *dirent,
int bch2_dirent_init_name(struct bch_fs *c,
struct bkey_i_dirent *dirent,
const struct bch_hash_info *hash_info,
const struct qstr *name,
const struct qstr *cf_name)
@@ -251,7 +252,7 @@ int bch2_dirent_init_name(struct bkey_i_dirent *dirent,
offsetof(struct bch_dirent, d_name) -
name->len);
} else {
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
memcpy(&dirent->v.d_cf_name_block.d_names[0], name->name, name->len);
char *cf_out = &dirent->v.d_cf_name_block.d_names[name->len];
@@ -278,7 +279,7 @@ int bch2_dirent_init_name(struct bkey_i_dirent *dirent,
EBUG_ON(bch2_dirent_get_casefold_name(dirent_i_to_s_c(dirent)).len != cf_len);
#else
return -EOPNOTSUPP;
return bch_err_throw(c, no_casefolding_without_utf8);
#endif
}
@@ -313,7 +314,7 @@ struct bkey_i_dirent *bch2_dirent_create_key(struct btree_trans *trans,
dirent->v.d_type = type;
dirent->v.d_unused = 0;
int ret = bch2_dirent_init_name(dirent, hash_info, name, cf_name);
int ret = bch2_dirent_init_name(trans->c, dirent, hash_info, name, cf_name);
if (ret)
return ERR_PTR(ret);


@@ -59,7 +59,8 @@ static inline void dirent_copy_target(struct bkey_i_dirent *dst,
dst->v.d_type = src.v->d_type;
}
int bch2_dirent_init_name(struct bkey_i_dirent *,
int bch2_dirent_init_name(struct bch_fs *,
struct bkey_i_dirent *,
const struct bch_hash_info *,
const struct qstr *,
const struct qstr *);


@@ -5,6 +5,7 @@
#define BCH_ERRCODES() \
x(ERANGE, ERANGE_option_too_small) \
x(ERANGE, ERANGE_option_too_big) \
x(ERANGE, projid_too_big) \
x(EINVAL, injected) \
x(BCH_ERR_injected, injected_fs_start) \
x(EINVAL, mount_option) \
@@ -216,6 +217,11 @@
x(EINVAL, erasure_coding_found_btree_node) \
x(EINVAL, option_negative) \
x(EOPNOTSUPP, may_not_use_incompat_feature) \
x(EOPNOTSUPP, no_casefolding_without_utf8) \
x(EOPNOTSUPP, casefold_opt_is_dir_only) \
x(EOPNOTSUPP, unsupported_fsx_flag) \
x(EOPNOTSUPP, unsupported_fa_flag) \
x(EOPNOTSUPP, unsupported_fallocate_mode) \
x(EROFS, erofs_trans_commit) \
x(EROFS, erofs_no_writes) \
x(EROFS, erofs_journal_err) \

View File

@@ -621,7 +621,9 @@ print:
if (s)
s->ret = ret;
if (trans)
if (trans &&
!(flags & FSCK_ERR_NO_LOG) &&
ret == -BCH_ERR_fsck_fix)
ret = bch2_trans_log_str(trans, bch2_sb_error_strs[err]) ?: ret;
err_unlock:
mutex_unlock(&c->fsck_error_msgs_lock);


@@ -841,7 +841,7 @@ long bch2_fallocate_dispatch(struct file *file, int mode,
else if (mode == FALLOC_FL_COLLAPSE_RANGE)
ret = bchfs_fcollapse_finsert(inode, offset, len, false);
else
ret = -EOPNOTSUPP;
ret = bch_err_throw(c, unsupported_fallocate_mode);
err:
bch2_pagecache_block_put(inode);
inode_unlock(&inode->v);


@@ -722,7 +722,7 @@ static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
if (IS_ERR(inode))
inode = NULL;
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
if (!inode && IS_CASEFOLDED(vdir)) {
/*
* Do not cache a negative dentry in casefolded directories
@@ -1695,10 +1695,10 @@ static int bch2_fileattr_set(struct mnt_idmap *idmap,
s.mask = map_defined(bch_flags_to_xflags);
s.flags |= map_flags_rev(bch_flags_to_xflags, fa->fsx_xflags);
if (fa->fsx_xflags)
return -EOPNOTSUPP;
return bch_err_throw(c, unsupported_fsx_flag);
if (fa->fsx_projid >= U32_MAX)
return -EINVAL;
return bch_err_throw(c, projid_too_big);
/*
* inode fields accessible via the xattr interface are stored with a +1
@@ -1721,7 +1721,7 @@ static int bch2_fileattr_set(struct mnt_idmap *idmap,
s.flags |= map_flags_rev(bch_flags_to_uflags, fa->flags);
if (fa->flags)
return -EOPNOTSUPP;
return bch_err_throw(c, unsupported_fa_flag);
}
mutex_lock(&inode->ei_update_lock);
@@ -2564,7 +2564,7 @@ got_sb:
sb->s_shrink->seeks = 0;
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
sb->s_encoding = c->cf_encoding;
#endif
generic_set_sb_d_ops(sb);


@@ -728,14 +728,8 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *seen,
u32 id, u32 ancestor)
{
ssize_t i;
EBUG_ON(id > ancestor);
/* @ancestor should be the snapshot most recently added to @seen */
EBUG_ON(ancestor != seen->pos.snapshot);
EBUG_ON(ancestor != darray_last(seen->ids));
if (id == ancestor)
return true;
@@ -751,11 +745,8 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
* numerically, since snapshot ID lists are kept sorted, so if we find
* an id that's an ancestor of @id we're done:
*/
for (i = seen->ids.nr - 2;
i >= 0 && seen->ids.data[i] >= id;
--i)
if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i]))
darray_for_each_reverse(seen->ids, i)
if (*i != ancestor && bch2_snapshot_is_ancestor(c, id, *i))
return false;
return true;
@@ -2311,7 +2302,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
*hash_info = bch2_hash_info_init(c, &i->inode);
dir->first_this_inode = false;
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
hash_info->cf_encoding = bch2_inode_casefold(c, &i->inode) ? c->cf_encoding : NULL;
#endif
@@ -2601,14 +2592,6 @@ int bch2_check_root(struct bch_fs *c)
return ret;
}
static bool darray_u32_has(darray_u32 *d, u32 v)
{
darray_for_each(*d, i)
if (*i == v)
return true;
return false;
}
static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter, struct bkey_s_c k)
{
struct bch_fs *c = trans->c;
@@ -2641,7 +2624,7 @@ static int check_subvol_path(struct btree_trans *trans, struct btree_iter *iter,
u32 parent = le32_to_cpu(s.v->fs_path_parent);
if (darray_u32_has(&subvol_path, parent)) {
if (darray_find(subvol_path, parent)) {
printbuf_reset(&buf);
prt_printf(&buf, "subvolume loop: ");


@@ -1265,11 +1265,11 @@ int bch2_inode_set_casefold(struct btree_trans *trans, subvol_inum inum,
{
struct bch_fs *c = trans->c;
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
int ret = 0;
/* Not supported on individual files. */
if (!S_ISDIR(bi->bi_mode))
return -EOPNOTSUPP;
return bch_err_throw(c, casefold_opt_is_dir_only);
/*
* Make sure the dir is empty, as otherwise we'd need to
@@ -1291,7 +1291,7 @@ int bch2_inode_set_casefold(struct btree_trans *trans, subvol_inum inum,
return bch2_maybe_propagate_has_case_insensitive(trans, inum, bi);
#else
bch_err(c, "Cannot use casefolding on a kernel without CONFIG_UNICODE");
return -EOPNOTSUPP;
return bch_err_throw(c, no_casefolding_without_utf8);
#endif
}


@@ -1716,9 +1716,10 @@ static CLOSURE_CALLBACK(journal_write_done)
bch2_log_msg_start(c, &buf);
if (err == -BCH_ERR_journal_write_err)
prt_printf(&buf, "unable to write journal to sufficient devices");
prt_printf(&buf, "unable to write journal to sufficient devices\n");
else
prt_printf(&buf, "journal write error marking replicas: %s", bch2_err_str(err));
prt_printf(&buf, "journal write error marking replicas: %s\n",
bch2_err_str(err));
bch2_fs_emergency_read_only2(c, &buf);


@@ -607,6 +607,7 @@ static int read_btree_roots(struct bch_fs *c)
buf.buf, bch2_err_str(ret))) {
if (btree_id_is_alloc(i))
r->error = 0;
ret = 0;
}
}
@@ -1141,7 +1142,7 @@ fsck_err:
struct printbuf buf = PRINTBUF;
bch2_log_msg_start(c, &buf);
prt_printf(&buf, "error in recovery: %s", bch2_err_str(ret));
prt_printf(&buf, "error in recovery: %s\n", bch2_err_str(ret));
bch2_fs_emergency_read_only2(c, &buf);
bch2_print_str(c, KERN_ERR, buf.buf);


@@ -3,9 +3,10 @@
#define _BCACHEFS_SB_ERRORS_FORMAT_H
enum bch_fsck_flags {
FSCK_CAN_FIX = 1 << 0,
FSCK_CAN_IGNORE = 1 << 1,
FSCK_AUTOFIX = 1 << 2,
FSCK_CAN_FIX = BIT(0),
FSCK_CAN_IGNORE = BIT(1),
FSCK_AUTOFIX = BIT(2),
FSCK_ERR_NO_LOG = BIT(3),
};
#define BCH_SB_ERRS() \
@@ -278,7 +279,7 @@ enum bch_fsck_flags {
x(root_subvol_missing, 238, 0) \
x(root_dir_missing, 239, 0) \
x(root_inode_not_dir, 240, 0) \
x(dir_loop, 241, 0) \
x(dir_loop, 241, FSCK_AUTOFIX) \
x(hash_table_key_duplicate, 242, FSCK_AUTOFIX) \
x(hash_table_key_wrong_offset, 243, FSCK_AUTOFIX) \
x(unlinked_inode_not_on_deleted_list, 244, FSCK_AUTOFIX) \
@@ -295,7 +296,7 @@ enum bch_fsck_flags {
x(subvol_root_fs_path_parent_nonzero, 255, 0) \
x(subvol_children_not_set, 256, 0) \
x(subvol_children_bad, 257, 0) \
x(subvol_loop, 258, 0) \
x(subvol_loop, 258, FSCK_AUTOFIX) \
x(subvol_unreachable, 259, FSCK_AUTOFIX) \
x(btree_node_bkey_bad_u64s, 260, 0) \
x(btree_node_topology_empty_interior_node, 261, 0) \


@@ -240,6 +240,10 @@ static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned
return ca;
}
DEFINE_CLASS(bch2_dev_tryget_noerror, struct bch_dev *,
bch2_dev_put(_T), bch2_dev_tryget_noerror(c, dev),
struct bch_fs *c, unsigned dev);
static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
@@ -248,6 +252,10 @@ static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
return ca;
}
DEFINE_CLASS(bch2_dev_tryget, struct bch_dev *,
bch2_dev_put(_T), bch2_dev_tryget(c, dev),
struct bch_fs *c, unsigned dev);
static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
@@ -258,6 +266,10 @@ static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, s
return ca;
}
DEFINE_CLASS(bch2_dev_bucket_tryget_noerror, struct bch_dev *,
bch2_dev_put(_T), bch2_dev_bucket_tryget_noerror(c, bucket),
struct bch_fs *c, struct bpos bucket);
void bch2_dev_bucket_missing(struct bch_dev *, u64);
static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
@@ -271,6 +283,10 @@ static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bp
return ca;
}
DEFINE_CLASS(bch2_dev_bucket_tryget, struct bch_dev *,
bch2_dev_put(_T), bch2_dev_bucket_tryget(c, bucket),
struct bch_fs *c, struct bpos bucket);
static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
if (ca && ca->dev_idx == dev_idx)


@@ -38,6 +38,7 @@ static int bch2_fsck_rename_dirent(struct btree_trans *trans,
struct bkey_s_c_dirent old,
bool *updated_before_k_pos)
{
struct bch_fs *c = trans->c;
struct qstr old_name = bch2_dirent_get_name(old);
struct bkey_i_dirent *new = bch2_trans_kmalloc(trans, BKEY_U64s_MAX * sizeof(u64));
int ret = PTR_ERR_OR_ZERO(new);
@@ -60,7 +61,7 @@ static int bch2_fsck_rename_dirent(struct btree_trans *trans,
sprintf(renamed_buf, "%.*s.fsck_renamed-%u",
old_name.len, old_name.name, i));
ret = bch2_dirent_init_name(new, hash_info, &renamed_name, NULL);
ret = bch2_dirent_init_name(c, new, hash_info, &renamed_name, NULL);
if (ret)
return ret;
@@ -79,7 +80,7 @@ static int bch2_fsck_rename_dirent(struct btree_trans *trans,
}
ret = ret ?: bch2_fsck_update_backpointers(trans, s, desc, hash_info, &new->k_i);
bch_err_fn(trans->c, ret);
bch_err_fn(c, ret);
return ret;
}


@@ -48,7 +48,7 @@
struct bch_hash_info info = {
.inum_snapshot = bi->bi_snapshot,
.type = INODE_STR_HASH(bi),
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
.cf_encoding = bch2_inode_casefold(c, bi) ? c->cf_encoding : NULL,
#endif
.siphash_key = { .k0 = bi->bi_hash_seed }


@@ -585,7 +585,7 @@ static void __bch2_fs_free(struct bch_fs *c)
for (unsigned i = 0; i < BCH_TIME_STAT_NR; i++)
bch2_time_stats_exit(&c->times[i]);
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
utf8_unload(c->cf_encoding);
#endif
@@ -1024,7 +1024,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
goto err;
}
#ifdef CONFIG_UNICODE
#if IS_ENABLED(CONFIG_UNICODE)
/* Default encoding until we can potentially have more as an option. */
c->cf_encoding = utf8_load(BCH_FS_DEFAULT_UTF8_ENCODING);
if (IS_ERR(c->cf_encoding)) {
@@ -1160,12 +1160,11 @@ int bch2_fs_start(struct bch_fs *c)
print_mount_opts(c);
#ifdef CONFIG_UNICODE
bch_info(c, "Using encoding defined by superblock: utf8-%u.%u.%u",
unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
#endif
if (IS_ENABLED(CONFIG_UNICODE))
bch_info(c, "Using encoding defined by superblock: utf8-%u.%u.%u",
unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
if (!bch2_fs_may_start(c))
return bch_err_throw(c, insufficient_devices_to_start);