Update bcachefs sources to 055c5700a7df workqueue: Basic memory allocation profiling support
Some checks failed
build / bcachefs-tools-deb (ubuntu-24.04) (push) Has been cancelled
build / bcachefs-tools-rpm (push) Has been cancelled
build / bcachefs-tools-msrv (push) Has been cancelled
Nix Flake actions / nix-matrix (push) Has been cancelled
Nix Flake actions / ${{ matrix.name }} (${{ matrix.system }}) (push) Has been cancelled
update-flake-lock / lockfile (push) Has been cancelled

This commit is contained in:
Kent Overstreet 2025-07-28 19:56:35 -04:00
parent e0b9c3da02
commit 2d5b35dd34
18 changed files with 115 additions and 88 deletions

View File

@ -1 +1 @@
e54ff0aa96886b753343100125bd3dfab1a8e337
055c5700a7dfe6a7020d4bec812c98397d98c456

View File

@ -1554,6 +1554,9 @@ int bch2_check_alloc_info(struct bch_fs *c)
struct bkey_s_c k;
int ret = 0;
struct progress_indicator_state progress;
bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc));
CLASS(btree_trans, trans)(c);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_prefetch);
@ -1577,6 +1580,8 @@ int bch2_check_alloc_info(struct bch_fs *c)
if (!k.k)
break;
progress_update_iter(trans, &progress, &iter);
if (k.k->type) {
next = bpos_nosnap_successor(k.k->p);

View File

@ -511,7 +511,7 @@ restart:
if (btree_node_accessed(b)) {
clear_btree_node_accessed(b);
bc->not_freed[BCH_BTREE_CACHE_NOT_FREED_access_bit]++;
--touched;;
--touched;
} else if (!btree_node_reclaim(c, b)) {
__bch2_btree_node_hash_remove(bc, b);
__btree_node_data_free(b);

View File

@ -2011,7 +2011,7 @@ static void btree_node_scrub_work(struct work_struct *work)
bch_err_fn_ratelimited(c, ret);
}
bch2_bkey_buf_exit(&scrub->key, c);;
bch2_bkey_buf_exit(&scrub->key, c);
btree_bounce_free(c, c->opts.btree_node_size, scrub->used_mempool, scrub->buf);
enumerated_ref_put(&scrub->ca->io_ref[READ], BCH_DEV_READ_REF_btree_node_scrub);
kfree(scrub);

View File

@ -2744,7 +2744,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev_min(struct btree_trans *trans, struct
}
/* Extents can straddle iter->pos: */
iter->pos = bpos_min(iter->pos, k.k->p);;
iter->pos = bpos_min(iter->pos, k.k->p);
if (iter->flags & BTREE_ITER_filter_snapshots)
iter->pos.snapshot = iter->snapshot;

View File

@ -969,7 +969,7 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans,
BUG_ON(current != c->recovery_task);
struct bkey_i *accounting;
retry:
percpu_down_read(&c->mark_lock);
for (accounting = btree_trans_subbuf_base(trans, &trans->accounting);
accounting != btree_trans_subbuf_top(trans, &trans->accounting);
@ -1025,13 +1025,17 @@ fatal_err:
bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret));
percpu_down_read(&c->mark_lock);
revert_fs_usage:
BUG();
/* error path not handled by __bch2_trans_commit() */
for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting);
i != accounting;
i = bkey_next(i))
bch2_accounting_trans_commit_revert(trans, bkey_i_to_accounting(i), flags);
percpu_up_read(&c->mark_lock);
if (bch2_err_matches(ret, BCH_ERR_btree_insert_need_mark_replicas)) {
ret = drop_locks_do(trans, bch2_accounting_update_sb(trans));
if (!ret)
goto retry;
}
return ret;
}

View File

@ -485,7 +485,7 @@ typedef DARRAY(struct trans_kmalloc_trace) darray_trans_kmalloc_trace;
struct btree_trans_subbuf {
u16 base;
u16 u64s;
u16 size;;
u16 size;
};
struct btree_trans {
@ -854,15 +854,15 @@ static inline bool btree_node_type_is_extents(enum btree_node_type type)
return type != BKEY_TYPE_btree && btree_id_is_extents(type - 1);
}
static const u64 btree_has_snapshots_mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_IS_snapshots)) << nr)
BCH_BTREE_IDS()
#undef x
;
static inline bool btree_type_has_snapshots(enum btree_id btree)
{
const u64 mask = 0
#define x(name, nr, flags, ...) |((!!((flags) & BTREE_IS_snapshots)) << nr)
BCH_BTREE_IDS()
#undef x
;
return BIT_ULL(btree) & mask;
return BIT_ULL(btree) & btree_has_snapshots_mask;
}
static inline bool btree_type_has_snapshot_field(enum btree_id btree)

View File

@ -214,11 +214,13 @@ void bch2_dirent_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
struct qstr d_name = bch2_dirent_get_name(d);
prt_printf(out, "%.*s", d_name.len, d_name.name);
prt_bytes(out, d_name.name, d_name.len);
if (d.v->d_casefold) {
prt_str(out, " (casefold ");
struct qstr d_name = bch2_dirent_get_lookup_name(d);
prt_printf(out, " (casefold %.*s)", d_name.len, d_name.name);
prt_bytes(out, d_name.name, d_name.len);
prt_char(out, ')');
}
prt_str(out, " ->");

View File

@ -9,7 +9,7 @@ struct fast_list_pcpu;
struct fast_list {
GENRADIX(void *) items;
struct ida slots_allocated;;
struct ida slots_allocated;
struct fast_list_pcpu __percpu
*buffer;
};

View File

@ -635,6 +635,8 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
goto out;
}
inode->ei_last_dirtied = (unsigned long) current;
bch2_set_folio_dirty(c, inode, folio, &res, offset, len);
bch2_folio_reservation_put(c, inode, &res);

View File

@ -1972,11 +1972,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
"extent type past end of inode %llu:%u, i_size %llu\n%s",
i->inode.bi_inum, i->inode.bi_snapshot, i->inode.bi_size,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_fpunch_snapshot(trans,
SPOS(i->inode.bi_inum,
last_block,
i->inode.bi_snapshot),
POS(i->inode.bi_inum, U64_MAX));
ret = snapshots_seen_add_inorder(c, s, i->inode.bi_snapshot) ?:
bch2_fpunch_snapshot(trans,
SPOS(i->inode.bi_inum,
last_block,
i->inode.bi_snapshot),
POS(i->inode.bi_inum, U64_MAX));
if (ret)
goto err;

View File

@ -1060,14 +1060,13 @@ static struct journal_buf *__bch2_next_write_buffer_flush_journal_buf(struct jou
if (open && !*blocked) {
__bch2_journal_block(j);
s.v = atomic64_read_acquire(&j->reservations.counter);
*blocked = true;
}
ret = journal_state_count(s, idx & JOURNAL_STATE_BUF_MASK) > open
? ERR_PTR(-EAGAIN)
: buf;
if (!IS_ERR(ret))
smp_mb();
break;
}
}
@ -1297,7 +1296,7 @@ int bch2_dev_journal_bucket_delete(struct bch_dev *ca, u64 b)
return -EINVAL;
}
u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);;
u64 *new_buckets = kcalloc(ja->nr, sizeof(u64), GFP_KERNEL);
if (!new_buckets)
return bch_err_throw(c, ENOMEM_set_nr_journal_buckets);

View File

@ -267,7 +267,7 @@ static inline union journal_res_state journal_state_buf_put(struct journal *j, u
{
union journal_res_state s;
s.v = atomic64_sub_return_release(((union journal_res_state) {
s.v = atomic64_sub_return(((union journal_res_state) {
.buf0_count = idx == 0,
.buf1_count = idx == 1,
.buf2_count = idx == 2,

View File

@ -428,15 +428,22 @@ static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs
bool first = true;
jset_entry_for_each_key(entry, k) {
/* We may be called on entries that haven't been validated: */
if (!k->k.u64s)
break;
if (!first) {
prt_newline(out);
bch2_prt_jset_entry_type(out, entry->type);
prt_str(out, ": ");
}
/* We may be called on entries that haven't been validated: */
if (!k->k.u64s) {
prt_str(out, "(invalid, k->u64s 0)");
break;
}
if (bkey_next(k) > vstruct_last(entry)) {
prt_str(out, "(invalid, bkey overruns jset_entry)");
break;
}
bch2_btree_id_level_to_text(out, entry->btree_id, entry->level);
prt_char(out, ' ');
bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(k));

View File

@ -26,6 +26,12 @@ static inline bool go_rw_in_recovery(struct bch_fs *c)
(c->opts.fsck && !(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info))));
}
/*
 * recovery_pass_will_run - will @pass still be executed this recovery?
 *
 * True only while the filesystem is in recovery AND @pass is set in the
 * bitmask of passes scheduled to run.  unlikely(): callers treat the
 * in-recovery case as the cold path.
 */
static inline bool recovery_pass_will_run(struct bch_fs *c, enum bch_recovery_pass pass)
{
	bool in_recovery = test_bit(BCH_FS_in_recovery, &c->flags);

	return unlikely(in_recovery &&
			(c->recovery.passes_to_run & BIT_ULL(pass)) != 0);
}
int bch2_run_print_explicit_recovery_pass(struct bch_fs *, enum bch_recovery_pass);
int __bch2_run_explicit_recovery_pass(struct bch_fs *, struct printbuf *,

View File

@ -11,6 +11,7 @@
#include "errcode.h"
#include "error.h"
#include "fs.h"
#include "progress.h"
#include "recovery_passes.h"
#include "snapshot.h"
@ -142,7 +143,7 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
guard(rcu)();
struct snapshot_table *t = rcu_dereference(c->snapshots);
if (unlikely(c->recovery.pass_done < BCH_RECOVERY_PASS_check_snapshots))
if (unlikely(recovery_pass_will_run(c, BCH_RECOVERY_PASS_check_snapshots)))
return __bch2_snapshot_is_ancestor_early(t, id, ancestor);
if (likely(ancestor >= IS_ANCESTOR_BITMAP))
@ -364,31 +365,32 @@ int bch2_snapshot_lookup(struct btree_trans *trans, u32 id,
/* fsck: */
static u32 bch2_snapshot_child(struct bch_fs *c, u32 id, unsigned child)
static u32 bch2_snapshot_child(struct snapshot_table *t,
u32 id, unsigned child)
{
return snapshot_t(c, id)->children[child];
return __snapshot_t(t, id)->children[child];
}
static u32 bch2_snapshot_left_child(struct bch_fs *c, u32 id)
static u32 bch2_snapshot_left_child(struct snapshot_table *t, u32 id)
{
return bch2_snapshot_child(c, id, 0);
return bch2_snapshot_child(t, id, 0);
}
static u32 bch2_snapshot_right_child(struct bch_fs *c, u32 id)
static u32 bch2_snapshot_right_child(struct snapshot_table *t, u32 id)
{
return bch2_snapshot_child(c, id, 1);
return bch2_snapshot_child(t, id, 1);
}
static u32 bch2_snapshot_tree_next(struct bch_fs *c, u32 id)
static u32 bch2_snapshot_tree_next(struct snapshot_table *t, u32 id)
{
u32 n, parent;
n = bch2_snapshot_left_child(c, id);
n = bch2_snapshot_left_child(t, id);
if (n)
return n;
while ((parent = bch2_snapshot_parent(c, id))) {
n = bch2_snapshot_right_child(c, parent);
while ((parent = __bch2_snapshot_parent(t, id))) {
n = bch2_snapshot_right_child(t, parent);
if (n && n != id)
return n;
id = parent;
@ -401,17 +403,18 @@ u32 bch2_snapshot_oldest_subvol(struct bch_fs *c, u32 snapshot_root,
snapshot_id_list *skip)
{
guard(rcu)();
struct snapshot_table *t = rcu_dereference(c->snapshots);
u32 id, subvol = 0, s;
retry:
id = snapshot_root;
while (id && bch2_snapshot_exists(c, id)) {
while (id && __bch2_snapshot_exists(t, id)) {
if (!(skip && snapshot_list_has_id(skip, id))) {
s = snapshot_t(c, id)->subvol;
s = __snapshot_t(t, id)->subvol;
if (s && (!subvol || s < subvol))
subvol = s;
}
id = bch2_snapshot_tree_next(c, id);
id = bch2_snapshot_tree_next(t, id);
if (id == snapshot_root)
break;
}
@ -973,12 +976,16 @@ int bch2_reconstruct_snapshots(struct bch_fs *c)
struct snapshot_tree_reconstruct r = {};
int ret = 0;
struct progress_indicator_state progress;
bch2_progress_init(&progress, c, btree_has_snapshots_mask);
for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
if (btree_type_has_snapshots(btree)) {
r.btree = btree;
ret = for_each_btree_key(trans, iter, btree, POS_MIN,
BTREE_ITER_all_snapshots|BTREE_ITER_prefetch, k, ({
progress_update_iter(trans, &progress, &iter);
get_snapshot_trees(c, &r, k.k->p);
}));
if (ret)
@ -1424,38 +1431,22 @@ static inline u32 interior_delete_has_id(interior_delete_list *l, u32 id)
return i ? i->live_child : 0;
}
/*
 * __live_child - find a descendant of @id that is not being deleted.
 *
 * Depth-first search of the snapshot subtree rooted at @id: direct
 * children are preferred, and only if every direct child is on one of
 * the delete lists do we recurse into each child's subtree.
 *
 * Returns a surviving descendant id, or 0 if none exists (or @id is
 * not present in the table).
 */
static unsigned __live_child(struct snapshot_table *t, u32 id,
			     snapshot_id_list *delete_leaves,
			     interior_delete_list *delete_interior)
{
	struct snapshot_t *s = __snapshot_t(t, id);

	if (!s)
		return 0;

	/* First pass: any direct child that survives deletion wins. */
	for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) {
		u32 child = s->children[i];

		if (child &&
		    !snapshot_list_has_id(delete_leaves, child) &&
		    !interior_delete_has_id(delete_interior, child))
			return child;
	}

	/* Second pass: recurse into each subtree looking deeper. */
	for (unsigned i = 0; i < ARRAY_SIZE(s->children); i++) {
		if (!s->children[i])
			continue;

		u32 found = __live_child(t, s->children[i],
					 delete_leaves, delete_interior);
		if (found)
			return found;
	}

	return 0;
}
static unsigned live_child(struct bch_fs *c, u32 id)
static unsigned live_child(struct bch_fs *c, u32 start)
{
struct snapshot_delete *d = &c->snapshot_delete;
guard(rcu)();
return __live_child(rcu_dereference(c->snapshots), id,
&d->delete_leaves, &d->delete_interior);
struct snapshot_table *t = rcu_dereference(c->snapshots);
for (u32 id = bch2_snapshot_tree_next(t, start);
id && id != start;
id = bch2_snapshot_tree_next(t, id))
if (bch2_snapshot_is_leaf(c, id) &&
!snapshot_list_has_id(&d->delete_leaves, id) &&
!interior_delete_has_id(&d->delete_interior, id))
return id;
return 0;
}
static bool snapshot_id_dying(struct snapshot_delete *d, unsigned id)
@ -1712,12 +1703,14 @@ static inline u32 bch2_snapshot_nth_parent_skip(struct bch_fs *c, u32 id, u32 n,
interior_delete_list *skip)
{
guard(rcu)();
struct snapshot_table *t = rcu_dereference(c->snapshots);
while (interior_delete_has_id(skip, id))
id = __bch2_snapshot_parent(c, id);
id = __bch2_snapshot_parent(t, id);
while (n--) {
do {
id = __bch2_snapshot_parent(c, id);
id = __bch2_snapshot_parent(t, id);
} while (interior_delete_has_id(skip, id));
}

View File

@ -63,19 +63,19 @@ static inline u32 bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
return __bch2_snapshot_parent_early(c, id);
}
static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
static inline u32 __bch2_snapshot_parent(struct snapshot_table *t, u32 id)
{
const struct snapshot_t *s = snapshot_t(c, id);
const struct snapshot_t *s = __snapshot_t(t, id);
if (!s)
return 0;
u32 parent = s->parent;
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) &&
parent &&
s->depth != snapshot_t(c, parent)->depth + 1)
s->depth != __snapshot_t(t, parent)->depth + 1)
panic("id %u depth=%u parent %u depth=%u\n",
id, snapshot_t(c, id)->depth,
parent, snapshot_t(c, parent)->depth);
id, __snapshot_t(t, id)->depth,
parent, __snapshot_t(t, parent)->depth);
return parent;
}
@ -83,14 +83,16 @@ static inline u32 __bch2_snapshot_parent(struct bch_fs *c, u32 id)
static inline u32 bch2_snapshot_parent(struct bch_fs *c, u32 id)
{
guard(rcu)();
return __bch2_snapshot_parent(c, id);
return __bch2_snapshot_parent(rcu_dereference(c->snapshots), id);
}
static inline u32 bch2_snapshot_nth_parent(struct bch_fs *c, u32 id, u32 n)
{
guard(rcu)();
struct snapshot_table *t = rcu_dereference(c->snapshots);
while (n--)
id = __bch2_snapshot_parent(c, id);
id = __bch2_snapshot_parent(t, id);
return id;
}
@ -100,23 +102,29 @@ u32 bch2_snapshot_skiplist_get(struct bch_fs *, u32);
static inline u32 bch2_snapshot_root(struct bch_fs *c, u32 id)
{
guard(rcu)();
struct snapshot_table *t = rcu_dereference(c->snapshots);
u32 parent;
while ((parent = __bch2_snapshot_parent(c, id)))
while ((parent = __bch2_snapshot_parent(t, id)))
id = parent;
return id;
}
static inline enum snapshot_id_state __bch2_snapshot_id_state(struct bch_fs *c, u32 id)
static inline enum snapshot_id_state __bch2_snapshot_id_state(struct snapshot_table *t, u32 id)
{
const struct snapshot_t *s = snapshot_t(c, id);
const struct snapshot_t *s = __snapshot_t(t, id);
return s ? s->state : SNAPSHOT_ID_empty;
}
static inline enum snapshot_id_state bch2_snapshot_id_state(struct bch_fs *c, u32 id)
{
guard(rcu)();
return __bch2_snapshot_id_state(c, id);
return __bch2_snapshot_id_state(rcu_dereference(c->snapshots), id);
}
/*
 * __bch2_snapshot_exists - does @id name a live snapshot in table @t?
 *
 * Lockless variant: caller is responsible for protecting @t (the
 * bch2_snapshot_exists() wrapper takes rcu).  A missing entry reports
 * SNAPSHOT_ID_empty from __bch2_snapshot_id_state() and thus false here.
 */
static inline bool __bch2_snapshot_exists(struct snapshot_table *t, u32 id)
{
	enum snapshot_id_state state = __bch2_snapshot_id_state(t, id);

	return state == SNAPSHOT_ID_live;
}
static inline bool bch2_snapshot_exists(struct bch_fs *c, u32 id)

View File

@ -79,7 +79,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
} else {
darray_for_each(c->incompat_versions_requested, i)
if (version == *i)
return -BCH_ERR_may_not_use_incompat_feature;
return bch_err_throw(c, may_not_use_incompat_feature);
darray_push(&c->incompat_versions_requested, version);
CLASS(printbuf, buf)();
@ -90,7 +90,7 @@ int bch2_set_version_incompat(struct bch_fs *c, enum bcachefs_metadata_version v
prt_printf(&buf, "\n set version_upgrade=incompat to enable");
bch_notice(c, "%s", buf.buf);
return -BCH_ERR_may_not_use_incompat_feature;
return bch_err_throw(c, may_not_use_incompat_feature);
}
}