Mirror of https://github.com/koverstreet/bcachefs-tools.git, synced 2025-12-08 00:00:12 +03:00
Update bcachefs sources to 4b5105c627f4 workqueue: Basic memory allocation profiling support
Some checks failed
build / bcachefs-tools-deb (ubuntu-22.04) (push) Has been cancelled
build / bcachefs-tools-deb (ubuntu-24.04) (push) Has been cancelled
build / bcachefs-tools-rpm (push) Has been cancelled
build / bcachefs-tools-msrv (push) Has been cancelled
Nix Flake actions / nix-matrix (push) Has been cancelled
Nix Flake actions / ${{ matrix.name }} (${{ matrix.system }}) (push) Has been cancelled
parent b261da891a
commit e1e87f53cd
@@ -1 +1 @@
-c241a5bf54ed4aeb29d029d8f1dae1dd592cdda4
+4b5105c627f4f1490e9bc4267c8096926de367b5
@@ -28,6 +28,7 @@ extern void kmemleak_update_trace(const void *ptr) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_transient_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
+extern void kmemleak_ignore_percpu(const void __percpu *ptr) __ref;
 extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
 extern void kmemleak_alloc_phys(phys_addr_t phys, size_t size,
@@ -97,6 +98,9 @@ static inline void kmemleak_not_leak(const void *ptr)
 static inline void kmemleak_transient_leak(const void *ptr)
 {
 }
+static inline void kmemleak_ignore_percpu(const void __percpu *ptr)
+{
+}
 static inline void kmemleak_ignore(const void *ptr)
 {
 }
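For reference (not part of the diff): kmemleak_ignore_percpu() is the per-CPU counterpart of kmemleak_ignore(), and the empty static inline above keeps callers building when kmemleak is compiled out. A minimal kernel-style sketch of the call pattern it enables; the type and function names below are illustrative, not taken from bcachefs:

#include <linux/percpu.h>
#include <linux/kmemleak.h>

struct io_counters {			/* illustrative type, not from this diff */
	u64 reads;
	u64 writes;
};

static struct io_counters __percpu *io_counters_alloc(void)
{
	struct io_counters __percpu *c = alloc_percpu(struct io_counters);

	if (!c)
		return NULL;

	/*
	 * Ask kmemleak to skip this per-CPU allocation entirely (neither
	 * scanned nor reported).  With kmemleak disabled, this resolves
	 * to the empty stub added in the hunk above.
	 */
	kmemleak_ignore_percpu(c);
	return c;
}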
@@ -362,27 +362,6 @@ static struct bkey_float *bkey_float(const struct btree *b,
 	return ro_aux_tree_base(b, t)->f + idx;
 }
 
-static void __bset_aux_tree_verify(struct btree *b)
-{
-	for_each_bset(b, t) {
-		if (t->aux_data_offset == U16_MAX)
-			continue;
-
-		BUG_ON(t != b->set &&
-		       t[-1].aux_data_offset == U16_MAX);
-
-		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
-		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
-		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
-	}
-}
-
-static inline void bset_aux_tree_verify(struct btree *b)
-{
-	if (static_branch_unlikely(&bch2_debug_check_bset_lookups))
-		__bset_aux_tree_verify(b);
-}
-
 void bch2_btree_keys_init(struct btree *b)
 {
 	unsigned i;
@@ -538,6 +517,51 @@ static inline void bch2_bset_verify_rw_aux_tree(struct btree *b,
 	__bch2_bset_verify_rw_aux_tree(b, t);
 }
 
+static void __bset_aux_tree_verify_ro(struct btree *b, struct bset_tree *t)
+{
+	struct bkey_packed *k = btree_bkey_first(b, t);
+
+	eytzinger1_for_each(j, t->size - 1) {
+		while (tree_to_bkey(b, t, j) > k &&
+		       k != btree_bkey_last(b, t))
+			k = bkey_p_next(k);
+
+		BUG_ON(tree_to_bkey(b, t, j) != k);
+	}
+}
+
+static void __bset_aux_tree_verify(struct btree *b)
+{
+	for_each_bset(b, t) {
+		if (t->aux_data_offset == U16_MAX)
+			continue;
+
+		BUG_ON(t != b->set &&
+		       t[-1].aux_data_offset == U16_MAX);
+
+		BUG_ON(t->aux_data_offset < bset_aux_tree_buf_start(b, t));
+		BUG_ON(t->aux_data_offset > btree_aux_data_u64s(b));
+		BUG_ON(bset_aux_tree_buf_end(t) > btree_aux_data_u64s(b));
+
+		switch (bset_aux_tree_type(t)) {
+		case BSET_RO_AUX_TREE:
+			__bset_aux_tree_verify_ro(b, t);
+			break;
+		case BSET_RW_AUX_TREE:
+			__bch2_bset_verify_rw_aux_tree(b, t);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+static inline void bset_aux_tree_verify(struct btree *b)
+{
+	if (static_branch_unlikely(&bch2_debug_check_bset_lookups))
+		__bset_aux_tree_verify(b);
+}
+
 /* returns idx of first entry >= offset: */
 static unsigned rw_aux_tree_bsearch(struct btree *b,
 				    struct bset_tree *t,
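Side note on the read-only aux tree check added above: it appears to rely on eytzinger1_for_each() visiting tree indices in key order, which is why `k` only ever advances while the BUG_ON() demands an exact match at every node. A self-contained illustration (plain C, not bcachefs code; eyt_first()/eyt_next() are stand-in helpers, not the actual eytzinger.h API) of why in-order traversal of a 1-indexed eytzinger array yields sorted order:

/*
 * Illustration only: in-order traversal of a 1-indexed eytzinger array
 * (node i has children 2i and 2i+1) visits elements in sorted order.
 */
#include <stdio.h>

static unsigned eyt_first(unsigned size)
{
	unsigned i = 1;

	while (2 * i <= size)		/* leftmost node */
		i *= 2;
	return i;
}

static unsigned eyt_next(unsigned i, unsigned size)
{
	if (2 * i + 1 <= size) {
		/* right subtree exists: its leftmost node comes next */
		i = 2 * i + 1;
		while (2 * i <= size)
			i *= 2;
		return i;
	}
	/* climb while we are a right child; 0 means traversal is done */
	while (i & 1)
		i >>= 1;
	return i >> 1;
}

int main(void)
{
	/* values 1..7 laid out in (1-indexed) eytzinger order */
	unsigned v[] = { 0, 4, 2, 6, 1, 3, 5, 7 };
	unsigned size = 7;

	for (unsigned i = eyt_first(size); i; i = eyt_next(i, size))
		printf("%u ", v[i]);	/* prints: 1 2 3 4 5 6 7 */
	printf("\n");
	return 0;
}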
@@ -1302,9 +1302,6 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 
 	btree_bounce_free(c, btree_buf_bytes(b), used_mempool, sorted);
 
-	if (updated_range)
-		bch2_btree_node_drop_keys_outside_node(b);
-
 	i = &b->data->keys;
 	for (k = i->start; k != vstruct_last(i);) {
 		struct bkey tmp;
@@ -1336,16 +1333,15 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 		k = bkey_p_next(k);
 	}
 
-	for (k = i->start; k != vstruct_last(i);) {
-		BUG_ON(!k->u64s);
-	}
-
 	bch2_bset_build_aux_tree(b, b->set, false);
 
 	set_needs_whiteout(btree_bset_first(b), true);
 
 	btree_node_reset_sib_u64s(b);
 
+	if (updated_range)
+		bch2_btree_node_drop_keys_outside_node(b);
+
 	/*
 	 * XXX:
 	 *
@@ -3209,6 +3209,8 @@ static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
 	if (ret)
 		return ret;
 
+	thr->c->recovery_task = current;
+
 	ret = bch2_fs_start(thr->c);
 	if (ret)
 		goto err;
@@ -87,7 +87,7 @@ void bch2_dev_congested_to_text(struct printbuf *out, struct bch_dev *ca)
 
 	prt_printf(out, "read latency threshold:\t");
 	bch2_pr_time_units(out,
-			   ca->io_latency[READ].quantiles.entries[QUANTILE_IDX(1)].m * 2);
+			   ca->io_latency[READ].quantiles.entries[QUANTILE_IDX(1)].m << 2);
 	prt_newline(out);
 
 	prt_printf(out, "median read latency:\t");
@@ -97,7 +97,7 @@ void bch2_dev_congested_to_text(struct printbuf *out, struct bch_dev *ca)
 
 	prt_printf(out, "write latency threshold:\t");
 	bch2_pr_time_units(out,
-			   ca->io_latency[WRITE].quantiles.entries[QUANTILE_IDX(1)].m * 3);
+			   ca->io_latency[WRITE].quantiles.entries[QUANTILE_IDX(1)].m << 3);
 	prt_newline(out);
 
 	prt_printf(out, "median write latency:\t");
@@ -55,14 +55,9 @@ static inline void bch2_congested_acct(struct bch_dev *ca, u64 io_latency,
 	s64 latency_over = io_latency - latency_threshold;
 
 	if (latency_threshold && latency_over > 0) {
-		/*
-		 * bump up congested by approximately latency_over * 4 /
-		 * latency_threshold - we don't need much accuracy here so don't
-		 * bother with the divide:
-		 */
-		if (atomic_read(&ca->congested) < CONGESTED_MAX)
-			atomic_add(latency_over >>
-				   max_t(int, ilog2(latency_threshold) - 2, 0),
-				   &ca->congested);
+		atomic_add((u32) min(U32_MAX, io_latency * 2) /
+			   (u32) min(U32_MAX, latency_threshold),
+			   &ca->congested);
 
 		ca->congested_last = now;
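Reading this hunk together with the two threshold printouts above (`m << 2` for reads, `m << 3` for writes): the shift-based branch, with its "don't bother with the divide" comment, approximates latency_over * 4 / latency_threshold by shifting right by ilog2(latency_threshold) - 2, while the divide-based form computes io_latency * 2 / latency_threshold with both operands clamped to 32 bits. A standalone sketch comparing the two bump values on made-up sample latencies (names, units, and numbers are arbitrary, not from the source):

/*
 * Illustration only: compare the congestion "bump" produced by the
 * shift-based approximation and by the explicit divide in the hunk above.
 */
#include <stdio.h>
#include <stdint.h>

static int ilog2_u64(uint64_t v)	/* stand-in for the kernel's ilog2() */
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

int main(void)
{
	uint64_t latency_threshold = 4000;	/* sample value */
	uint64_t io_latency        = 9000;	/* sample value */
	int64_t  latency_over      = io_latency - latency_threshold;

	/* ~latency_over * 4 / latency_threshold, without a divide */
	int shift = ilog2_u64(latency_threshold) - 2;
	uint64_t shift_bump = (uint64_t)latency_over >> (shift > 0 ? shift : 0);

	/* io_latency * 2 / latency_threshold, clamped to 32 bits */
	uint32_t num = io_latency * 2 < UINT32_MAX ? io_latency * 2 : UINT32_MAX;
	uint32_t den = latency_threshold < UINT32_MAX ? latency_threshold : UINT32_MAX;
	uint32_t div_bump = num / den;

	printf("shift-based bump: %llu, divide-based bump: %u\n",
	       (unsigned long long)shift_bump, div_bump);	/* 9 and 4 here */
	return 0;
}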
@@ -71,7 +71,7 @@ static int bch2_bucket_is_movable(struct btree_trans *trans,
 	if (ret)
 		return ret;
 
-	struct bch_dev *ca = bch2_dev_tryget(c, k.k->p.inode);
+	struct bch_dev *ca = bch2_dev_bucket_tryget(c, k.k->p);
 	if (!ca)
 		goto out;
 
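On this last hunk: bch2_dev_bucket_tryget() takes the whole bucket pos rather than just the device index, so presumably it can also reject bucket offsets that fall outside the device. A hypothetical sketch of what such a wrapper could look like; the check and the field names used here are assumptions for illustration, not the actual bcachefs helper:

/* hypothetical sketch only; the real helper is defined in the bcachefs sources */
static inline struct bch_dev *dev_bucket_tryget_sketch(struct bch_fs *c,
						       struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget(c, bucket.inode);

	/* assumed check: the bucket offset must lie within the device */
	if (ca && bucket.offset >= ca->mi.nbuckets) {
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}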