mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-23 00:00:02 +03:00)

Update bcachefs sources to 1592eaa60418 bcachefs: Btree path tracepoints

This commit is contained in:
parent 088e17a54a
commit 573d635b2c
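In brief: this sync threads a struct btree_trans * argument through the low-level btree path helpers (__btree_path_get(), __btree_path_put(), btree_path_set_should_be_locked()) so they can emit the new path tracepoints, and adds the TRACE_EVENT definitions themselves behind CONFIG_BCACHEFS_PATH_TRACEPOINTS. A trimmed sketch of the central signature change, abbreviated from the btree_iter.h hunk below (not the complete body):

	/* before: the helper only sees the path, so it has nothing to trace against */
	static inline void __btree_path_get(struct btree_path *path, bool intent)
	{
		path->ref++;
		path->intent_ref += intent;
	}

	/* after: with trans available it can sanity-check the path and trace */
	static inline void __btree_path_get(struct btree_trans *trans,
					    struct btree_path *path, bool intent)
	{
		EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
		path->ref++;
		path->intent_ref += intent;
		trace_btree_path_get_ll(trans, path);
	}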
@@ -1 +1 @@
-4a7a003763f594fcb798c13b4644521083f885b3
+1592eaa60418d460021ecf44bc405ec49ef14adf
@@ -662,14 +662,8 @@ alloc:
 		goto alloc;
 	}
 err:
-	if (!ob) {
-		rcu_read_lock();
-		struct task_struct *t = rcu_dereference(c->copygc_thread);
-		if (t)
-			wake_up_process(t);
-		rcu_read_unlock();
+	if (!ob)
 		ob = ERR_PTR(-BCH_ERR_no_buckets_found);
-	}
 
 	if (!IS_ERR(ob))
 		ob->data_type = data_type;
@@ -981,7 +981,7 @@ struct bch_fs {
 	struct bch_fs_rebalance	rebalance;
 
 	/* COPYGC */
-	struct task_struct __rcu *copygc_thread;
+	struct task_struct	*copygc_thread;
 	struct write_point	copygc_write_point;
 	s64			copygc_wait_at;
 	s64			copygc_wait;
@@ -1010,9 +1010,9 @@ retry_all:
 		 * the same position:
 		 */
 		if (trans->paths[idx].uptodate) {
-			__btree_path_get(&trans->paths[idx], false);
+			__btree_path_get(trans, &trans->paths[idx], false);
 			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
-			__btree_path_put(&trans->paths[idx], false);
+			__btree_path_put(trans, &trans->paths[idx], false);
 
 			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
 			    bch2_err_matches(ret, ENOMEM))
@@ -1131,6 +1131,8 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
 	if (unlikely(!trans->srcu_held))
 		bch2_trans_srcu_lock(trans);
 
+	trace_btree_path_traverse_start(trans, path);
+
 	/*
 	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
 	 * and re-traverse the path without a transaction restart:
@@ -1194,6 +1196,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
 
 out_uptodate:
 	path->uptodate = BTREE_ITER_UPTODATE;
+	trace_btree_path_traverse_end(trans, path);
 out:
 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
 		panic("ret %s (%i) trans->restarted %s (%i)\n",
@@ -1225,7 +1228,7 @@ static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_i
 {
 	btree_path_idx_t new = btree_path_alloc(trans, src);
 	btree_path_copy(trans, trans->paths + new, trans->paths + src);
-	__btree_path_get(trans->paths + new, intent);
+	__btree_path_get(trans, trans->paths + new, intent);
 #ifdef TRACK_PATH_ALLOCATED
 	trans->paths[new].ip_allocated = ip;
 #endif
@@ -1236,8 +1239,10 @@ __flatten
 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
 			btree_path_idx_t path, bool intent, unsigned long ip)
 {
-	__btree_path_put(trans->paths + path, intent);
+	struct btree_path *old = trans->paths + path;
+	__btree_path_put(trans, trans->paths + path, intent);
 	path = btree_path_clone(trans, path, intent, ip);
+	trace_btree_path_clone(trans, old, trans->paths + path);
 	trans->paths[path].preserve = false;
 	return path;
 }
@@ -1252,6 +1257,8 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
 	bch2_trans_verify_not_in_restart(trans);
 	EBUG_ON(!trans->paths[path_idx].ref);
 
+	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
+
 	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);
 
 	struct btree_path *path = trans->paths + path_idx;
@@ -1361,13 +1368,15 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
 {
 	struct btree_path *path = trans->paths + path_idx, *dup;
 
-	if (!__btree_path_put(path, intent))
+	if (!__btree_path_put(trans, path, intent))
 		return;
 
 	dup = path->preserve
 		? have_path_at_pos(trans, path)
 		: have_node_at_pos(trans, path);
 
+	trace_btree_path_free(trans, path_idx, dup);
+
 	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
 		return;
 
@@ -1392,7 +1401,7 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
 				 bool intent)
 {
-	if (!__btree_path_put(trans->paths + path, intent))
+	if (!__btree_path_put(trans, trans->paths + path, intent))
 		return;
 
 	__bch2_path_free(trans, path);
@@ -1421,8 +1430,8 @@ void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
 noinline __cold
 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 {
-	prt_printf(buf, "transaction updates for %s journal seq %llu\n",
-		   trans->fn, trans->journal_res.seq);
+	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
+		   trans->nr_updates, trans->fn, trans->journal_res.seq);
 	printbuf_indent_add(buf, 2);
 
 	trans_for_each_update(trans, i) {
@@ -1464,7 +1473,7 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
 {
 	struct btree_path *path = trans->paths + path_idx;
 
-	prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
+	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
 		   path_idx, path->ref, path->intent_ref,
 		   path->preserve ? 'P' : ' ',
 		   path->should_be_locked ? 'S' : ' ',
@@ -1716,14 +1725,16 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
 	    trans->paths[path_pos].cached	== cached &&
 	    trans->paths[path_pos].btree_id	== btree_id &&
 	    trans->paths[path_pos].level	== level) {
-		__btree_path_get(trans->paths + path_pos, intent);
+		trace_btree_path_get(trans, trans->paths + path_pos, &pos);
+
+		__btree_path_get(trans, trans->paths + path_pos, intent);
 		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
 		path = trans->paths + path_idx;
 	} else {
 		path_idx = btree_path_alloc(trans, path_pos);
 		path = trans->paths + path_idx;
 
-		__btree_path_get(path, intent);
+		__btree_path_get(trans, path, intent);
 		path->pos			= pos;
 		path->btree_id			= btree_id;
 		path->cached			= cached;
@@ -1738,6 +1749,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
 		path->ip_allocated		= ip;
 #endif
 		trans->paths_sorted		= false;
+
+		trace_btree_path_alloc(trans, path);
 	}
 
 	if (!(flags & BTREE_ITER_nopreserve))
@@ -1857,7 +1870,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
 
 	struct btree_path *path = btree_iter_path(trans, iter);
 	if (btree_path_node(path, path->level))
-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);
 	return 0;
 }
 
@@ -1889,7 +1902,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);
@@ -1977,7 +1990,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
@@ -2149,7 +2162,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
 	if (unlikely(ret))
 		return bkey_s_c_err(ret);
 
-	btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
 
 	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
 	if (k.k && !bkey_err(k)) {
@@ -2193,7 +2206,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 			goto out;
 		}
 
-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);
 
 		k = btree_path_level_peek_all(trans->c, l, &iter->k);
 
@@ -2320,7 +2333,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 				 * advance, same as on exit for iter->path, but only up
 				 * to snapshot
 				 */
-				__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
+				__btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
 				iter->update_path = iter->path;
 
 				iter->update_path = bch2_btree_path_set_pos(trans,
@@ -2376,14 +2389,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 					iter->flags & BTREE_ITER_intent,
 					btree_iter_ip_allocated(iter));
 
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out_no_locked:
 	if (iter->update_path) {
 		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
 		if (unlikely(ret))
 			k = bkey_s_c_err(ret);
 		else
-			btree_path_set_should_be_locked(trans->paths + iter->update_path);
+			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
 	}
 
 	if (!(iter->flags & BTREE_ITER_all_snapshots))
@@ -2505,6 +2518,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 							iter->flags & BTREE_ITER_intent,
 							_THIS_IP_);
 				path = btree_iter_path(trans, iter);
+				trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
 				saved_k = *k.k;
 				saved_v = k.v;
 			}
@@ -2521,7 +2535,7 @@ got_key:
 				continue;
 			}
 
-			btree_path_set_should_be_locked(path);
+			btree_path_set_should_be_locked(trans, path);
 			break;
 		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
 			/* Advance to previous leaf node: */
@@ -2679,7 +2693,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 		}
 	}
 out:
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out_no_locked:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);
@@ -2905,9 +2919,9 @@ void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
 	dst->ip_allocated = _RET_IP_;
 #endif
 	if (src->path)
-		__btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
+		__btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
 	if (src->update_path)
-		__btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
+		__btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
 	dst->key_cache_path = 0;
 }
 
@@ -3231,7 +3245,7 @@ void bch2_trans_put(struct btree_trans *trans)
 	bch2_trans_unlock(trans);
 
 	trans_for_each_update(trans, i)
-		__btree_path_put(trans->paths + i->path, true);
+		__btree_path_put(trans, trans->paths + i->path, true);
 	trans->nr_updates = 0;
 
 	check_btree_paths_leaked(trans);
@@ -6,6 +6,12 @@
 #include "btree_types.h"
 #include "trace.h"
 
+void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
+void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
+void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
+void bch2_dump_trans_updates(struct btree_trans *);
+void bch2_dump_trans_paths_updates(struct btree_trans *);
+
 static inline int __bkey_err(const struct bkey *k)
 {
 	return PTR_ERR_OR_ZERO(k);
@@ -13,16 +19,28 @@ static inline int __bkey_err(const struct bkey *k)
 
 #define bkey_err(_k)	__bkey_err((_k).k)
 
-static inline void __btree_path_get(struct btree_path *path, bool intent)
+static inline void __btree_path_get(struct btree_trans *trans, struct btree_path *path, bool intent)
 {
+	unsigned idx = path - trans->paths;
+
+	EBUG_ON(!test_bit(idx, trans->paths_allocated));
+	if (unlikely(path->ref == U8_MAX)) {
+		bch2_dump_trans_paths_updates(trans);
+		panic("path %u refcount overflow\n", idx);
+	}
+
 	path->ref++;
 	path->intent_ref += intent;
+	trace_btree_path_get_ll(trans, path);
 }
 
-static inline bool __btree_path_put(struct btree_path *path, bool intent)
+static inline bool __btree_path_put(struct btree_trans *trans, struct btree_path *path, bool intent)
 {
+	EBUG_ON(!test_bit(path - trans->paths, trans->paths_allocated));
 	EBUG_ON(!path->ref);
 	EBUG_ON(!path->intent_ref && intent);
 
+	trace_btree_path_put_ll(trans, path);
 	path->intent_ref -= intent;
 	return --path->ref == 0;
 }
@@ -873,12 +891,6 @@ __bch2_btree_iter_peek_and_restart(struct btree_trans *trans,
 	_ret;							\
 })
 
-void bch2_trans_updates_to_text(struct printbuf *, struct btree_trans *);
-void bch2_btree_path_to_text(struct printbuf *, struct btree_trans *, btree_path_idx_t);
-void bch2_trans_paths_to_text(struct printbuf *, struct btree_trans *);
-void bch2_dump_trans_updates(struct btree_trans *);
-void bch2_dump_trans_paths_updates(struct btree_trans *);
-
 struct btree_trans *__bch2_trans_get(struct bch_fs *, unsigned);
 void bch2_trans_put(struct btree_trans *);
 
@@ -228,6 +228,9 @@ static inline int __btree_node_lock_nopath(struct btree_trans *trans,
 					 bch2_six_check_for_deadlock, trans, ip);
 	WRITE_ONCE(trans->locking, NULL);
 	WRITE_ONCE(trans->locking_wait.start_time, 0);
+
+	if (!ret)
+		trace_btree_path_lock(trans, _THIS_IP_, b);
 	return ret;
 }
 
@@ -400,12 +403,13 @@ static inline int bch2_btree_path_upgrade(struct btree_trans *trans,
 
 /* misc: */
 
-static inline void btree_path_set_should_be_locked(struct btree_path *path)
+static inline void btree_path_set_should_be_locked(struct btree_trans *trans, struct btree_path *path)
 {
 	EBUG_ON(!btree_node_locked(path, path->level));
 	EBUG_ON(path->uptodate);
 
 	path->should_be_locked = true;
+	trace_btree_path_should_be_locked(trans, path);
 }
 
 static inline void __btree_path_set_level_up(struct btree_trans *trans,
@@ -477,8 +477,8 @@ struct btree_trans {
 	btree_path_idx_t	nr_sorted;
 	btree_path_idx_t	nr_paths;
 	btree_path_idx_t	nr_paths_max;
+	btree_path_idx_t	nr_updates;
 	u8			fn_idx;
-	u8			nr_updates;
 	u8			lock_must_abort;
 	bool			lock_may_not_fail:1;
 	bool			srcu_held:1;
@@ -374,7 +374,7 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
 	i->key_cache_already_flushed = true;
 	i->flags |= BTREE_TRIGGER_norun;
 
-	btree_path_set_should_be_locked(btree_path);
+	btree_path_set_should_be_locked(trans, btree_path);
 	ret = bch2_trans_update_by_path(trans, path_idx, i->k, flags, ip);
 out:
 	bch2_path_put(trans, path_idx, true);
@@ -422,7 +422,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 			break;
 	}
 
-	if (!cmp && i < trans->updates + trans->nr_updates) {
+	bool overwrite = !cmp && i < trans->updates + trans->nr_updates;
+
+	if (overwrite) {
 		EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
 
 		bch2_path_put(trans, i->path, true);
@@ -449,7 +451,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 		}
 	}
 
-	__btree_path_get(trans->paths + i->path, true);
+	__btree_path_get(trans, trans->paths + i->path, true);
+
+	trace_update_by_path(trans, path, i, overwrite);
 
 	/*
 	 * If a key is present in the key cache, it must also exist in the
@@ -498,7 +502,7 @@ static noinline int bch2_trans_update_get_key_cache(struct btree_trans *trans,
 			return btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_raced);
 		}
 
-		btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+		btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);
 	}
 
 	return 0;
@@ -1987,7 +1987,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
 	if (ret)
 		goto err;
 
-	btree_path_set_should_be_locked(trans->paths + sib_path);
+	btree_path_set_should_be_locked(trans, trans->paths + sib_path);
 
 	m = trans->paths[sib_path].l[level].b;
 
@@ -560,7 +560,7 @@ int bch2_btree_write_buffer_tryflush(struct btree_trans *trans)
 	return ret;
 }
 
-/**
+/*
  * In check and repair code, when checking references to write buffer btrees we
  * need to issue a flush before we have a definitive error: this issues a flush
  * if this is a key we haven't yet checked.
@@ -233,7 +233,8 @@ static struct inode *bch2_alloc_inode(struct super_block *sb)
 
 static struct bch_inode_info *__bch2_new_inode(struct bch_fs *c)
 {
-	struct bch_inode_info *inode = kmem_cache_alloc(bch2_inode_cache, GFP_NOFS);
+	struct bch_inode_info *inode = alloc_inode_sb(c->vfs_sb,
+						      bch2_inode_cache, GFP_NOFS);
 	if (!inode)
 		return NULL;
 
@@ -2140,7 +2141,8 @@ int __init bch2_vfs_init(void)
 {
 	int ret = -ENOMEM;
 
-	bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
+	bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT |
+				      SLAB_ACCOUNT);
 	if (!bch2_inode_cache)
 		goto err;
 
@@ -357,18 +357,19 @@ static int bch2_copygc_thread(void *arg)
 		}
 
 		last = atomic64_read(&clock->now);
-		wait = max_t(long, 0, bch2_copygc_wait_amount(c) - clock->max_slop);
+		wait = bch2_copygc_wait_amount(c);
 
-		if (wait > 0) {
+		if (wait > clock->max_slop) {
 			c->copygc_wait_at = last;
 			c->copygc_wait = last + wait;
 			move_buckets_wait(&ctxt, buckets, true);
-			trace_and_count(c, copygc_wait, c, wait, c->copygc_wait);
-			bch2_io_clock_schedule_timeout(clock, c->copygc_wait);
+			trace_and_count(c, copygc_wait, c, wait, last + wait);
+			bch2_kthread_io_clock_wait(clock, last + wait,
+						   MAX_SCHEDULE_TIMEOUT);
 			continue;
 		}
 
-		c->copygc_wait = c->copygc_wait_at = 0;
+		c->copygc_wait = 0;
 
 		c->copygc_running = true;
 		ret = bch2_copygc(&ctxt, buckets, &did_work);
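The copygc hunk above also changes when the thread decides to sleep: the old code clamped the computed wait (wait = max_t(long, 0, bch2_copygc_wait_amount(c) - clock->max_slop)) and slept whenever the result was positive; the new code compares the raw wait amount against the clock's slop and, only when it is larger, sleeps on the I/O clock until last + wait. A condensed sketch of the new control flow (identifiers taken from the hunk above; the surrounding kthread loop is omitted):

	u64 last = atomic64_read(&clock->now);
	u64 wait = bch2_copygc_wait_amount(c);

	if (wait > clock->max_slop) {
		/* not enough reclaimable fragmentation yet: sleep on the io clock */
		c->copygc_wait_at = last;
		c->copygc_wait = last + wait;
		bch2_kthread_io_clock_wait(clock, last + wait, MAX_SCHEDULE_TIMEOUT);
	} else {
		c->copygc_wait = 0;	/* run copygc now */
	}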
@@ -400,10 +401,9 @@ static int bch2_copygc_thread(void *arg)
 
 void bch2_copygc_stop(struct bch_fs *c)
 {
-	struct task_struct *t = rcu_dereference_protected(c->copygc_thread, true);
-	if (t) {
-		kthread_stop(t);
-		put_task_struct(t);
+	if (c->copygc_thread) {
+		kthread_stop(c->copygc_thread);
+		put_task_struct(c->copygc_thread);
 	}
 	c->copygc_thread = NULL;
 }
@@ -430,8 +430,8 @@ int bch2_copygc_start(struct bch_fs *c)
 
 	get_task_struct(t);
 
-	rcu_assign_pointer(c->copygc_thread, t);
-	wake_up_process(t);
+	c->copygc_thread = t;
+	wake_up_process(c->copygc_thread);
 
 	return 0;
 }
|
@ -818,7 +818,6 @@ STORE(bch2_dev)
|
||||
{
|
||||
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
|
||||
struct bch_fs *c = ca->fs;
|
||||
struct bch_member *mi;
|
||||
|
||||
if (attr == &sysfs_discard) {
|
||||
bool v = strtoul_or_return(buf);
|
||||
|
@@ -3,7 +3,6 @@
 #define TRACE_SYSTEM bcachefs
 
 #if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_BCACHEFS_H
 
 #include <linux/tracepoint.h>
 
@@ -558,6 +557,7 @@ TRACE_EVENT(btree_path_relock_fail,
 		__field(unsigned long,		caller_ip	)
 		__field(u8,			btree_id	)
 		__field(u8,			level		)
+		__field(u8,			path_idx)
 		TRACE_BPOS_entries(pos)
 		__array(char,			node, 24	)
 		__field(u8,			self_read_count	)
@@ -575,7 +575,8 @@ TRACE_EVENT(btree_path_relock_fail,
 		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
 		__entry->caller_ip		= caller_ip;
 		__entry->btree_id		= path->btree_id;
-		__entry->level			= path->level;
+		__entry->level			= level;
+		__entry->path_idx		= path - trans->paths;
 		TRACE_BPOS_assign(pos, path->pos);
 
 		c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level);
@@ -588,7 +589,7 @@ TRACE_EVENT(btree_path_relock_fail,
 			c = six_lock_counts(&path->l[level].b->c.lock);
 			__entry->read_count	= c.n[SIX_LOCK_read];
 			__entry->intent_count	= c.n[SIX_LOCK_intent];
-			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+			scnprintf(__entry->node, sizeof(__entry->node), "%px", &b->c);
 		}
 		__entry->iter_lock_seq		= path->l[level].lock_seq;
 		__entry->node_lock_seq		= is_btree_node(path, level)
@@ -596,9 +597,10 @@ TRACE_EVENT(btree_path_relock_fail,
 			: 0;
 	),
 
-	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
+	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u node %s held %u:%u lock count %u:%u iter seq %u lock seq %u",
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
+		  __entry->path_idx,
 		  bch2_btree_id_str(__entry->btree_id),
 		  __entry->pos_inode,
 		  __entry->pos_offset,
@@ -625,6 +627,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
 		__field(unsigned long,		caller_ip	)
 		__field(u8,			btree_id	)
 		__field(u8,			level		)
+		__field(u8,			path_idx)
 		TRACE_BPOS_entries(pos)
 		__field(u8,			locked		)
 		__field(u8,			self_read_count	)
@@ -642,6 +645,7 @@ TRACE_EVENT(btree_path_upgrade_fail,
 		__entry->caller_ip		= caller_ip;
 		__entry->btree_id		= path->btree_id;
 		__entry->level			= level;
+		__entry->path_idx		= path - trans->paths;
 		TRACE_BPOS_assign(pos, path->pos);
 		__entry->locked			= btree_node_locked(path, level);
 
@@ -657,9 +661,10 @@ TRACE_EVENT(btree_path_upgrade_fail,
 			: 0;
 	),
 
-	TP_printk("%s %pS btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
+	TP_printk("%s %pS\nidx %2u btree %s pos %llu:%llu:%u level %u locked %u held %u:%u lock count %u:%u iter seq %u lock seq %u",
 		  __entry->trans_fn,
 		  (void *) __entry->caller_ip,
+		  __entry->path_idx,
 		  bch2_btree_id_str(__entry->btree_id),
 		  __entry->pos_inode,
 		  __entry->pos_offset,
@@ -1415,6 +1420,456 @@ TRACE_EVENT(error_downcast,
 	TP_printk("%s -> %s %s", __entry->bch_err, __entry->std_err, __entry->ip)
 );
 
+#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
+
+TRACE_EVENT(update_by_path,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path,
+		 struct btree_insert_entry *i, bool overwrite),
+	TP_ARGS(trans, path, i, overwrite),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 32	)
+		__field(btree_path_idx_t,	path_idx	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(pos)
+		__field(u8,			overwrite	)
+		__field(btree_path_idx_t,	update_idx	)
+		__field(btree_path_idx_t,	nr_updates	)
+	),
+
+	TP_fast_assign(
+		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+		__entry->path_idx	= path - trans->paths;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+		__entry->overwrite	= overwrite;
+		__entry->update_idx	= i - trans->updates;
+		__entry->nr_updates	= trans->nr_updates;
+	),
+
+	TP_printk("%s path %3u btree %s pos %llu:%llu:%u overwrite %u update %u/%u",
+		  __entry->trans_fn,
+		  __entry->path_idx,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->overwrite,
+		  __entry->update_idx,
+		  __entry->nr_updates)
+);
+
+TRACE_EVENT(btree_path_lock,
+	TP_PROTO(struct btree_trans *trans,
+		 unsigned long caller_ip,
+		 struct btree_bkey_cached_common *b),
+	TP_ARGS(trans, caller_ip, b),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 32	)
+		__field(unsigned long,		caller_ip	)
+		__field(u8,			btree_id	)
+		__field(u8,			level		)
+		__array(char,			node, 24	)
+		__field(u32,			lock_seq	)
+	),
+
+	TP_fast_assign(
+		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+		__entry->caller_ip	= caller_ip;
+		__entry->btree_id	= b->btree_id;
+		__entry->level		= b->level;
+
+		scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+		__entry->lock_seq	= six_lock_seq(&b->lock);
+	),
+
+	TP_printk("%s %pS\nbtree %s level %u node %s lock seq %u",
+		  __entry->trans_fn,
+		  (void *) __entry->caller_ip,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->level,
+		  __entry->node,
+		  __entry->lock_seq)
+);
+
+DECLARE_EVENT_CLASS(btree_path_ev,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path),
+
+	TP_STRUCT__entry(
+		__field(u16,			idx		)
+		__field(u8,			ref		)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+	),
+
+	TP_printk("path %3u ref %u btree %s pos %llu:%llu:%u",
+		  __entry->idx, __entry->ref,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_get_ll,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_put_ll,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_ev, btree_path_should_be_locked,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_alloc,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			locks_want	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->locks_want	= path->locks_want;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(pos, path->pos);
+	),
+
+	TP_printk("path %3u btree %s locks_want %u pos %llu:%llu:%u",
+		  __entry->idx,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->locks_want,
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot)
+);
+
+TRACE_EVENT(btree_path_get,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos),
+	TP_ARGS(trans, path, new_pos),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		__field(u8,			locks_want	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(old_pos)
+		TRACE_BPOS_entries(new_pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		__entry->locks_want	= path->locks_want;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(old_pos, path->pos);
+		TRACE_BPOS_assign(new_pos, *new_pos);
+	),
+
+	TP_printk(" path %3u ref %u preserve %u btree %s locks_want %u pos %llu:%llu:%u -> %llu:%llu:%u",
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->locks_want,
+		  __entry->old_pos_inode,
+		  __entry->old_pos_offset,
+		  __entry->old_pos_snapshot,
+		  __entry->new_pos_inode,
+		  __entry->new_pos_offset,
+		  __entry->new_pos_snapshot)
+);
+
+DECLARE_EVENT_CLASS(btree_path_clone,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+	TP_ARGS(trans, path, new),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			new_idx		)
+		__field(u8,			btree_id	)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		TRACE_BPOS_entries(pos)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->new_idx	= new - trans->paths;
+		__entry->btree_id	= path->btree_id;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		TRACE_BPOS_assign(pos, path->pos);
+	),
+
+	TP_printk(" path %3u ref %u preserve %u btree %s %llu:%llu:%u -> %u",
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->new_idx)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_clone,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+	TP_ARGS(trans, path, new)
+);
+
+DEFINE_EVENT(btree_path_clone, btree_path_save_pos,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path, struct btree_path *new),
+	TP_ARGS(trans, path, new)
+);
+
+DECLARE_EVENT_CLASS(btree_path_traverse,
+	TP_PROTO(struct btree_trans *trans,
+		 struct btree_path *path),
+	TP_ARGS(trans, path),
+
+	TP_STRUCT__entry(
+		__array(char,			trans_fn, 32	)
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		__field(u8,			should_be_locked )
+		__field(u8,			btree_id	)
+		__field(u8,			level		)
+		TRACE_BPOS_entries(pos)
+		__field(u8,			locks_want	)
+		__field(u8,			nodes_locked	)
+		__array(char,			node0, 24	)
+		__array(char,			node1, 24	)
+		__array(char,			node2, 24	)
+		__array(char,			node3, 24	)
+	),
+
+	TP_fast_assign(
+		strscpy(__entry->trans_fn, trans->fn, sizeof(__entry->trans_fn));
+
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		__entry->btree_id	= path->btree_id;
+		__entry->level		= path->level;
+		TRACE_BPOS_assign(pos, path->pos);
+
+		__entry->locks_want	= path->locks_want;
+		__entry->nodes_locked	= path->nodes_locked;
+		struct btree *b = path->l[0].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[1].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[2].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[3].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
+	),
+
+	TP_printk("%s\npath %3u ref %u preserve %u btree %s %llu:%llu:%u level %u locks_want %u\n"
+		  "locks %u %u %u %u node %s %s %s %s",
+		  __entry->trans_fn,
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->level,
+		  __entry->locks_want,
+		  (__entry->nodes_locked >> 6) & 3,
+		  (__entry->nodes_locked >> 4) & 3,
+		  (__entry->nodes_locked >> 2) & 3,
+		  (__entry->nodes_locked >> 0) & 3,
+		  __entry->node3,
+		  __entry->node2,
+		  __entry->node1,
+		  __entry->node0)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_start,
+	TP_PROTO(struct btree_trans *trans,
+		 struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+DEFINE_EVENT(btree_path_traverse, btree_path_traverse_end,
+	TP_PROTO(struct btree_trans *trans, struct btree_path *path),
+	TP_ARGS(trans, path)
+);
+
+TRACE_EVENT(btree_path_set_pos,
+	TP_PROTO(struct btree_trans *trans,
+		 struct btree_path *path,
+		 struct bpos *new_pos),
+	TP_ARGS(trans, path, new_pos),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			ref		)
+		__field(u8,			preserve	)
+		__field(u8,			btree_id	)
+		TRACE_BPOS_entries(old_pos)
+		TRACE_BPOS_entries(new_pos)
+		__field(u8,			locks_want	)
+		__field(u8,			nodes_locked	)
+		__array(char,			node0, 24	)
+		__array(char,			node1, 24	)
+		__array(char,			node2, 24	)
+		__array(char,			node3, 24	)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path - trans->paths;
+		__entry->ref		= path->ref;
+		__entry->preserve	= path->preserve;
+		__entry->btree_id	= path->btree_id;
+		TRACE_BPOS_assign(old_pos, path->pos);
+		TRACE_BPOS_assign(new_pos, *new_pos);
+
+		__entry->nodes_locked	= path->nodes_locked;
+		struct btree *b = path->l[0].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node0, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node0, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[1].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node1, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node1, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[2].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node2, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node2, sizeof(__entry->node0), "%px", &b->c);
+		b = path->l[3].b;
+		if (IS_ERR(b))
+			strscpy(__entry->node3, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node0));
+		else
+			scnprintf(__entry->node3, sizeof(__entry->node0), "%px", &b->c);
+	),
+
+	TP_printk("\npath %3u ref %u preserve %u btree %s %llu:%llu:%u -> %llu:%llu:%u\n"
+		  "locks %u %u %u %u node %s %s %s %s",
+		  __entry->idx,
+		  __entry->ref,
+		  __entry->preserve,
+		  bch2_btree_id_str(__entry->btree_id),
+		  __entry->old_pos_inode,
+		  __entry->old_pos_offset,
+		  __entry->old_pos_snapshot,
+		  __entry->new_pos_inode,
+		  __entry->new_pos_offset,
+		  __entry->new_pos_snapshot,
+		  (__entry->nodes_locked >> 6) & 3,
+		  (__entry->nodes_locked >> 4) & 3,
+		  (__entry->nodes_locked >> 2) & 3,
+		  (__entry->nodes_locked >> 0) & 3,
+		  __entry->node3,
+		  __entry->node2,
+		  __entry->node1,
+		  __entry->node0)
+);
+
+TRACE_EVENT(btree_path_free,
+	TP_PROTO(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup),
+	TP_ARGS(trans, path, dup),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+		__field(u8,			preserve	)
+		__field(u8,			should_be_locked)
+		__field(s8,			dup		)
+		__field(u8,			dup_locked	)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path;
+		__entry->preserve	= trans->paths[path].preserve;
+		__entry->should_be_locked = trans->paths[path].should_be_locked;
+		__entry->dup		= dup ? dup - trans->paths : -1;
+		__entry->dup_locked	= dup ? btree_node_locked(dup, dup->level) : 0;
+	),
+
+	TP_printk(" path %3u %c %c dup %2i locked %u", __entry->idx,
+		  __entry->preserve ? 'P' : ' ',
+		  __entry->should_be_locked ? 'S' : ' ',
+		  __entry->dup,
+		  __entry->dup_locked)
+);
+
+TRACE_EVENT(btree_path_free_trans_begin,
+	TP_PROTO(btree_path_idx_t path),
+	TP_ARGS(path),
+
+	TP_STRUCT__entry(
+		__field(btree_path_idx_t,	idx		)
+	),
+
+	TP_fast_assign(
+		__entry->idx		= path;
+	),
+
+	TP_printk(" path %3u", __entry->idx)
+);
+
+#else /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+#ifndef _TRACE_BCACHEFS_H
+
+static inline void trace_update_by_path(struct btree_trans *trans, struct btree_path *path,
+					struct btree_insert_entry *i, bool overwrite) {}
+static inline void trace_btree_path_lock(struct btree_trans *trans, unsigned long caller_ip, struct btree_bkey_cached_common *b) {}
+static inline void trace_btree_path_get_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_put_ll(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_should_be_locked(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_alloc(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_get(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_clone(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_save_pos(struct btree_trans *trans, struct btree_path *path, struct btree_path *new) {}
+static inline void trace_btree_path_traverse_start(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_traverse_end(struct btree_trans *trans, struct btree_path *path) {}
+static inline void trace_btree_path_set_pos(struct btree_trans *trans, struct btree_path *path, struct bpos *new_pos) {}
+static inline void trace_btree_path_free(struct btree_trans *trans, btree_path_idx_t path, struct btree_path *dup) {}
+static inline void trace_btree_path_free_trans_begin(btree_path_idx_t path) {}
+
+#endif
+#endif /* CONFIG_BCACHEFS_PATH_TRACEPOINTS */
+
+#define _TRACE_BCACHEFS_H
 #endif /* _TRACE_BCACHEFS_H */
 
 /* This part must be outside protection */
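A note on the #define _TRACE_BCACHEFS_H move (removed at the top of trace.h in the @@ -3,7 +3,6 @@ hunk, re-added just before the closing #endif above): kernel trace headers are deliberately re-read when TRACE_HEADER_MULTI_READ is set, while the static inline stubs in the #else branch must be emitted exactly once. Defining the guard macro only at the end presumably lets the first inclusion compile the stubs under #ifndef _TRACE_BCACHEFS_H while later reads skip them. The resulting structure, condensed from the hunk above:

	#if !defined(_TRACE_BCACHEFS_H) || defined(TRACE_HEADER_MULTI_READ)

	#ifdef CONFIG_BCACHEFS_PATH_TRACEPOINTS
	/* real TRACE_EVENT() definitions */
	#else
	#ifndef _TRACE_BCACHEFS_H
	/* no-op stubs, emitted once, so call sites always compile */
	static inline void trace_btree_path_get_ll(struct btree_trans *trans,
						   struct btree_path *path) {}
	/* ... one stub per path tracepoint ... */
	#endif
	#endif

	#define _TRACE_BCACHEFS_H
	#endif /* _TRACE_BCACHEFS_H */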