mirror of https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-22 00:00:03 +03:00

Update bcachefs sources to b1708f0191 bcachefs: Fix a null ptr deref in bch2_btree_iter_traverse_one()

parent f8f84d9388
commit 0243e4d41c
@@ -1 +1 @@
-8a316f4112c5aa4b3c2c4745fa83475dbec8a959
+b1708f0191fcad1b7afa47dd6a7c6b1104c4639d
@@ -501,6 +501,7 @@ static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
 static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
 {
 	unsigned long gc_count = c->gc_count;
+	u64 available;
 	int ret = 0;
 
 	ca->allocator_state = ALLOCATOR_BLOCKED;
@@ -516,9 +517,11 @@ static int wait_buckets_available(struct bch_fs *c, struct bch_dev *ca)
 		if (gc_count != c->gc_count)
 			ca->inc_gen_really_needs_gc = 0;
 
-		if ((ssize_t) (dev_buckets_available(c, ca) -
-			       ca->inc_gen_really_needs_gc) >=
-		    (ssize_t) fifo_free(&ca->free_inc))
+		available = max_t(s64, 0, dev_buckets_available(c, ca) -
+				  ca->inc_gen_really_needs_gc);
+
+		if (available > fifo_free(&ca->free_inc) ||
+		    (available && !fifo_full(&ca->free[RESERVE_BTREE])))
 			break;
 
 		up_read(&c->gc_lock);
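The rewritten wakeup condition first clamps the bucket count at zero with max_t(s64, 0, ...) before comparing. A minimal runnable userspace sketch of why the clamp matters with unsigned bucket counters (illustrative names, not the kernel macros):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's max_t(s64, 0, x): compare as signed 64-bit. */
static int64_t max_s64(int64_t a, int64_t b)
{
	return a > b ? a : b;
}

int main(void)
{
	uint64_t buckets_available = 3;	/* fewer buckets than gc needs */
	uint64_t really_needs_gc   = 7;

	/* Naive u64 subtraction wraps around to a huge value: */
	uint64_t wrapped = buckets_available - really_needs_gc;

	/* Casting to signed and clamping at zero gives the intended answer: */
	uint64_t available = max_s64(0,
		(int64_t) (buckets_available - really_needs_gc));

	printf("wrapped=%llu clamped=%llu\n",
	       (unsigned long long) wrapped,
	       (unsigned long long) available);	/* clamped=0 */
	return 0;
}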
@@ -1101,6 +1104,8 @@ static int bch2_allocator_thread(void *arg)
 
 	while (1) {
 		cond_resched();
+		if (kthread_should_stop())
+			break;
 
 		pr_debug("discarding %zu invalidated buckets",
 			 fifo_used(&ca->free_inc));
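The added kthread_should_stop() check gives the allocator thread the canonical kernel-thread loop shape, so kthread_stop() can tear it down promptly. A sketch of that shape (kernel context assumed; the work body is elided, not the actual allocator code):

#include <linux/kthread.h>
#include <linux/sched.h>

static int worker_fn(void *arg)
{
	while (1) {
		cond_resched();

		/* kthread_stop() sets this flag and waits for us to exit: */
		if (kthread_should_stop())
			break;

		/* ... perform one unit of work ... */
	}
	return 0;
}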
@@ -339,7 +339,7 @@ enum bch_time_stats {
 #define BTREE_RESERVE_MAX	(BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
 
 /* Size of the freelist we allocate btree nodes from: */
-#define BTREE_NODE_RESERVE	BTREE_RESERVE_MAX
+#define BTREE_NODE_RESERVE	(BTREE_RESERVE_MAX * 4)
 
 #define BTREE_NODE_OPEN_BUCKET_RESERVE	(BTREE_RESERVE_MAX * BCH_REPLICAS_MAX)
@@ -275,9 +275,13 @@ struct bch_ioctl_dev_usage {
 	__u32			bucket_size;
 	__u64			nr_buckets;
 	__u64			available_buckets;
 
 	__u64			buckets[BCH_DATA_NR];
 	__u64			sectors[BCH_DATA_NR];
+
+	__u64			ec_buckets;
+	__u64			ec_sectors;
 };
 
 /*
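Appending the erasure-coding counters at the end of the ioctl argument struct leaves the offsets of every existing field untouched, so readers of the old fields keep working against the extended layout. A hedged, self-contained check of that property with simplified stand-in structs (not the real header):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct dev_usage_old {
	uint32_t bucket_size;
	uint64_t nr_buckets;
};

struct dev_usage_new {
	uint32_t bucket_size;
	uint64_t nr_buckets;
	uint64_t ec_buckets;	/* appended fields change the size ... */
	uint64_t ec_sectors;	/* ... but not the offsets of prior fields */
};

int main(void)
{
	assert(offsetof(struct dev_usage_old, nr_buckets) ==
	       offsetof(struct dev_usage_new, nr_buckets));
	return 0;
}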
@@ -64,21 +64,27 @@ struct bset_tree *bch2_bkey_to_bset(struct btree *b, struct bkey_packed *k)
  * by the time we actually do the insert will all be deleted.
  */
 
-void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
+void bch2_dump_bset(struct bch_fs *c, struct btree *b,
+		    struct bset *i, unsigned set)
 {
 	struct bkey_packed *_k, *_n;
-	struct bkey k, n;
-	char buf[120];
+	struct bkey uk, n;
+	struct bkey_s_c k;
+	char buf[200];
 
 	if (!i->u64s)
 		return;
 
-	for (_k = i->start, k = bkey_unpack_key(b, _k);
+	for (_k = i->start;
 	     _k < vstruct_last(i);
-	     _k = _n, k = n) {
+	     _k = _n) {
 		_n = bkey_next_skip_noops(_k, vstruct_last(i));
 
-		bch2_bkey_to_text(&PBUF(buf), &k);
+		k = bkey_disassemble(b, _k, &uk);
+
+		if (c)
+			bch2_bkey_val_to_text(&PBUF(buf), c, k);
+		else
+			bch2_bkey_to_text(&PBUF(buf), k.k);
 		printk(KERN_ERR "block %u key %5zu: %s\n", set,
 		       _k->_data - i->_data, buf);
@@ -87,31 +93,24 @@ void bch2_dump_bset(struct btree *b, struct bset *i, unsigned set)
 
 		n = bkey_unpack_key(b, _n);
 
-		if (bkey_cmp(bkey_start_pos(&n), k.p) < 0) {
+		if (bkey_cmp(bkey_start_pos(&n), k.k->p) < 0) {
 			printk(KERN_ERR "Key skipped backwards\n");
 			continue;
 		}
 
-		/*
-		 * Weird check for duplicate non extent keys: extents are
-		 * deleted iff they have 0 size, so if it has zero size and it's
-		 * not deleted these aren't extents:
-		 */
-		if (((!k.size && !bkey_deleted(&k)) ||
-		     (!n.size && !bkey_deleted(&n))) &&
-		    !bkey_deleted(&k) &&
-		    !bkey_cmp(n.p, k.p))
+		if (!bkey_deleted(k.k) &&
+		    !bkey_cmp(n.p, k.k->p))
 			printk(KERN_ERR "Duplicate keys\n");
 	}
 }
 
-void bch2_dump_btree_node(struct btree *b)
+void bch2_dump_btree_node(struct bch_fs *c, struct btree *b)
 {
 	struct bset_tree *t;
 
 	console_lock();
 	for_each_bset(b, t)
-		bch2_dump_bset(b, bset(b, t), t - b->set);
+		bch2_dump_bset(c, b, bset(b, t), t - b->set);
 	console_unlock();
 }
@@ -170,7 +169,7 @@ static void bch2_btree_node_iter_next_check(struct btree_node_iter *_iter,
 		struct bkey nu = bkey_unpack_key(b, n);
 		char buf1[80], buf2[80];
 
-		bch2_dump_btree_node(b);
+		bch2_dump_btree_node(NULL, b);
 		bch2_bkey_to_text(&PBUF(buf1), &ku);
 		bch2_bkey_to_text(&PBUF(buf2), &nu);
 		printk(KERN_ERR "out of order/overlapping:\n%s\n%s\n",
@@ -248,7 +247,7 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
 		char buf1[100];
 		char buf2[100];
 
-		bch2_dump_btree_node(b);
+		bch2_dump_btree_node(NULL, b);
 		bch2_bkey_to_text(&PBUF(buf1), &k1);
 		bch2_bkey_to_text(&PBUF(buf2), &k2);
@@ -269,7 +268,7 @@ void bch2_verify_insert_pos(struct btree *b, struct bkey_packed *where,
 		char buf1[100];
 		char buf2[100];
 
-		bch2_dump_btree_node(b);
+		bch2_dump_btree_node(NULL, b);
 		bch2_bkey_to_text(&PBUF(buf1), &k1);
 		bch2_bkey_to_text(&PBUF(buf2), &k2);
@@ -600,8 +600,8 @@ void bch2_bfloat_to_text(struct printbuf *, struct btree *,
 
 /* Debug stuff */
 
-void bch2_dump_bset(struct btree *, struct bset *, unsigned);
-void bch2_dump_btree_node(struct btree *);
+void bch2_dump_bset(struct bch_fs *, struct btree *, struct bset *, unsigned);
+void bch2_dump_btree_node(struct bch_fs *, struct btree *);
 void bch2_dump_btree_node_iter(struct btree *, struct btree_node_iter *);
 
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -309,7 +309,7 @@ restart:
 		if (freed >= nr)
 			goto out;
 
-	if (sc->gfp_mask & __GFP_IO)
+	if (sc->gfp_mask & __GFP_FS)
 		mutex_lock(&bc->lock);
 	else if (!mutex_trylock(&bc->lock))
 		goto out;
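The shrinker now keys its locking decision off __GFP_FS rather than __GFP_IO: blocking on a filesystem lock is only safe when the allocation that triggered reclaim permits filesystem-level recursion. A hedged sketch of the general pattern in a shrinker scan callback (illustrative names, not the bcachefs code):

#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(demo_cache_lock);

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (sc->gfp_mask & __GFP_FS)
		/* Reclaim may recurse into the fs: safe to block on the lock. */
		mutex_lock(&demo_cache_lock);
	else if (!mutex_trylock(&demo_cache_lock))
		/* Lock holder may itself be allocating: don't risk deadlock. */
		return SHRINK_STOP;

	/* ... evict cached objects, incrementing `freed` ... */

	mutex_unlock(&demo_cache_lock);
	return freed;
}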
@@ -902,6 +902,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id id)
 		struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 		const struct bch_extent_ptr *ptr;
 
+		percpu_down_read(&c->mark_lock);
 		bkey_for_each_ptr(ptrs, ptr) {
 			struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
 			struct bucket *g = PTR_BUCKET(ca, ptr, false);
@@ -914,6 +915,7 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id id)
 
 			}
 		}
+		percpu_up_read(&c->mark_lock);
 	}
 
 	bch2_trans_exit(&trans);
@@ -923,17 +925,25 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id id)
 int bch2_gc_gens(struct bch_fs *c)
 {
 	struct bch_dev *ca;
-	struct bucket_array *buckets;
-	struct bucket *g;
 	unsigned i;
 	int ret;
 
-	down_read(&c->state_lock);
+	/*
+	 * Ideally we would be using state_lock and not gc_lock here, but that
+	 * introduces a deadlock in the RO path - we currently take the state
+	 * lock at the start of going RO, thus the gc thread may get stuck:
+	 */
+	down_read(&c->gc_lock);
 
 	for_each_member_device(ca, c, i) {
+		struct bucket_array *buckets = bucket_array(ca);
+		struct bucket *g;
+
 		down_read(&ca->bucket_lock);
-		buckets = bucket_array(ca);
 
 		for_each_bucket(g, buckets)
 			g->gc_gen = g->mark.gen;
 		up_read(&ca->bucket_lock);
 	}
 
 	for (i = 0; i < BTREE_ID_NR; i++)
@@ -944,14 +954,15 @@ int bch2_gc_gens(struct bch_fs *c)
 	}
 
 	for_each_member_device(ca, c, i) {
+		struct bucket_array *buckets = bucket_array(ca);
+		struct bucket *g;
+
 		down_read(&ca->bucket_lock);
-		buckets = bucket_array(ca);
 
 		for_each_bucket(g, buckets)
 			g->oldest_gen = g->gc_gen;
 		up_read(&ca->bucket_lock);
 	}
 err:
-	up_read(&c->state_lock);
+	up_read(&c->gc_lock);
 	return ret;
 }
@@ -897,7 +897,7 @@ static int validate_bset_keys(struct bch_fs *c, struct btree *b,
 			bch2_bkey_to_text(&PBUF(buf1), &up);
 			bch2_bkey_to_text(&PBUF(buf2), u.k);
 
-			bch2_dump_bset(b, i, 0);
+			bch2_dump_bset(c, b, i, 0);
 			btree_err(BTREE_ERR_FATAL, c, b, i,
 				  "keys out of order: %s > %s",
 				  buf1, buf2);
@@ -185,6 +185,14 @@ static inline bool btree_iter_get_locks(struct btree_iter *iter,
 	return iter->uptodate < BTREE_ITER_NEED_RELOCK;
 }
 
+static struct bpos btree_node_pos(struct btree_bkey_cached_common *_b,
+				  enum btree_iter_type type)
+{
+	return type != BTREE_ITER_CACHED
+		? container_of(_b, struct btree, c)->key.k.p
+		: container_of(_b, struct bkey_cached, c)->key.pos;
+}
+
 /* Slowpath: */
 bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 			    unsigned level, struct btree_iter *iter,
@@ -253,7 +261,8 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 
 		if (iter->btree_id == linked->btree_id &&
 		    btree_node_locked(linked, level) &&
-		    bkey_cmp(pos, linked->l[level].b->key.k.p) <= 0)
+		    bkey_cmp(pos, btree_node_pos((void *) linked->l[level].b,
+						 btree_iter_type(linked))) <= 0)
 			ret = false;
 
 		/*
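btree_node_pos() works because struct btree and struct bkey_cached both embed a struct btree_bkey_cached_common (as member c), so container_of() can recover whichever outer object the iterator actually points at. A runnable userspace sketch of that embedding pattern (illustrative types, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

struct common { unsigned level; };

struct node   { int node_data;  struct common c; };
struct cached { struct common c; int cached_data; };

static int data_of(struct common *hdr, int is_cached)
{
	/* Map the shared header back to the right outer type: */
	return is_cached
		? container_of(hdr, struct cached, c)->cached_data
		: container_of(hdr, struct node, c)->node_data;
}

int main(void)
{
	struct node n   = { .node_data = 42 };
	struct cached k = { .cached_data = 7 };

	printf("%d %d\n", data_of(&n.c, 0), data_of(&k.c, 1)); /* 42 7 */
	return 0;
}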
@@ -435,6 +444,22 @@ void bch2_trans_unlock(struct btree_trans *trans)
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 
+static void bch2_btree_iter_verify_cached(struct btree_iter *iter)
+{
+	struct bkey_cached *ck;
+	bool locked = btree_node_locked(iter, 0);
+
+	if (!bch2_btree_node_relock(iter, 0))
+		return;
+
+	ck = (void *) iter->l[0].b;
+	BUG_ON(ck->key.btree_id != iter->btree_id ||
+	       bkey_cmp(ck->key.pos, iter->pos));
+
+	if (!locked)
+		btree_node_unlock(iter, 0);
+}
+
 static void bch2_btree_iter_verify_level(struct btree_iter *iter,
 					 unsigned level)
 {
@@ -449,6 +474,12 @@ static void bch2_btree_iter_verify_level(struct btree_iter *iter,
 	if (!debug_check_iterators(iter->trans->c))
 		return;
 
+	if (btree_iter_type(iter) == BTREE_ITER_CACHED) {
+		if (!level)
+			bch2_btree_iter_verify_cached(iter);
+		return;
+	}
+
 	BUG_ON(iter->level < iter->min_depth);
 
 	if (!btree_iter_node(iter, level))
@@ -1204,7 +1235,7 @@ static int btree_iter_traverse_one(struct btree_iter *iter)
 	 *
 	 * XXX correctly using BTREE_ITER_UPTODATE should make this unnecessary
 	 */
-	if (btree_iter_node(iter, iter->level)) {
+	if (is_btree_node(iter, iter->level)) {
 		BUG_ON(!btree_iter_pos_in_node(iter, iter->l[iter->level].b));
 
 		btree_iter_advance_to_pos(iter, &iter->l[iter->level], -1);
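This one-line change is the null-pointer-deref fix named in the commit message. The iterator's per-level pointer can hold a small sentinel value rather than a real node, and btree_iter_node() only checks the level bound, so the BUG_ON() that follows could dereference a sentinel. is_btree_node() additionally rejects the sentinels; roughly, as this era of the tree defines them (a hedged paraphrase from memory, not a verbatim quote):

/* Per-level pointer may be NULL, a real node, or a small sentinel such as: */
#define BTREE_ITER_NO_NODE_GET_LOCKS	((struct btree *) 1)

/* Returns the raw pointer; a sentinel still tests true: */
static inline struct btree *btree_iter_node(struct btree_iter *iter,
					    unsigned level)
{
	return level < BTREE_MAX_DEPTH ? iter->l[level].b : NULL;
}

/* Only true for an actual btree node, never for NULL or a sentinel: */
static inline bool is_btree_node(struct btree_iter *iter, unsigned level)
{
	return level < BTREE_MAX_DEPTH &&
		(unsigned long) iter->l[level].b >= 128;
}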
@@ -1257,13 +1288,14 @@ int __must_check __bch2_btree_iter_traverse(struct btree_iter *iter)
 	return ret;
 }
 
-static inline void bch2_btree_iter_checks(struct btree_iter *iter,
-					  enum btree_iter_type type)
+static inline void bch2_btree_iter_checks(struct btree_iter *iter)
 {
-	EBUG_ON(iter->btree_id >= BTREE_ID_NR);
-	EBUG_ON(btree_iter_type(iter) != type);
+	enum btree_iter_type type = btree_iter_type(iter);
 
-	BUG_ON(type == BTREE_ITER_KEYS &&
+	EBUG_ON(iter->btree_id >= BTREE_ID_NR);
+
+	BUG_ON((type == BTREE_ITER_KEYS ||
+		type == BTREE_ITER_CACHED) &&
 	       (bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
 		bkey_cmp(iter->pos, iter->k.p) > 0));
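The per-type assertion moves out to the callers (see the hunks below) as an explicit EBUG_ON. EBUG_ON is bcachefs's debug-build-only assertion; to the best of my knowledge it is defined along these lines (hedged sketch, not a verbatim quote):

/* Compiled to a real assertion only in debug builds: */
#ifdef CONFIG_BCACHEFS_DEBUG
#define EBUG_ON(cond)		BUG_ON(cond)
#else
#define EBUG_ON(cond)
#endif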
@@ -1278,7 +1310,8 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	struct btree *b;
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_NODES);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+	bch2_btree_iter_checks(iter);
 
 	if (iter->uptodate == BTREE_ITER_UPTODATE)
 		return iter->l[iter->level].b;
@@ -1306,7 +1339,8 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	struct btree *b;
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_NODES);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_NODES);
+	bch2_btree_iter_checks(iter);
 
 	/* already got to end? */
 	if (!btree_iter_node(iter, iter->level))
@@ -1534,7 +1568,8 @@ struct bkey_s_c bch2_btree_iter_peek(struct btree_iter *iter)
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+	bch2_btree_iter_checks(iter);
 
 	if (iter->uptodate == BTREE_ITER_UPTODATE &&
 	    !bkey_deleted(&iter->k))
@@ -1621,7 +1656,8 @@ struct bkey_s_c bch2_btree_iter_peek_with_updates(struct btree_iter *iter)
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+	bch2_btree_iter_checks(iter);
 
 	while (1) {
 		ret = bch2_btree_iter_traverse(iter);
@@ -1681,7 +1717,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+	bch2_btree_iter_checks(iter);
 
 	if (iter->uptodate == BTREE_ITER_UPTODATE &&
 	    !bkey_deleted(&iter->k))
@@ -1717,7 +1754,8 @@ struct bkey_s_c bch2_btree_iter_prev(struct btree_iter *iter)
 {
 	struct bpos pos = bkey_start_pos(&iter->k);
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+	bch2_btree_iter_checks(iter);
 
 	if (unlikely(!bkey_cmp(pos, POS_MIN)))
 		return bkey_s_c_null;
@@ -1798,7 +1836,8 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_KEYS);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_KEYS);
+	bch2_btree_iter_checks(iter);
 
 	if (iter->uptodate == BTREE_ITER_UPTODATE)
 		return btree_iter_peek_uptodate(iter);
@@ -1844,7 +1883,8 @@ struct bkey_s_c bch2_btree_iter_peek_cached(struct btree_iter *iter)
 	struct bkey_cached *ck;
 	int ret;
 
-	bch2_btree_iter_checks(iter, BTREE_ITER_CACHED);
+	EBUG_ON(btree_iter_type(iter) != BTREE_ITER_CACHED);
+	bch2_btree_iter_checks(iter);
 
 	ret = bch2_btree_iter_traverse(iter);
 	if (unlikely(ret))
@@ -2323,6 +2363,15 @@ int bch2_trans_exit(struct btree_trans *trans)
 	return trans->error ? -EIO : 0;
 }
 
+static void bch2_btree_iter_node_to_text(struct printbuf *out,
+					 struct btree_bkey_cached_common *_b,
+					 enum btree_iter_type type)
+{
+	pr_buf(out, " %px l=%u %s:",
+	       _b, _b->level, bch2_btree_ids[_b->btree_id]);
+	bch2_bpos_to_text(out, btree_node_pos(_b, type));
+}
+
 void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 {
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -2347,11 +2396,11 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 
 		for (l = 0; l < BTREE_MAX_DEPTH; l++) {
 			if (btree_node_locked(iter, l)) {
-				b = iter->l[l].b;
-
-				pr_buf(out, " %px %s l=%u ",
-				       b, btree_node_intent_locked(iter, l) ? "i" : "r", l);
-				bch2_bpos_to_text(out, b->key.k.p);
+				pr_buf(out, " %s l=%u ",
+				       btree_node_intent_locked(iter, l) ? "i" : "r", l);
+				bch2_btree_iter_node_to_text(out,
+						(void *) iter->l[l].b,
+						btree_iter_type(iter));
 				pr_buf(out, "\n");
 			}
 		}
@@ -2365,10 +2414,11 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct bch_fs *c)
 		       bch2_btree_ids[trans->locking_btree_id]);
 		bch2_bpos_to_text(out, trans->locking_pos);
 
-		pr_buf(out, " node %px l=%u %s:",
-		       b, b->c.level,
-		       bch2_btree_ids[b->c.btree_id]);
-		bch2_bpos_to_text(out, b->key.k.p);
+		pr_buf(out, " node ");
+		bch2_btree_iter_node_to_text(out,
+				(void *) b,
+				btree_iter_type(&trans->iters[trans->locking_iter_idx]));
 		pr_buf(out, "\n");
 	}
 }
@@ -1,5 +1,6 @@
 
 #include "bcachefs.h"
+#include "btree_cache.h"
 #include "btree_iter.h"
 #include "btree_key_cache.h"
 #include "btree_locking.h"
@@ -492,3 +493,27 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *c)
 {
 	return rhashtable_init(&c->table, &bch2_btree_key_cache_params);
 }
+
+void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
+{
+	struct bucket_table *tbl;
+	struct bkey_cached *ck;
+	struct rhash_head *pos;
+	size_t i;
+
+	mutex_lock(&c->lock);
+	tbl = rht_dereference_rcu(c->table.tbl, &c->table);
+
+	for (i = 0; i < tbl->size; i++) {
+		rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+			pr_buf(out, "%s:",
+			       bch2_btree_ids[ck->key.btree_id]);
+			bch2_bpos_to_text(out, ck->key.pos);
+
+			if (test_bit(BKEY_CACHED_DIRTY, &ck->flags))
+				pr_buf(out, " journal seq %llu", ck->journal.seq);
+			pr_buf(out, "\n");
+		}
+	}
+	mutex_unlock(&c->lock);
+}
@@ -20,4 +20,6 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *);
 void bch2_fs_btree_key_cache_init_early(struct btree_key_cache *);
 int bch2_fs_btree_key_cache_init(struct btree_key_cache *);
 
+void bch2_btree_key_cache_to_text(struct printbuf *, struct btree_key_cache *);
+
 #endif /* _BCACHEFS_BTREE_KEY_CACHE_H */
@@ -1398,14 +1398,14 @@ int bch2_btree_split_leaf(struct bch_fs *c, struct btree_iter *iter,
 	struct btree_update *as;
 	struct closure cl;
 	int ret = 0;
-	struct btree_iter *linked;
+	struct btree_insert_entry *i;
 
 	/*
 	 * We already have a disk reservation and open buckets pinned; this
 	 * allocation must not block:
 	 */
-	trans_for_each_iter(trans, linked)
-		if (linked->btree_id == BTREE_ID_EXTENTS)
+	trans_for_each_update(trans, i)
+		if (btree_node_type_needs_gc(i->iter->btree_id))
 			flags |= BTREE_INSERT_USE_RESERVE;
 
 	closure_init_stack(&cl);
@@ -374,6 +374,11 @@ static inline int is_fragmented_bucket(struct bucket_mark m,
 	return 0;
 }
 
+static inline int bucket_stripe_sectors(struct bucket_mark m)
+{
+	return m.stripe ? m.dirty_sectors : 0;
+}
+
 static inline enum bch_data_type bucket_type(struct bucket_mark m)
 {
 	return m.cached_sectors && !m.dirty_sectors
@@ -441,33 +446,35 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
 			   struct bucket_mark old, struct bucket_mark new,
 			   bool gc)
 {
-	struct bch_dev_usage *dev_usage;
+	struct bch_dev_usage *u;
 
 	percpu_rwsem_assert_held(&c->mark_lock);
 
 	preempt_disable();
-	dev_usage = this_cpu_ptr(ca->usage[gc]);
+	u = this_cpu_ptr(ca->usage[gc]);
 
 	if (bucket_type(old))
-		account_bucket(fs_usage, dev_usage, bucket_type(old),
+		account_bucket(fs_usage, u, bucket_type(old),
 			       -1, -ca->mi.bucket_size);
 
 	if (bucket_type(new))
-		account_bucket(fs_usage, dev_usage, bucket_type(new),
+		account_bucket(fs_usage, u, bucket_type(new),
 			       1, ca->mi.bucket_size);
 
-	dev_usage->buckets_alloc +=
+	u->buckets_alloc +=
 		(int) new.owned_by_allocator - (int) old.owned_by_allocator;
-	dev_usage->buckets_ec +=
-		(int) new.stripe - (int) old.stripe;
-	dev_usage->buckets_unavailable +=
+	u->buckets_unavailable +=
 		is_unavailable_bucket(new) - is_unavailable_bucket(old);
 
-	dev_usage->sectors[old.data_type] -= old.dirty_sectors;
-	dev_usage->sectors[new.data_type] += new.dirty_sectors;
-	dev_usage->sectors[BCH_DATA_CACHED] +=
+	u->buckets_ec += (int) new.stripe - (int) old.stripe;
+	u->sectors_ec += bucket_stripe_sectors(new) -
+			 bucket_stripe_sectors(old);
+
+	u->sectors[old.data_type] -= old.dirty_sectors;
+	u->sectors[new.data_type] += new.dirty_sectors;
+	u->sectors[BCH_DATA_CACHED] +=
 		(int) new.cached_sectors - (int) old.cached_sectors;
-	dev_usage->sectors_fragmented +=
+	u->sectors_fragmented +=
 		is_fragmented_bucket(new, ca) - is_fragmented_bucket(old, ca);
 	preempt_enable();
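bch2_dev_usage_update() applies every counter as a signed old/new delta, so a single bucket transition updates all derived statistics consistently and transitions compose. A runnable sketch of this delta-accounting style, using the new sectors_ec counter as the example (simplified stand-in types, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct mark  { unsigned stripe; unsigned dirty_sectors; };
struct usage { int64_t sectors_ec; };

static int stripe_sectors(struct mark m)
{
	return m.stripe ? (int) m.dirty_sectors : 0;
}

/* Apply the transition old -> new as a signed delta: */
static void usage_update(struct usage *u, struct mark old, struct mark new)
{
	u->sectors_ec += stripe_sectors(new) - stripe_sectors(old);
}

int main(void)
{
	struct usage u = { 0 };

	/* Bucket becomes part of a stripe with 128 dirty sectors ... */
	usage_update(&u, (struct mark) { 0, 128 }, (struct mark) { 1, 128 });
	/* ... and later leaves the stripe again: */
	usage_update(&u, (struct mark) { 1, 128 }, (struct mark) { 0, 128 });

	printf("%lld\n", (long long) u.sectors_ec);	/* back to 0 */
	return 0;
}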
@@ -1993,8 +2000,6 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	int ret = -ENOMEM;
 	unsigned i;
 
-	lockdep_assert_held(&c->state_lock);
-
 	memset(&free, 0, sizeof(free));
 	memset(&free_inc, 0, sizeof(free_inc));
 	memset(&alloc_heap, 0, sizeof(alloc_heap));
@@ -2021,6 +2026,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	bch2_copygc_stop(ca);
 
 	if (resize) {
+		down_write(&c->gc_lock);
 		down_write(&ca->bucket_lock);
 		percpu_down_write(&c->mark_lock);
 	}
@@ -2043,8 +2049,10 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 
 	swap(ca->buckets_nouse, buckets_nouse);
 
-	if (resize)
+	if (resize) {
 		percpu_up_write(&c->mark_lock);
+		up_write(&c->gc_lock);
+	}
 
 	spin_lock(&c->freelist_lock);
 	for (i = 0; i < RESERVE_NR; i++) {
@@ -53,12 +53,14 @@ struct bucket_array {
 struct bch_dev_usage {
 	u64			buckets[BCH_DATA_NR];
 	u64			buckets_alloc;
-	u64			buckets_ec;
 	u64			buckets_unavailable;
 
 	/* _compressed_ sectors: */
 	u64			sectors[BCH_DATA_NR];
 	u64			sectors_fragmented;
+
+	u64			buckets_ec;
+	u64			sectors_ec;
 };
 
 struct bch_fs_usage {
@@ -470,9 +470,12 @@ static long bch2_ioctl_dev_usage(struct bch_fs *c,
 
 	src = bch2_dev_usage_read(c, ca);
 
-	arg.state		= ca->mi.state;
-	arg.bucket_size		= ca->mi.bucket_size;
-	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;
+	arg.state		= ca->mi.state;
+	arg.bucket_size		= ca->mi.bucket_size;
+	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;
+	arg.available_buckets	= arg.nr_buckets - src.buckets_unavailable;
+	arg.ec_buckets		= src.buckets_ec;
+	arg.ec_sectors		= src.sectors_ec;
 
 	for (i = 0; i < BCH_DATA_NR; i++) {
 		arg.buckets[i] = src.buckets[i];
@@ -97,10 +97,10 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 	console_lock();
 
 	printk(KERN_ERR "*** in memory:\n");
-	bch2_dump_bset(b, inmemory, 0);
+	bch2_dump_bset(c, b, inmemory, 0);
 
 	printk(KERN_ERR "*** read back in:\n");
-	bch2_dump_bset(v, sorted, 0);
+	bch2_dump_bset(c, v, sorted, 0);
 
 	while (offset < b->written) {
 		if (!offset) {
@@ -117,7 +117,7 @@ void __bch2_btree_verify(struct bch_fs *c, struct btree *b)
 		}
 
 		printk(KERN_ERR "*** on disk block %u:\n", offset);
-		bch2_dump_bset(b, i, offset);
+		bch2_dump_bset(c, b, i, offset);
 
 		offset += sectors;
 	}
@@ -14,6 +14,7 @@
 #include "btree_cache.h"
 #include "btree_io.h"
 #include "btree_iter.h"
+#include "btree_key_cache.h"
 #include "btree_update.h"
 #include "btree_update_interior.h"
 #include "btree_gc.h"
@@ -165,6 +166,7 @@ read_attribute(journal_debug);
 read_attribute(journal_pins);
 read_attribute(btree_updates);
 read_attribute(dirty_btree_nodes);
+read_attribute(btree_key_cache);
 read_attribute(btree_transactions);
 
 read_attribute(internal_uuid);
@@ -401,6 +403,14 @@ SHOW(bch2_fs)
 
 	if (attr == &sysfs_dirty_btree_nodes)
 		return bch2_dirty_btree_nodes_print(c, buf);
 
+	if (attr == &sysfs_btree_key_cache) {
+		struct printbuf out = _PBUF(buf, PAGE_SIZE);
+
+		bch2_btree_key_cache_to_text(&out, &c->btree_key_cache);
+		return out.pos - buf;
+	}
+
 	if (attr == &sysfs_btree_transactions) {
 		struct printbuf out = _PBUF(buf, PAGE_SIZE);
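The new show hook returns `out.pos - buf`: a printbuf is just a write cursor over a caller-supplied buffer, and the sysfs contract wants the number of bytes written. A runnable sketch of that idiom with a simplified printbuf (illustrative only; truncation handling elided):

#include <stdarg.h>
#include <stdio.h>

struct printbuf { char *pos; char *end; };

#define PBUF_INIT(buf, size) ((struct printbuf) { (buf), (buf) + (size) })

static void pr_buf(struct printbuf *out, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	out->pos += vsnprintf(out->pos, out->end - out->pos, fmt, args);
	va_end(args);
}

int main(void)
{
	char buf[4096];
	struct printbuf out = PBUF_INIT(buf, sizeof(buf));

	pr_buf(&out, "%s: %llu\n", "inodes", 42ULL);

	/* What a sysfs show() would return: bytes written into buf. */
	printf("%td bytes\n", out.pos - buf);
	return 0;
}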
@@ -571,6 +581,7 @@ struct attribute *bch2_fs_internal_files[] = {
 	&sysfs_journal_pins,
 	&sysfs_btree_updates,
 	&sysfs_dirty_btree_nodes,
+	&sysfs_btree_key_cache,
 	&sysfs_btree_transactions,
 
 	&sysfs_read_realloc_races,
@@ -835,6 +846,7 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 	       " meta: %llu\n"
 	       " user: %llu\n"
 	       " cached: %llu\n"
+	       " erasure coded: %llu\n"
 	       " fragmented: %llu\n"
 	       " copygc threshold: %llu\n"
 	       "freelist_wait: %s\n"
@@ -861,6 +873,7 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
 	       stats.sectors[BCH_DATA_BTREE],
 	       stats.sectors[BCH_DATA_USER],
 	       stats.sectors[BCH_DATA_CACHED],
+	       stats.sectors_ec,
 	       stats.sectors_fragmented,
 	       ca->copygc_threshold,
 	       c->freelist_wait.list.first ? "waiting" : "empty",