Update bcachefs sources to 91e6c3e0d5 bcachefs: Gap buffer for journal keys

Kent Overstreet 2022-04-04 13:48:45 -04:00
parent 32aabbc4e5
commit 498874fdb7
13 changed files with 243 additions and 125 deletions

View File

@@ -1 +1 @@
ab3b6e7dd69c5cd5dfd96fd265ade6897720f671
91e6c3e0d5ac0d29a9c97e71a1ba7abb346b4991

View File

@@ -82,6 +82,26 @@ DECLARE_EVENT_CLASS(bio,
(unsigned long long)__entry->sector, __entry->nr_sector)
);
/* super-io.c: */
TRACE_EVENT(write_super,
TP_PROTO(struct bch_fs *c, unsigned long ip),
TP_ARGS(c, ip),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(unsigned long, ip )
),
TP_fast_assign(
__entry->dev = c->dev;
__entry->ip = ip;
),
TP_printk("%d,%d for %pS",
MAJOR(__entry->dev), MINOR(__entry->dev),
(void *) __entry->ip)
);
/* io.c: */
DEFINE_EVENT(bio, read_split,
@@ -353,31 +373,23 @@ DEFINE_EVENT(btree_node, btree_set_root,
);
TRACE_EVENT(btree_cache_scan,
TP_PROTO(unsigned long nr_to_scan_pages,
unsigned long nr_to_scan_nodes,
unsigned long can_free_nodes,
long ret),
TP_ARGS(nr_to_scan_pages, nr_to_scan_nodes, can_free_nodes, ret),
TP_PROTO(long nr_to_scan, long can_free, long ret),
TP_ARGS(nr_to_scan, can_free, ret),
TP_STRUCT__entry(
__field(unsigned long, nr_to_scan_pages )
__field(unsigned long, nr_to_scan_nodes )
__field(unsigned long, can_free_nodes )
__field(long, nr_to_scan )
__field(long, can_free )
__field(long, ret )
),
TP_fast_assign(
__entry->nr_to_scan_pages = nr_to_scan_pages;
__entry->nr_to_scan_nodes = nr_to_scan_nodes;
__entry->can_free_nodes = can_free_nodes;
__entry->nr_to_scan = nr_to_scan;
__entry->can_free = can_free;
__entry->ret = ret;
),
TP_printk("scanned for %lu pages, %lu nodes, can free %lu nodes, ret %li",
__entry->nr_to_scan_pages,
__entry->nr_to_scan_nodes,
__entry->can_free_nodes,
__entry->ret)
TP_printk("scanned for %li nodes, can free %li, ret %li",
__entry->nr_to_scan, __entry->can_free, __entry->ret)
);
TRACE_EVENT(btree_node_relock_fail,

View File

@@ -548,6 +548,12 @@ struct journal_keys {
u32 journal_seq;
u32 journal_offset;
} *d;
/*
* Gap buffer: instead of all the empty space in the array being at the
* end of the buffer - from @nr to @size - the empty space is at @gap.
* This means that sequential insertions are O(n) instead of O(n^2).
*/
size_t gap;
size_t nr;
size_t size;
u64 journal_seq_base;
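
For readers new to the structure: the free slots live at logical index @gap rather than at the end, so inserting keys in sorted order only ever writes at the gap and never shifts the tail (hence the O(n) claim above). A minimal standalone sketch of the logical-to-physical translation, mirroring the idx_to_pos() helper added in recovery.c below (array contents hypothetical):

#include <assert.h>
#include <stddef.h>

struct gap_buf {
	int	*d;	/* backing array of @size slots */
	size_t	gap;	/* logical index of the free slots */
	size_t	nr;	/* live elements */
	size_t	size;	/* total slots */
};

/* Logical index -> physical array position, skipping over the gap: */
static size_t idx_to_pos(const struct gap_buf *b, size_t idx)
{
	return idx >= b->gap ? idx + (b->size - b->nr) : idx;
}

int main(void)
{
	int d[8] = { 1, 2, 3, 0, 0, 0, 4, 5 };	/* gap occupies slots 3..5 */
	struct gap_buf b = { .d = d, .gap = 3, .nr = 5, .size = 8 };

	assert(b.d[idx_to_pos(&b, 2)] == 3);	/* before the gap: unchanged */
	assert(b.d[idx_to_pos(&b, 3)] == 4);	/* after the gap: shifted by size - nr */
	assert(b.d[idx_to_pos(&b, 4)] == 5);
	return 0;
}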

View File

@@ -281,7 +281,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
struct btree_cache *bc = &c->btree_cache;
struct btree *b, *t;
unsigned long nr = sc->nr_to_scan;
unsigned long can_free;
unsigned long can_free = 0;
unsigned long touched = 0;
unsigned long freed = 0;
unsigned i, flags;
@@ -305,7 +305,6 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
* succeed, so that inserting keys into the btree can always succeed and
* IO can always make forward progress:
*/
nr /= btree_pages(c);
can_free = btree_cache_can_free(bc);
nr = min_t(unsigned long, nr, can_free);
@@ -375,13 +374,10 @@ touched:
mutex_unlock(&bc->lock);
out:
ret = (unsigned long) freed * btree_pages(c);
ret = freed;
memalloc_nofs_restore(flags);
out_norestore:
trace_btree_cache_scan(sc->nr_to_scan,
sc->nr_to_scan / btree_pages(c),
btree_cache_can_free(bc),
ret);
trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
return ret;
}
@@ -395,7 +391,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
if (bch2_btree_shrinker_disabled)
return 0;
return btree_cache_can_free(bc) * btree_pages(c);
return btree_cache_can_free(bc);
}
void bch2_fs_btree_cache_exit(struct bch_fs *c)
@@ -482,7 +478,6 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
bc->shrink.count_objects = bch2_btree_cache_count;
bc->shrink.scan_objects = bch2_btree_cache_scan;
bc->shrink.seeks = 4;
bc->shrink.batch = btree_pages(c) * 2;
ret = register_shrinker(&bc->shrink);
out:
pr_verbose_init(c->opts, "ret %i", ret);

View File

@@ -620,13 +620,13 @@ int bch2_mark_alloc(struct btree_trans *trans,
return 0;
}
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
size_t b, enum bch_data_type data_type,
unsigned sectors, struct gc_pos pos,
unsigned flags)
{
struct bucket old, new, *g;
bool overflow;
int ret = 0;
BUG_ON(!(flags & BTREE_TRIGGER_GC));
BUG_ON(data_type != BCH_DATA_sb &&
@@ -636,7 +636,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
* Backup superblock might be past the end of our normal usable space:
*/
if (b >= ca->mi.nbuckets)
return;
return 0;
percpu_down_read(&c->mark_lock);
g = gc_bucket(ca, b);
@@ -644,35 +644,41 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
bucket_lock(g);
old = *g;
if (bch2_fs_inconsistent_on(g->data_type &&
g->data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_types[g->data_type],
bch2_data_types[data_type])) {
ret = -EIO;
goto err;
}
if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
ca->dev_idx, b, g->gen,
bch2_data_types[g->data_type ?: data_type],
g->dirty_sectors, sectors)) {
ret = -EIO;
goto err;
}
g->data_type = data_type;
g->dirty_sectors += sectors;
overflow = g->dirty_sectors < sectors;
new = *g;
err:
bucket_unlock(g);
bch2_fs_inconsistent_on(old.data_type &&
old.data_type != data_type, c,
"different types of data in same bucket: %s, %s",
bch2_data_types[old.data_type],
bch2_data_types[data_type]);
bch2_fs_inconsistent_on(overflow, c,
"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
ca->dev_idx, b, new.gen,
bch2_data_types[old.data_type ?: data_type],
old.dirty_sectors, sectors);
if (!ret)
bch2_dev_usage_update_m(c, ca, old, new, 0, true);
percpu_up_read(&c->mark_lock);
return ret;
}
static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
{
EBUG_ON(sectors < 0);
return p.crc.compression_type &&
p.crc.compression_type != BCH_COMPRESSION_TYPE_incompressible
return crc_is_compressed(p.crc)
? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
p.crc.uncompressed_size)
: sectors;
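
The proration itself is unchanged by the switch to crc_is_compressed(): live sectors are scaled by the extent's compression ratio and rounded up. A worked check of that arithmetic, with a userspace stand-in for DIV_ROUND_UP_ULL() and hypothetical sizes:

#include <assert.h>
#include <stdint.h>

static uint64_t div_round_up(uint64_t n, uint64_t d)
{
	return (n + d - 1) / d;
}

int main(void)
{
	/* A 128-sector extent compressed to 32 sectors on disk: charging
	 * 64 live (uncompressed) sectors costs 64 * 32 / 128 = 16: */
	assert(div_round_up(64 * 32, 128) == 16);

	/* Rounding up: 65 live sectors cost 17 disk sectors, not 16: */
	assert(div_round_up(65 * 32, 128) == 17);
	return 0;
}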
@@ -808,25 +814,22 @@ static int mark_stripe_bucket(struct btree_trans *trans,
old = *g;
ret = check_bucket_ref(c, k, ptr, sectors, data_type,
new.gen, new.data_type,
new.dirty_sectors, new.cached_sectors);
if (ret) {
bucket_unlock(g);
g->gen, g->data_type,
g->dirty_sectors, g->cached_sectors);
if (ret)
goto err;
}
new.dirty_sectors += sectors;
if (data_type)
new.data_type = data_type;
g->data_type = data_type;
g->dirty_sectors += sectors;
g->stripe = k.k->p.offset;
g->stripe_redundancy = s->nr_redundant;
new = *g;
bucket_unlock(g);
bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
err:
bucket_unlock(g);
if (!ret)
bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
percpu_up_read(&c->mark_lock);
printbuf_exit(&buf);
return ret;
@@ -872,29 +875,22 @@ static int bch2_mark_pointer(struct btree_trans *trans,
percpu_down_read(&c->mark_lock);
g = PTR_GC_BUCKET(ca, &p.ptr);
bucket_lock(g);
old = *g;
bucket_data_type = g->data_type;
ret = __mark_pointer(trans, k, &p.ptr, sectors,
data_type, g->gen,
&bucket_data_type,
&g->dirty_sectors,
&g->cached_sectors);
if (ret) {
bucket_unlock(g);
goto err;
}
if (!ret)
g->data_type = bucket_data_type;
new = *g;
bucket_unlock(g);
if (!ret)
bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
err:
percpu_up_read(&c->mark_lock);
return ret;

View File

@@ -194,7 +194,7 @@ bch2_fs_usage_read_short(struct bch_fs *);
void bch2_fs_usage_initialize(struct bch_fs *);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
size_t, enum bch_data_type, unsigned,
struct gc_pos, unsigned);

View File

@@ -1981,22 +1981,28 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
struct printbuf buf = PRINTBUF;
int ret;
bch2_bkey_val_to_text(&buf, c, k);
bch2_fs_inconsistent(c, "Attempting to read from stale dirty pointer: %s", buf.buf);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
POS(ptr.dev, PTR_BUCKET_NR(ca, &ptr)),
PTR_BUCKET_POS(c, &ptr),
BTREE_ITER_CACHED);
ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (ret)
goto out;
pr_buf(&buf, "Attempting to read from stale dirty pointer:");
pr_indent_push(&buf, 2);
pr_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
bch_err(c, "%s", buf.buf);
bch_err(c, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
pr_newline(&buf);
pr_buf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (!ret) {
pr_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
}
bch2_fs_inconsistent(c, "%s", buf.buf);
bch2_trans_iter_exit(trans, &iter);
out:
printbuf_exit(&buf);
}

View File

@@ -964,6 +964,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
int bch2_dev_journal_alloc(struct bch_dev *ca)
{
unsigned nr;
int ret;
if (dynamic_fault("bcachefs:add:journal_alloc"))
return -ENOMEM;
@@ -980,7 +981,15 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
min(1 << 13,
(1 << 24) / ca->mi.bucket_size));
return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
if (ca->fs)
mutex_lock(&ca->fs->sb_lock);
ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
if (ca->fs)
mutex_unlock(&ca->fs->sb_lock);
return ret;
}
/* startup/shutdown: */

View File

@@ -186,6 +186,7 @@ int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca)
struct bch_sb_field_journal_v2 *j;
unsigned i, dst = 0, nr = 1;
if (c)
lockdep_assert_held(&c->sb_lock);
if (!ja->nr) {

View File

@@ -72,58 +72,97 @@ static int journal_key_cmp(const struct journal_key *l, const struct journal_key
return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
}
size_t bch2_journal_key_search(struct journal_keys *journal_keys,
static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
{
size_t gap_size = keys->size - keys->nr;
if (idx >= keys->gap)
idx += gap_size;
return idx;
}
static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
{
return keys->d + idx_to_pos(keys, idx);
}
size_t bch2_journal_key_search(struct journal_keys *keys,
enum btree_id id, unsigned level,
struct bpos pos)
{
size_t l = 0, r = journal_keys->nr, m;
size_t l = 0, r = keys->nr, m;
while (l < r) {
m = l + ((r - l) >> 1);
if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
l = m + 1;
else
r = m;
}
BUG_ON(l < journal_keys->nr &&
__journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);
BUG_ON(l < keys->nr &&
__journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);
BUG_ON(l &&
__journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);
__journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);
return l;
return idx_to_pos(keys, l);
}
struct bkey_i *bch2_journal_keys_peek(struct bch_fs *c, enum btree_id btree_id,
unsigned level, struct bpos pos)
{
struct journal_keys *keys = &c->journal_keys;
struct journal_key *end = keys->d + keys->nr;
struct journal_key *k = keys->d +
bch2_journal_key_search(keys, btree_id, level, pos);
size_t idx = bch2_journal_key_search(keys, btree_id, level, pos);
while (k < end && k->overwritten)
k++;
while (idx < keys->size &&
keys->d[idx].overwritten) {
idx++;
if (idx == keys->gap)
idx += keys->size - keys->nr;
}
if (k < end &&
k->btree_id == btree_id &&
k->level == level)
return k->k;
if (idx < keys->size &&
keys->d[idx].btree_id == btree_id &&
keys->d[idx].level == level)
return keys->d[idx].k;
return NULL;
}
static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
static void journal_iters_fix(struct bch_fs *c)
{
struct bkey_i *n = iter->keys->d[idx].k;
struct btree_and_journal_iter *biter =
container_of(iter, struct btree_and_journal_iter, journal);
struct journal_keys *keys = &c->journal_keys;
/* The key we just inserted is immediately before the gap: */
struct journal_key *n = &keys->d[keys->gap - 1];
size_t gap_end = keys->gap + (keys->size - keys->nr);
struct btree_and_journal_iter *iter;
if (iter->idx > idx ||
(iter->idx == idx &&
biter->last &&
bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
iter->idx++;
/*
* If an iterator points one after the key we just inserted,
* and the key we just inserted compares >= the iterator's position,
* decrement the iterator so it points at the key we just inserted:
*/
list_for_each_entry(iter, &c->journal_iters, journal.list)
if (iter->journal.idx == gap_end &&
iter->last &&
iter->b->c.btree_id == n->btree_id &&
iter->b->c.level == n->level &&
bpos_cmp(n->k->k.p, iter->unpacked.p) >= 0)
iter->journal.idx = keys->gap - 1;
}
static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
{
struct journal_keys *keys = &c->journal_keys;
struct journal_iter *iter;
size_t gap_size = keys->size - keys->nr;
list_for_each_entry(iter, &c->journal_iters, list) {
if (iter->idx > old_gap)
iter->idx -= gap_size;
if (iter->idx >= new_gap)
iter->idx += gap_size;
}
}
int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
@@ -141,12 +180,11 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
.journal_seq = U32_MAX,
};
struct journal_keys *keys = &c->journal_keys;
struct journal_iter *iter;
size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);
BUG_ON(test_bit(BCH_FS_RW, &c->flags));
if (idx < keys->nr &&
if (idx < keys->size &&
journal_key_cmp(&n, &keys->d[idx]) == 0) {
if (keys->d[idx].allocated)
kfree(keys->d[idx].k);
@@ -154,6 +192,9 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
return 0;
}
if (idx > keys->gap)
idx -= keys->size - keys->nr;
if (keys->nr == keys->size) {
struct journal_keys new_keys = {
.nr = keys->nr,
@@ -168,15 +209,24 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
return -ENOMEM;
}
/* Since @keys was full, there was no gap: */
memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
kvfree(keys->d);
*keys = new_keys;
/* And now the gap is at the end: */
keys->gap = keys->nr;
}
array_insert_item(keys->d, keys->nr, idx, n);
journal_iters_move_gap(c, keys->gap, idx);
list_for_each_entry(iter, &c->journal_iters, list)
journal_iter_fix(c, iter, idx);
move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
keys->gap = idx;
keys->nr++;
keys->d[keys->gap++] = n;
journal_iters_fix(c);
return 0;
}
@@ -220,7 +270,7 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
struct journal_keys *keys = &c->journal_keys;
size_t idx = bch2_journal_key_search(keys, btree, level, pos);
if (idx < keys->nr &&
if (idx < keys->size &&
keys->d[idx].btree_id == btree &&
keys->d[idx].level == level &&
!bpos_cmp(keys->d[idx].k->k.p, pos))
@@ -246,8 +296,11 @@ static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
static void bch2_journal_iter_advance(struct journal_iter *iter)
{
if (iter->idx < iter->keys->nr)
if (iter->idx < iter->keys->size) {
iter->idx++;
if (iter->idx == iter->keys->gap)
iter->idx += iter->keys->size - iter->keys->nr;
}
}
static void bch2_journal_iter_exit(struct journal_iter *iter)
@@ -409,6 +462,9 @@ void bch2_journal_keys_free(struct journal_keys *keys)
{
struct journal_key *i;
move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
keys->gap = keys->nr;
for (i = keys->d; i < keys->d + keys->nr; i++)
if (i->allocated)
kfree(i->k);
@@ -478,6 +534,7 @@ static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
}
keys.nr = dst - keys.d;
keys.gap = keys.nr;
err:
return keys;
}
@@ -538,6 +595,9 @@ static int bch2_journal_replay(struct bch_fs *c)
size_t i;
int ret;
move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
keys->gap = keys->nr;
keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
if (!keys_sorted)
return -ENOMEM;
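
The iterator adjustment in journal_iters_move_gap() earlier in this file's diff is pure index arithmetic: a physical index past the old gap is first translated back to a logical index, then re-translated against the new gap position. A standalone check of that remapping (sizes hypothetical):

#include <assert.h>
#include <stddef.h>

/* Remap a physical iterator index across a gap move, following the
 * comparisons in journal_iters_move_gap() above: */
static size_t remap(size_t idx, size_t old_gap, size_t new_gap, size_t gap_size)
{
	if (idx > old_gap)
		idx -= gap_size;	/* physical -> logical */
	if (idx >= new_gap)
		idx += gap_size;	/* logical -> new physical */
	return idx;
}

int main(void)
{
	/* 8 slots, 5 live elements, so gap_size = 3; gap moves from 3 to 1: */
	assert(remap(0, 3, 1, 3) == 0);	/* before both gap positions: unchanged */
	assert(remap(2, 3, 1, 3) == 5);	/* between new and old gap: shifts right */
	assert(remap(6, 3, 1, 3) == 6);	/* past both gap positions: unchanged */
	return 0;
}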

View File

@@ -2,9 +2,6 @@
#ifndef _BCACHEFS_RECOVERY_H
#define _BCACHEFS_RECOVERY_H
#define for_each_journal_key(keys, i) \
for (i = (keys).d; i < (keys).d + (keys).nr; (i)++)
struct journal_iter {
struct list_head list;
enum btree_id btree_id;

View File

@@ -21,6 +21,8 @@
#include <linux/backing-dev.h>
#include <linux/sort.h>
#include <trace/events/bcachefs.h>
const char * const bch2_sb_fields[] = {
#define x(name, nr) #name,
BCH_SB_FIELDS()
@@ -797,6 +799,8 @@ int bch2_write_super(struct bch_fs *c)
unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
int ret = 0;
trace_write_super(c, _RET_IP_);
if (c->opts.very_degraded)
degraded_flags |= BCH_FORCE_IF_LOST;
@@ -831,6 +835,13 @@ int bch2_write_super(struct bch_fs *c)
if (c->opts.nochanges)
goto out;
/*
* Defer writing the superblock until filesystem initialization is
* complete - don't write out a partly initialized superblock:
*/
if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
goto out;
for_each_online_member(ca, c, i) {
__set_bit(ca->dev_idx, sb_written.d);
ca->sb_write_error = 0;

View File

@@ -806,6 +806,31 @@ do { \
#define array_remove_item(_array, _nr, _pos) \
array_remove_items(_array, _nr, _pos, 1)
static inline void __move_gap(void *array, size_t element_size,
size_t nr, size_t size,
size_t old_gap, size_t new_gap)
{
size_t gap_end = old_gap + size - nr;
if (new_gap < old_gap) {
size_t move = old_gap - new_gap;
memmove(array + element_size * (gap_end - move),
array + element_size * (old_gap - move),
element_size * move);
} else if (new_gap > old_gap) {
size_t move = new_gap - old_gap;
memmove(array + element_size * old_gap,
array + element_size * gap_end,
element_size * move);
}
}
/* Move the gap in a gap buffer: */
#define move_gap(_array, _nr, _size, _old_gap, _new_gap) \
__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
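
A minimal userspace exercise of the helper above (the char * cursor stands in for the GNU void-pointer arithmetic the kernel version relies on): it moves the gap of a small int array left, then flattens it to the end the way bch2_journal_replay() does before sorting.

#include <assert.h>
#include <stddef.h>
#include <string.h>

static void move_gap_demo(void *array, size_t element_size,
			  size_t nr, size_t size,
			  size_t old_gap, size_t new_gap)
{
	char *a = array;
	size_t gap_end = old_gap + size - nr;

	if (new_gap < old_gap) {
		size_t move = old_gap - new_gap;

		memmove(a + element_size * (gap_end - move),
			a + element_size * (old_gap - move),
			element_size * move);
	} else if (new_gap > old_gap) {
		size_t move = new_gap - old_gap;

		memmove(a + element_size * old_gap,
			a + element_size * gap_end,
			element_size * move);
	}
}

int main(void)
{
	/* 5 live ints in 8 slots, gap at logical index 3 (slots 3..5): */
	int d[8] = { 1, 2, 3, 0, 0, 0, 4, 5 };

	/* Move the gap left to index 1: elements 2 and 3 shift right. */
	move_gap_demo(d, sizeof(d[0]), 5, 8, 3, 1);
	assert(d[0] == 1 && d[4] == 2 && d[5] == 3 && d[6] == 4 && d[7] == 5);

	/* Move it to the end: the live elements become contiguous again. */
	move_gap_demo(d, sizeof(d[0]), 5, 8, 1, 5);
	assert(d[0] == 1 && d[1] == 2 && d[2] == 3 && d[3] == 4 && d[4] == 5);
	return 0;
}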
#define bubble_sort(_base, _nr, _cmp) \
do { \
ssize_t _i, _end; \