mirror of https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-23 00:00:02 +03:00
Update bcachefs sources to 91e6c3e0d5 bcachefs: Gap buffer for journal keys
parent 32aabbc4e5
commit 498874fdb7
@@ -1 +1 @@
-ab3b6e7dd69c5cd5dfd96fd265ade6897720f671
+91e6c3e0d5ac0d29a9c97e71a1ba7abb346b4991
@@ -82,6 +82,26 @@ DECLARE_EVENT_CLASS(bio,
 		  (unsigned long long)__entry->sector, __entry->nr_sector)
 );
 
+/* super-io.c: */
+TRACE_EVENT(write_super,
+	TP_PROTO(struct bch_fs *c, unsigned long ip),
+	TP_ARGS(c, ip),
+
+	TP_STRUCT__entry(
+		__field(dev_t,		dev	)
+		__field(unsigned long,	ip	)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= c->dev;
+		__entry->ip	= ip;
+	),
+
+	TP_printk("%d,%d for %pS",
+		  MAJOR(__entry->dev), MINOR(__entry->dev),
+		  (void *) __entry->ip)
+);
+
 /* io.c: */
 
 DEFINE_EVENT(bio, read_split,
@@ -353,31 +373,23 @@ DEFINE_EVENT(btree_node, btree_set_root,
 );
 
 TRACE_EVENT(btree_cache_scan,
-	TP_PROTO(unsigned long nr_to_scan_pages,
-		 unsigned long nr_to_scan_nodes,
-		 unsigned long can_free_nodes,
-		 long ret),
-	TP_ARGS(nr_to_scan_pages, nr_to_scan_nodes, can_free_nodes, ret),
+	TP_PROTO(long nr_to_scan, long can_free, long ret),
+	TP_ARGS(nr_to_scan, can_free, ret),
 
 	TP_STRUCT__entry(
-		__field(unsigned long,	nr_to_scan_pages	)
-		__field(unsigned long,	nr_to_scan_nodes	)
-		__field(unsigned long,	can_free_nodes		)
-		__field(long,		ret			)
+		__field(long,	nr_to_scan	)
+		__field(long,	can_free	)
+		__field(long,	ret		)
 	),
 
 	TP_fast_assign(
-		__entry->nr_to_scan_pages	= nr_to_scan_pages;
-		__entry->nr_to_scan_nodes	= nr_to_scan_nodes;
-		__entry->can_free_nodes		= can_free_nodes;
-		__entry->ret			= ret;
+		__entry->nr_to_scan	= nr_to_scan;
+		__entry->can_free	= can_free;
+		__entry->ret		= ret;
 	),
 
-	TP_printk("scanned for %lu pages, %lu nodes, can free %lu nodes, ret %li",
-		  __entry->nr_to_scan_pages,
-		  __entry->nr_to_scan_nodes,
-		  __entry->can_free_nodes,
-		  __entry->ret)
+	TP_printk("scanned for %li nodes, can free %li, ret %li",
+		  __entry->nr_to_scan, __entry->can_free, __entry->ret)
 );
 
 TRACE_EVENT(btree_node_relock_fail,
@@ -548,6 +548,12 @@ struct journal_keys {
 		u32		journal_seq;
 		u32		journal_offset;
 	} *d;
+	/*
+	 * Gap buffer: instead of all the empty space in the array being at the
+	 * end of the buffer - from @nr to @size - the empty space is at @gap.
+	 * This means that sequential insertions are O(n) instead of O(n^2).
+	 */
+	size_t		gap;
 	size_t		nr;
 	size_t		size;
 	u64		journal_seq_base;
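The comment in this hunk is the core idea of the commit. As a rough
standalone sketch of the invariant it describes (the gap_buf type and
field names here are hypothetical, for illustration only, not bcachefs
code):

#include <stddef.h>

/*
 * Live entries occupy [0, gap) and [gap + (size - nr), size); the empty
 * region sits at @gap rather than at the end of the array.
 */
struct gap_buf {
	int	*d;	/* backing array; bcachefs stores struct journal_key */
	size_t	gap;	/* start of the empty region */
	size_t	nr;	/* number of live entries */
	size_t	size;	/* total capacity of @d */
};

/* Map a logical index 0..nr-1 to its slot in the backing array: */
static inline size_t gap_buf_pos(const struct gap_buf *b, size_t idx)
{
	return idx < b->gap ? idx : idx + (b->size - b->nr);
}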
@@ -281,7 +281,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	struct btree_cache *bc = &c->btree_cache;
 	struct btree *b, *t;
 	unsigned long nr = sc->nr_to_scan;
-	unsigned long can_free;
+	unsigned long can_free = 0;
 	unsigned long touched = 0;
 	unsigned long freed = 0;
 	unsigned i, flags;
@@ -305,7 +305,6 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
 	 * succeed, so that inserting keys into the btree can always succeed and
 	 * IO can always make forward progress:
 	 */
-	nr /= btree_pages(c);
 	can_free = btree_cache_can_free(bc);
 	nr = min_t(unsigned long, nr, can_free);
 
@@ -375,13 +374,10 @@ touched:
 
 	mutex_unlock(&bc->lock);
 out:
-	ret = (unsigned long) freed * btree_pages(c);
+	ret = freed;
 	memalloc_nofs_restore(flags);
 out_norestore:
-	trace_btree_cache_scan(sc->nr_to_scan,
-			       sc->nr_to_scan / btree_pages(c),
-			       btree_cache_can_free(bc),
-			       ret);
+	trace_btree_cache_scan(sc->nr_to_scan, can_free, ret);
 
 	return ret;
 }
@@ -395,7 +391,7 @@ static unsigned long bch2_btree_cache_count(struct shrinker *shrink,
 	if (bch2_btree_shrinker_disabled)
 		return 0;
 
-	return btree_cache_can_free(bc) * btree_pages(c);
+	return btree_cache_can_free(bc);
 }
 
 void bch2_fs_btree_cache_exit(struct bch_fs *c)
@@ -482,7 +478,6 @@ int bch2_fs_btree_cache_init(struct bch_fs *c)
 	bc->shrink.count_objects	= bch2_btree_cache_count;
 	bc->shrink.scan_objects		= bch2_btree_cache_scan;
 	bc->shrink.seeks		= 4;
-	bc->shrink.batch		= btree_pages(c) * 2;
 	ret = register_shrinker(&bc->shrink);
 out:
 	pr_verbose_init(c->opts, "ret %i", ret);
@@ -620,13 +620,13 @@ int bch2_mark_alloc(struct btree_trans *trans,
 	return 0;
 }
 
-void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
+int bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 			       size_t b, enum bch_data_type data_type,
 			       unsigned sectors, struct gc_pos pos,
 			       unsigned flags)
 {
 	struct bucket old, new, *g;
-	bool overflow;
+	int ret = 0;
 
 	BUG_ON(!(flags & BTREE_TRIGGER_GC));
 	BUG_ON(data_type != BCH_DATA_sb &&
@@ -636,7 +636,7 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 	 * Backup superblock might be past the end of our normal usable space:
 	 */
 	if (b >= ca->mi.nbuckets)
-		return;
+		return 0;
 
 	percpu_down_read(&c->mark_lock);
 	g = gc_bucket(ca, b);
@@ -644,37 +644,43 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 	bucket_lock(g);
 	old = *g;
 
+	if (bch2_fs_inconsistent_on(g->data_type &&
+			g->data_type != data_type, c,
+			"different types of data in same bucket: %s, %s",
+			bch2_data_types[g->data_type],
+			bch2_data_types[data_type])) {
+		ret = -EIO;
+		goto err;
+	}
+
+	if (bch2_fs_inconsistent_on((u64) g->dirty_sectors + sectors > ca->mi.bucket_size, c,
+			"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > bucket size",
+			ca->dev_idx, b, g->gen,
+			bch2_data_types[g->data_type ?: data_type],
+			g->dirty_sectors, sectors)) {
+		ret = -EIO;
+		goto err;
+	}
+
+
 	g->data_type = data_type;
 	g->dirty_sectors += sectors;
-	overflow = g->dirty_sectors < sectors;
 
 	new = *g;
+err:
 	bucket_unlock(g);
-
-	bch2_fs_inconsistent_on(old.data_type &&
-				old.data_type != data_type, c,
-		"different types of data in same bucket: %s, %s",
-		bch2_data_types[old.data_type],
-		bch2_data_types[data_type]);
-
-	bch2_fs_inconsistent_on(overflow, c,
-		"bucket %u:%zu gen %u data type %s sector count overflow: %u + %u > U16_MAX",
-		ca->dev_idx, b, new.gen,
-		bch2_data_types[old.data_type ?: data_type],
-		old.dirty_sectors, sectors);
-
-	bch2_dev_usage_update_m(c, ca, old, new, 0, true);
+	if (!ret)
+		bch2_dev_usage_update_m(c, ca, old, new, 0, true);
 	percpu_up_read(&c->mark_lock);
+	return ret;
 }
 
 static s64 ptr_disk_sectors(s64 sectors, struct extent_ptr_decoded p)
 {
 	EBUG_ON(sectors < 0);
 
-	return p.crc.compression_type &&
-		p.crc.compression_type != BCH_COMPRESSION_TYPE_incompressible
+	return crc_is_compressed(p.crc)
 		? DIV_ROUND_UP_ULL(sectors * p.crc.compressed_size,
 				   p.crc.uncompressed_size)
 		: sectors;
 }
@@ -808,25 +814,22 @@ static int mark_stripe_bucket(struct btree_trans *trans,
 	old = *g;
 
 	ret = check_bucket_ref(c, k, ptr, sectors, data_type,
-			       new.gen, new.data_type,
-			       new.dirty_sectors, new.cached_sectors);
-	if (ret) {
-		bucket_unlock(g);
+			       g->gen, g->data_type,
+			       g->dirty_sectors, g->cached_sectors);
+	if (ret)
 		goto err;
-	}
 
-	new.dirty_sectors += sectors;
 	if (data_type)
-		new.data_type	= data_type;
+		g->data_type = data_type;
+	g->dirty_sectors += sectors;
 
 	g->stripe		= k.k->p.offset;
 	g->stripe_redundancy	= s->nr_redundant;
 
 	new = *g;
-	bucket_unlock(g);
-
-	bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
 err:
+	bucket_unlock(g);
+	if (!ret)
+		bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
 	percpu_up_read(&c->mark_lock);
 	printbuf_exit(&buf);
 	return ret;
@@ -872,29 +875,22 @@ static int bch2_mark_pointer(struct btree_trans *trans,
 
 	percpu_down_read(&c->mark_lock);
 	g = PTR_GC_BUCKET(ca, &p.ptr);
 
 	bucket_lock(g);
 	old = *g;
 
 	bucket_data_type = g->data_type;
 
 	ret = __mark_pointer(trans, k, &p.ptr, sectors,
 			     data_type, g->gen,
 			     &bucket_data_type,
 			     &g->dirty_sectors,
 			     &g->cached_sectors);
-	if (ret) {
-		bucket_unlock(g);
-		goto err;
-	}
-
-	g->data_type = bucket_data_type;
+	if (!ret)
+		g->data_type = bucket_data_type;
 
 	new = *g;
 	bucket_unlock(g);
-
-	bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
-err:
+	if (!ret)
+		bch2_dev_usage_update_m(c, ca, old, new, journal_seq, true);
 	percpu_up_read(&c->mark_lock);
 
 	return ret;
@@ -194,9 +194,9 @@ bch2_fs_usage_read_short(struct bch_fs *);
 
 void bch2_fs_usage_initialize(struct bch_fs *);
 
-void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
+int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
 			       size_t, enum bch_data_type, unsigned,
 			       struct gc_pos, unsigned);
 
 int bch2_mark_alloc(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
 int bch2_mark_extent(struct btree_trans *, struct bkey_s_c, struct bkey_s_c, unsigned);
@@ -1981,22 +1981,28 @@ static noinline void read_from_stale_dirty_pointer(struct btree_trans *trans,
 	struct printbuf buf = PRINTBUF;
 	int ret;
 
-	bch2_bkey_val_to_text(&buf, c, k);
-	bch2_fs_inconsistent(c, "Attempting to read from stale dirty pointer: %s", buf.buf);
-
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
-			     POS(ptr.dev, PTR_BUCKET_NR(ca, &ptr)),
+			     PTR_BUCKET_POS(c, &ptr),
 			     BTREE_ITER_CACHED);
 
-	ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
-	if (ret)
-		goto out;
+	pr_buf(&buf, "Attempting to read from stale dirty pointer:");
+	pr_indent_push(&buf, 2);
+	pr_newline(&buf);
 
 	bch2_bkey_val_to_text(&buf, c, k);
-	bch_err(c, "%s", buf.buf);
-	bch_err(c, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+	pr_newline(&buf);
+
+	pr_buf(&buf, "memory gen: %u", *bucket_gen(ca, iter.pos.offset));
+
+	ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
+	if (!ret) {
+		pr_newline(&buf);
+		bch2_bkey_val_to_text(&buf, c, k);
+	}
+
+	bch2_fs_inconsistent(c, "%s", buf.buf);
+
 	bch2_trans_iter_exit(trans, &iter);
-out:
 	printbuf_exit(&buf);
 }
@@ -964,6 +964,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
 int bch2_dev_journal_alloc(struct bch_dev *ca)
 {
 	unsigned nr;
+	int ret;
 
 	if (dynamic_fault("bcachefs:add:journal_alloc"))
 		return -ENOMEM;
@@ -980,7 +981,15 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
 		     min(1 << 13,
 			 (1 << 24) / ca->mi.bucket_size));
 
-	return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+	if (ca->fs)
+		mutex_lock(&ca->fs->sb_lock);
+
+	ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
+
+	if (ca->fs)
+		mutex_unlock(&ca->fs->sb_lock);
+
+	return ret;
 }
 
 /* startup/shutdown: */
@@ -186,7 +186,8 @@ int bch2_journal_buckets_to_sb(struct bch_fs *c, struct bch_dev *ca)
 	struct bch_sb_field_journal_v2 *j;
 	unsigned i, dst = 0, nr = 1;
 
-	lockdep_assert_held(&c->sb_lock);
+	if (c)
+		lockdep_assert_held(&c->sb_lock);
 
 	if (!ja->nr) {
 		bch2_sb_field_delete(&ca->disk_sb, BCH_SB_FIELD_journal);
@@ -72,58 +72,97 @@ static int journal_key_cmp(const struct journal_key *l, const struct journal_key
 	return __journal_key_cmp(l->btree_id, l->level, l->k->k.p, r);
 }
 
-size_t bch2_journal_key_search(struct journal_keys *journal_keys,
+static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
+{
+	size_t gap_size = keys->size - keys->nr;
+
+	if (idx >= keys->gap)
+		idx += gap_size;
+	return idx;
+}
+
+static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
+{
+	return keys->d + idx_to_pos(keys, idx);
+}
+
+size_t bch2_journal_key_search(struct journal_keys *keys,
 			       enum btree_id id, unsigned level,
 			       struct bpos pos)
 {
-	size_t l = 0, r = journal_keys->nr, m;
+	size_t l = 0, r = keys->nr, m;
 
 	while (l < r) {
 		m = l + ((r - l) >> 1);
-		if (__journal_key_cmp(id, level, pos, &journal_keys->d[m]) > 0)
+		if (__journal_key_cmp(id, level, pos, idx_to_key(keys, m)) > 0)
 			l = m + 1;
 		else
 			r = m;
 	}
 
-	BUG_ON(l < journal_keys->nr &&
-	       __journal_key_cmp(id, level, pos, &journal_keys->d[l]) > 0);
+	BUG_ON(l < keys->nr &&
+	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l)) > 0);
 
 	BUG_ON(l &&
-	       __journal_key_cmp(id, level, pos, &journal_keys->d[l - 1]) <= 0);
+	       __journal_key_cmp(id, level, pos, idx_to_key(keys, l - 1)) <= 0);
 
-	return l;
+	return idx_to_pos(keys, l);
 }
 
 struct bkey_i *bch2_journal_keys_peek(struct bch_fs *c, enum btree_id btree_id,
 				      unsigned level, struct bpos pos)
 {
 	struct journal_keys *keys = &c->journal_keys;
-	struct journal_key *end = keys->d + keys->nr;
-	struct journal_key *k = keys->d +
-		bch2_journal_key_search(keys, btree_id, level, pos);
+	size_t idx = bch2_journal_key_search(keys, btree_id, level, pos);
 
-	while (k < end && k->overwritten)
-		k++;
+	while (idx < keys->size &&
+	       keys->d[idx].overwritten) {
+		idx++;
+		if (idx == keys->gap)
+			idx += keys->size - keys->nr;
+	}
 
-	if (k < end &&
-	    k->btree_id == btree_id &&
-	    k->level == level)
-		return k->k;
+	if (idx < keys->size &&
+	    keys->d[idx].btree_id == btree_id &&
+	    keys->d[idx].level == level)
+		return keys->d[idx].k;
 	return NULL;
 }
 
-static void journal_iter_fix(struct bch_fs *c, struct journal_iter *iter, unsigned idx)
+static void journal_iters_fix(struct bch_fs *c)
 {
-	struct bkey_i *n = iter->keys->d[idx].k;
-	struct btree_and_journal_iter *biter =
-		container_of(iter, struct btree_and_journal_iter, journal);
-
-	if (iter->idx > idx ||
-	    (iter->idx == idx &&
-	     biter->last &&
-	     bpos_cmp(n->k.p, biter->unpacked.p) <= 0))
-		iter->idx++;
+	struct journal_keys *keys = &c->journal_keys;
+	/* The key we just inserted is immediately before the gap: */
+	struct journal_key *n = &keys->d[keys->gap - 1];
+	size_t gap_end = keys->gap + (keys->size - keys->nr);
+	struct btree_and_journal_iter *iter;
+
+	/*
+	 * If an iterator points one after the key we just inserted,
+	 * and the key we just inserted compares >= the iterator's position,
+	 * decrement the iterator so it points at the key we just inserted:
+	 */
+	list_for_each_entry(iter, &c->journal_iters, journal.list)
+		if (iter->journal.idx == gap_end &&
+		    iter->last &&
+		    iter->b->c.btree_id == n->btree_id &&
+		    iter->b->c.level == n->level &&
+		    bpos_cmp(n->k->k.p, iter->unpacked.p) >= 0)
+			iter->journal.idx = keys->gap - 1;
+}
+
+static void journal_iters_move_gap(struct bch_fs *c, size_t old_gap, size_t new_gap)
+{
+	struct journal_keys *keys = &c->journal_keys;
+	struct journal_iter *iter;
+	size_t gap_size = keys->size - keys->nr;
+
+	list_for_each_entry(iter, &c->journal_iters, list) {
+		if (iter->idx > old_gap)
+			iter->idx -= gap_size;
+		if (iter->idx >= new_gap)
+			iter->idx += gap_size;
+	}
 }
 
 int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
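For intuition, a worked example of the idx_to_pos() remapping above,
with made-up numbers:

/*
 * Hypothetical layout: size = 8, nr = 5, gap = 2, so gap_size = 3.
 *
 *   array pos: 0 1 2 3 4 5 6 7
 *   contents:  A B . . . C D E      ('.' = empty slot)
 *
 * Logical indices 0..4 map to array positions 0, 1, 5, 6, 7:
 *   idx_to_pos(keys, 1) == 1            (idx < gap: used directly)
 *   idx_to_pos(keys, 2) == 2 + 3 == 5   (idx >= gap: skip over the gap)
 *
 * The binary search in bch2_journal_key_search() can therefore run over
 * the dense range 0..nr and only remap through idx_to_key() when it
 * actually dereferences an element.
 */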
@@ -141,12 +180,11 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 		.journal_seq	= U32_MAX,
 	};
 	struct journal_keys *keys = &c->journal_keys;
-	struct journal_iter *iter;
 	size_t idx = bch2_journal_key_search(keys, id, level, k->k.p);
 
 	BUG_ON(test_bit(BCH_FS_RW, &c->flags));
 
-	if (idx < keys->nr &&
+	if (idx < keys->size &&
 	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
 		if (keys->d[idx].allocated)
 			kfree(keys->d[idx].k);
@@ -154,6 +192,9 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 		return 0;
 	}
 
+	if (idx > keys->gap)
+		idx -= keys->size - keys->nr;
+
 	if (keys->nr == keys->size) {
 		struct journal_keys new_keys = {
 			.nr			= keys->nr,
@@ -168,15 +209,24 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 			return -ENOMEM;
 		}
 
+		/* Since @keys was full, there was no gap: */
 		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
 		kvfree(keys->d);
 		*keys = new_keys;
+
+		/* And now the gap is at the end: */
+		keys->gap = keys->nr;
 	}
 
-	array_insert_item(keys->d, keys->nr, idx, n);
-
-	list_for_each_entry(iter, &c->journal_iters, list)
-		journal_iter_fix(c, iter, idx);
+	journal_iters_move_gap(c, keys->gap, idx);
+
+	move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
+	keys->gap = idx;
+
+	keys->nr++;
+	keys->d[keys->gap++] = n;
+
+	journal_iters_fix(c);
 
 	return 0;
 }
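Condensing the insert path above into a standalone sketch (same
hypothetical gap_buf type as earlier; the real code also stores a
struct journal_key and fixes up live iterators via
journal_iters_move_gap() and journal_iters_fix(), which the sketch
omits):

/* Insert @v at logical position @idx; mirrors the sequence above. */
static void gap_buf_insert(struct gap_buf *b, size_t idx, int v)
{
	/*
	 * Slide live entries so the empty region starts at @idx; this
	 * costs O(|old gap - new gap|), which is a no-op for sequential
	 * insertions since the gap is already where the next key lands:
	 */
	move_gap(b->d, b->nr, b->size, b->gap, idx);
	b->gap = idx;

	b->nr++;
	b->d[b->gap++] = v;	/* the new entry sits just before the gap */
}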
@@ -220,7 +270,7 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
 	struct journal_keys *keys = &c->journal_keys;
 	size_t idx = bch2_journal_key_search(keys, btree, level, pos);
 
-	if (idx < keys->nr &&
+	if (idx < keys->size &&
 	    keys->d[idx].btree_id == btree &&
 	    keys->d[idx].level == level &&
 	    !bpos_cmp(keys->d[idx].k->k.p, pos))
@@ -246,8 +296,11 @@ static struct bkey_i *bch2_journal_iter_peek(struct journal_iter *iter)
 
 static void bch2_journal_iter_advance(struct journal_iter *iter)
 {
-	if (iter->idx < iter->keys->nr)
+	if (iter->idx < iter->keys->size) {
 		iter->idx++;
+		if (iter->idx == iter->keys->gap)
+			iter->idx += iter->keys->size - iter->keys->nr;
+	}
 }
 
 static void bch2_journal_iter_exit(struct journal_iter *iter)
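Iterating in key order now has to step over the gap, which is what the
added branch above does. Continuing the made-up layout from earlier:

/*
 * size = 8, nr = 5, gap = 2 (gap_size = 3):
 *
 *   array pos: 0 1 2 3 4 5 6 7
 *   contents:  A B . . . C D E
 *
 * Advancing from pos 1: idx++ gives 2, which equals the gap, so
 * idx += gap_size lands on 5 (entry C); slots 2..4 are never visited.
 */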
@@ -409,6 +462,9 @@ void bch2_journal_keys_free(struct journal_keys *keys)
 {
 	struct journal_key *i;
 
+	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
+	keys->gap = keys->nr;
+
 	for (i = keys->d; i < keys->d + keys->nr; i++)
 		if (i->allocated)
 			kfree(i->k);
@@ -478,6 +534,7 @@ static struct journal_keys journal_keys_sort(struct list_head *journal_entries)
 	}
 
 	keys.nr = dst - keys.d;
+	keys.gap = keys.nr;
 err:
 	return keys;
 }
@@ -538,6 +595,9 @@ static int bch2_journal_replay(struct bch_fs *c)
 	size_t i;
 	int ret;
 
+	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
+	keys->gap = keys->nr;
+
 	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
 	if (!keys_sorted)
 		return -ENOMEM;
@@ -2,9 +2,6 @@
 #ifndef _BCACHEFS_RECOVERY_H
 #define _BCACHEFS_RECOVERY_H
 
-#define for_each_journal_key(keys, i)				\
-	for (i = (keys).d; i < (keys).d + (keys).nr; (i)++)
-
 struct journal_iter {
 	struct list_head	list;
 	enum btree_id		btree_id;
@@ -21,6 +21,8 @@
 #include <linux/backing-dev.h>
 #include <linux/sort.h>
 
+#include <trace/events/bcachefs.h>
+
 const char * const bch2_sb_fields[] = {
 #define x(name, nr)	#name,
 	BCH_SB_FIELDS()
@@ -797,6 +799,8 @@ int bch2_write_super(struct bch_fs *c)
 	unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
 	int ret = 0;
 
+	trace_write_super(c, _RET_IP_);
+
 	if (c->opts.very_degraded)
 		degraded_flags |= BCH_FORCE_IF_LOST;
 
@@ -831,6 +835,13 @@ int bch2_write_super(struct bch_fs *c)
 	if (c->opts.nochanges)
 		goto out;
 
+	/*
+	 * Defer writing the superblock until filesystem initialization is
+	 * complete - don't write out a partly initialized superblock:
+	 */
+	if (!BCH_SB_INITIALIZED(c->disk_sb.sb))
+		goto out;
+
 	for_each_online_member(ca, c, i) {
 		__set_bit(ca->dev_idx, sb_written.d);
 		ca->sb_write_error = 0;
@@ -806,6 +806,31 @@ do { \
 #define array_remove_item(_array, _nr, _pos)			\
 	array_remove_items(_array, _nr, _pos, 1)
 
+static inline void __move_gap(void *array, size_t element_size,
+			      size_t nr, size_t size,
+			      size_t old_gap, size_t new_gap)
+{
+	size_t gap_end = old_gap + size - nr;
+
+	if (new_gap < old_gap) {
+		size_t move = old_gap - new_gap;
+
+		memmove(array + element_size * (gap_end - move),
+			array + element_size * (old_gap - move),
+			element_size * move);
+	} else if (new_gap > old_gap) {
+		size_t move = new_gap - old_gap;
+
+		memmove(array + element_size * old_gap,
+			array + element_size * gap_end,
+			element_size * move);
+	}
+}
+
+/* Move the gap in a gap buffer: */
+#define move_gap(_array, _nr, _size, _old_gap, _new_gap)	\
+	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
+
 #define bubble_sort(_base, _nr, _cmp)				\
 do {								\
 	ssize_t _i, _end;					\
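A worked example of the memmove() arithmetic in __move_gap(), again
with made-up numbers:

/*
 * size = 8, nr = 5, old_gap = 5, new_gap = 2,
 * so gap_end = old_gap + size - nr = 8.
 *
 *   before:  A B C D E . . .      (gap occupies slots 5..7)
 *   after:   A B . . . C D E      (gap occupies slots 2..4)
 *
 * new_gap < old_gap, so move = 3: the entries C D E in
 * [old_gap - move, old_gap) = [2, 5) are copied up to
 * [gap_end - move, gap_end) = [5, 8). Moving the gap the other
 * way takes the symmetric branch.
 */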