Update bcachefs sources
parent c0ad33c126
commit 4921d02142
@@ -1 +1 @@
-217509029551f8b40024d7fcd6e311be1fb6410c
+d37e2bb6dca5e4decf6b5918737c719fa6f17cc6
@@ -98,7 +98,7 @@ u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *c, struct bkey_s_c k)
 		struct bch_dev *ca = c->devs[ptr->dev];
 		size_t b = PTR_BUCKET_NR(ca, ptr);
 
-		if (__gen_after(ca->oldest_gens[b], ptr->gen))
+		if (gen_after(ca->oldest_gens[b], ptr->gen))
 			ca->oldest_gens[b] = ptr->gen;
 
 		max_stale = max(max_stale, ptr_stale(ca, ptr));
@@ -126,14 +126,40 @@ static u8 bch2_btree_mark_key(struct bch_fs *c, enum bkey_type type,
 	}
 }
 
-u8 bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
-			       struct bkey_s_c k)
+int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
+				struct bkey_s_c k)
 {
+	int ret;
+
+	switch (k.k->type) {
+	case BCH_EXTENT:
+	case BCH_EXTENT_CACHED: {
+		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
+		const struct bch_extent_ptr *ptr;
+
+		extent_for_each_ptr(e, ptr) {
+			struct bch_dev *ca = c->devs[ptr->dev];
+			struct bucket *g = PTR_BUCKET(ca, ptr);
+
+			unfixable_fsck_err_on(gen_cmp(ptr->gen, g->mark.gen) > 0, c,
+				"%s ptr gen in the future: %u > %u",
+				type == BKEY_TYPE_BTREE
+				? "btree" : "data",
+				ptr->gen, g->mark.gen);
+
+		}
+		break;
+	}
+	}
+
 	atomic64_set(&c->key_version,
 		     max_t(u64, k.k->version.lo,
 			   atomic64_read(&c->key_version)));
 
-	return bch2_btree_mark_key(c, type, k);
+	bch2_btree_mark_key(c, type, k);
+	return 0;
+fsck_err:
+	return ret;
 }
 
 static bool btree_gc_mark_node(struct bch_fs *c, struct btree *b)
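Note: bch2_btree_mark_key_initial() now returns an int because the unfixable_fsck_err_on() checks bail out through the fsck_err label with an error code instead of halting. Below is a minimal standalone sketch of that check-and-bail shape; the check_or_bail macro and the -1 error value are illustrative stand-ins, not the real bcachefs fsck macros.

#include <stdio.h>

/* Illustrative stand-in for an "unfixable" fsck check: report, set ret, bail. */
#define check_or_bail(cond, fmt, ...)                                   \
do {                                                                    \
        if (cond) {                                                     \
                fprintf(stderr, "fsck error: " fmt "\n", ##__VA_ARGS__);\
                ret = -1;                                               \
                goto fsck_err;                                          \
        }                                                               \
} while (0)

static int mark_key(unsigned ptr_gen, unsigned bucket_gen)
{
        int ret;

        /* A pointer generation newer than its bucket's generation is fatal. */
        check_or_bail(ptr_gen > bucket_gen,
                      "ptr gen in the future: %u > %u", ptr_gen, bucket_gen);

        /* ...normal marking work would go here... */
        return 0;
fsck_err:
        return ret;
}

int main(void)
{
        printf("ok key:  %d\n", mark_key(3, 7));   /* prints 0 */
        printf("bad key: %d\n", mark_key(9, 2));   /* prints -1 after the error message */
        return 0;
}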
@@ -890,16 +916,22 @@ int bch2_gc_thread_start(struct bch_fs *c)
 
 /* Initial GC computes bucket marks during startup */
 
-static void bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
+static int bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
 {
 	struct btree_iter iter;
 	struct btree *b;
 	struct range_checks r;
+	int ret = 0;
 
 	btree_node_range_checks_init(&r, 0);
 
 	if (!c->btree_roots[id].b)
-		return;
+		return 0;
+
+	ret = bch2_btree_mark_key_initial(c, BKEY_TYPE_BTREE,
+			bkey_i_to_s_c(&c->btree_roots[id].b->key));
+	if (ret)
+		return ret;
 
 	/*
 	 * We have to hit every btree node before starting journal replay, in
@@ -915,28 +947,37 @@ static void bch2_initial_gc_btree(struct bch_fs *c, enum btree_id id)
 
 			for_each_btree_node_key_unpack(b, k, &node_iter,
 						       btree_node_is_extents(b),
-						       &unpacked)
-				bch2_btree_mark_key_initial(c, btree_node_type(b), k);
+						       &unpacked) {
+				ret = bch2_btree_mark_key_initial(c,
+							btree_node_type(b), k);
+				if (ret)
+					goto err;
+			}
 		}
 
 		bch2_btree_iter_cond_resched(&iter);
 	}
-
+err:
 	bch2_btree_iter_unlock(&iter);
-
-	bch2_btree_mark_key(c, BKEY_TYPE_BTREE,
-			    bkey_i_to_s_c(&c->btree_roots[id].b->key));
+	return ret;
 }
 
 int bch2_initial_gc(struct bch_fs *c, struct list_head *journal)
 {
 	enum btree_id id;
+	int ret;
 
-	for (id = 0; id < BTREE_ID_NR; id++)
-		bch2_initial_gc_btree(c, id);
+	for (id = 0; id < BTREE_ID_NR; id++) {
+		ret = bch2_initial_gc_btree(c, id);
+		if (ret)
+			return ret;
+	}
 
-	if (journal)
-		bch2_journal_mark(c, journal);
+	if (journal) {
+		ret = bch2_journal_mark(c, journal);
+		if (ret)
+			return ret;
+	}
 
 	bch2_mark_metadata(c);
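Note: the reworked bch2_initial_gc_btree() funnels every failure through a single err: label so the btree iterator is always unlocked before returning, and bch2_initial_gc() simply propagates the first non-zero return. A rough standalone sketch of that cleanup-label pattern, using made-up lock/visit helpers rather than the real btree iterator API:

#include <errno.h>

struct iter { int locked; };

static void iter_lock(struct iter *it)   { it->locked = 1; }
static void iter_unlock(struct iter *it) { it->locked = 0; }

/* Hypothetical per-item work; returns 0 or a -errno style error. */
static int visit(struct iter *it, int item)
{
        (void) it;
        return item < 0 ? -EIO : 0;
}

static int walk(struct iter *it, const int *items, int n)
{
        int ret = 0;
        int i;

        iter_lock(it);

        for (i = 0; i < n; i++) {
                ret = visit(it, items[i]);
                if (ret)
                        goto err;       /* bail out, but still unlock below */
        }
err:
        iter_unlock(it);
        return ret;
}

int main(void)
{
        struct iter it = { 0 };
        const int items[] = { 1, 2, -1, 4 };

        return walk(&it, items, 4) ? 1 : 0;    /* exits 1: third item fails */
}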
@@ -11,7 +11,7 @@ void bch2_gc_thread_stop(struct bch_fs *);
 int bch2_gc_thread_start(struct bch_fs *);
 int bch2_initial_gc(struct bch_fs *, struct list_head *);
 u8 bch2_btree_key_recalc_oldest_gen(struct bch_fs *, struct bkey_s_c);
-u8 bch2_btree_mark_key_initial(struct bch_fs *, enum bkey_type,
+int bch2_btree_mark_key_initial(struct bch_fs *, enum bkey_type,
 			       struct bkey_s_c);
 void bch2_mark_dev_metadata(struct bch_fs *, struct bch_dev *);
@@ -353,25 +353,6 @@ void bch2_mark_alloc_bucket(struct bch_dev *ca, struct bucket *g,
 	}));
 }
 
-void bch2_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g,
-			       enum bucket_data_type type,
-			       bool may_make_unavailable)
-{
-	struct bucket_mark old, new;
-
-	BUG_ON(!type);
-
-	old = bucket_data_cmpxchg(ca, g, new, ({
-		new.data_type = type;
-		new.had_metadata = 1;
-	}));
-
-	BUG_ON(old.cached_sectors);
-	BUG_ON(old.dirty_sectors);
-	BUG_ON(!may_make_unavailable &&
-	       bucket_became_unavailable(ca->fs, old, new));
-}
-
 #define saturated_add(ca, dst, src, max)			\
 do {								\
 	BUG_ON((int) (dst) + (src) < 0);			\
@@ -385,6 +366,32 @@ do {								\
 	}							\
 } while (0)
 
+void bch2_mark_metadata_bucket(struct bch_dev *ca, struct bucket *g,
+			       enum bucket_data_type type,
+			       bool may_make_unavailable)
+{
+	struct bucket_mark old, new;
+
+	BUG_ON(!type);
+
+	old = bucket_data_cmpxchg(ca, g, new, ({
+		saturated_add(ca, new.dirty_sectors, ca->mi.bucket_size,
+			      GC_MAX_SECTORS_USED);
+		new.data_type = type;
+		new.had_metadata = 1;
+	}));
+
+	if (old.data_type != type &&
+	    (old.data_type ||
+	     old.cached_sectors ||
+	     old.dirty_sectors))
+		bch_err(ca->fs, "bucket %zu has multiple types of data (%u, %u)",
+			g - ca->buckets, old.data_type, new.data_type);
+
+	BUG_ON(!may_make_unavailable &&
+	       bucket_became_unavailable(ca->fs, old, new));
+}
+
 #if 0
 /* Reverting this until the copygc + compression issue is fixed: */
 
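Note: both the removed and the re-added bch2_mark_metadata_bucket() funnel their updates through bucket_data_cmpxchg(), a retry loop that recomputes a new packed bucket_mark from the value last read and publishes it with a compare-and-swap. A rough standalone approximation of that idiom using C11 atomics; the 64-bit packing and field names here are assumptions for illustration, not the real bcachefs bucket_mark layout.

#include <stdatomic.h>
#include <stdint.h>

/* Toy packed mark: dirty sector count plus a couple of flag bytes. */
union mark {
        uint64_t v;
        struct {
                uint32_t dirty_sectors;
                uint8_t  data_type;
                uint8_t  had_metadata;
        };
};

struct bucket { _Atomic uint64_t mark; };

/* Retry until our updated copy replaces exactly the value we read. */
static union mark mark_metadata(struct bucket *g, uint8_t type, uint32_t sectors)
{
        union mark old, new;

        old.v = atomic_load(&g->mark);
        do {
                new = old;
                new.dirty_sectors += sectors;   /* saturation omitted in this sketch */
                new.data_type      = type;
                new.had_metadata   = 1;
        } while (!atomic_compare_exchange_weak(&g->mark, &old.v, new.v));

        return old;     /* caller inspects the pre-update state, as the diff does */
}

The returned pre-update value is what lets the new code warn when a bucket already held a different data type before this update.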
@@ -431,6 +438,8 @@ static void bch2_mark_pointer(struct bch_fs *c,
 	unsigned saturated;
 	struct bch_dev *ca = c->devs[ptr->dev];
 	struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
+	unsigned data_type = type == S_META
+		? BUCKET_BTREE : BUCKET_DATA;
 	unsigned old_sectors, new_sectors;
 	int disk_sectors, compressed_sectors;
@@ -494,13 +503,19 @@ static void bch2_mark_pointer(struct bch_fs *c,
 			new.journal_seq = journal_seq;
 		}
 	} else {
-		new.data_type = type == S_META
-			? BUCKET_BTREE : BUCKET_DATA;
+		new.data_type = data_type;
 	}
 
 	new.had_metadata |= is_meta_bucket(new);
 }));
 
+	if (old.data_type != data_type &&
+	    (old.data_type ||
+	     old.cached_sectors ||
+	     old.dirty_sectors))
+		bch_err(ca->fs, "bucket %zu has multiple types of data (%u, %u)",
+			g - ca->buckets, old.data_type, new.data_type);
+
 	BUG_ON(!may_make_unavailable &&
 	       bucket_became_unavailable(c, old, new));
@@ -73,20 +73,16 @@ static inline struct bucket *PTR_BUCKET(const struct bch_dev *ca,
 	return ca->buckets + PTR_BUCKET_NR(ca, ptr);
 }
 
-static inline u8 __gen_after(u8 a, u8 b)
+static inline int gen_cmp(u8 a, u8 b)
 {
-	u8 r = a - b;
-
-	return r > 128U ? 0 : r;
+	return (s8) (a - b);
 }
 
-static inline u8 gen_after(u8 a, u8 b)
+static inline int gen_after(u8 a, u8 b)
 {
-	u8 r = a - b;
+	int r = gen_cmp(a, b);
 
-	BUG_ON(r > 128U);
-
-	return r;
+	return r > 0 ? r : 0;
 }
 
 /**
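Note: replacing __gen_after() with gen_cmp()/gen_after() makes generation comparisons explicit about u8 wraparound: casting the difference to s8 treats anything up to 127 increments ahead as newer even after the 8-bit counter wraps past 255, and the BUG_ON() on large skew is gone. A small self-contained check of that property, mirroring the new helpers but independent of the bcachefs headers (int8_t stands in for the kernel s8 typedef):

#include <stdio.h>
#include <stdint.h>

/* Signed distance between two 8-bit generation numbers, wraparound-safe. */
static inline int gen_cmp(uint8_t a, uint8_t b)
{
        return (int8_t) (a - b);
}

/* How far a is ahead of b, clamped to zero when it is not ahead. */
static inline int gen_after(uint8_t a, uint8_t b)
{
        int r = gen_cmp(a, b);

        return r > 0 ? r : 0;
}

int main(void)
{
        /* Generation 250 incremented 11 times wraps around to 5,
         * so 5 is the newer generation here: */
        printf("gen_cmp(5, 250)   = %d\n", gen_cmp(5, 250));    /* 11  */
        printf("gen_cmp(250, 5)   = %d\n", gen_cmp(250, 5));    /* -11 */
        printf("gen_after(5, 250) = %d\n", gen_after(5, 250));  /* 11  */
        printf("gen_after(250, 5) = %d\n", gen_after(250, 5));  /* 0   */
        return 0;
}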
@@ -239,7 +235,7 @@ void bch2_invalidate_bucket(struct bch_dev *, struct bucket *);
 void bch2_mark_free_bucket(struct bch_dev *, struct bucket *);
 void bch2_mark_alloc_bucket(struct bch_dev *, struct bucket *, bool);
 void bch2_mark_metadata_bucket(struct bch_dev *, struct bucket *,
-			       enum bucket_data_type, bool);
+			       enum bucket_data_type, bool);
 
 void __bch2_gc_mark_key(struct bch_fs *, struct bkey_s_c, s64, bool,
 			struct bch_fs_usage *);
@@ -602,12 +602,6 @@ bch2_btree_pick_ptr(struct bch_fs *c, const struct btree *b)
 			     PTR_BUCKET_NR(ca, ptr)))
 			break;
 
-		if (bch2_dev_inconsistent_on(ptr_stale(ca, ptr), ca,
-				"stale btree node pointer at btree %u level %u/%u bucket %zu",
-				b->btree_id, b->level, root ? root->level : -1,
-				PTR_BUCKET_NR(ca, ptr)))
-			continue;
-
 		if (ca->mi.state == BCH_MEMBER_STATE_FAILED)
 			continue;
@@ -134,18 +134,22 @@ static int check_extents(struct bch_fs *c)
 			"extent type %u for missing inode %llu",
 			k.k->type, k.k->p.inode);
 
-		unfixable_fsck_err_on(w.first_this_inode && w.have_inode &&
+		unfixable_fsck_err_on(w.have_inode &&
+			!S_ISREG(w.inode.i_mode) && !S_ISLNK(w.inode.i_mode), c,
+			"extent type %u for non regular file, inode %llu mode %o",
+			k.k->type, k.k->p.inode, w.inode.i_mode);
+
+		unfixable_fsck_err_on(w.first_this_inode &&
+			w.have_inode &&
+			!(w.inode.i_flags & BCH_INODE_I_SECTORS_DIRTY) &&
 			w.inode.i_sectors !=
 			(i_sectors = bch2_count_inode_sectors(c, w.cur_inum)),
 			c, "i_sectors wrong: got %llu, should be %llu",
 			w.inode.i_sectors, i_sectors);
 
-		unfixable_fsck_err_on(w.have_inode &&
-			!S_ISREG(w.inode.i_mode) && !S_ISLNK(w.inode.i_mode), c,
-			"extent type %u for non regular file, inode %llu mode %o",
-			k.k->type, k.k->p.inode, w.inode.i_mode);
-
-		unfixable_fsck_err_on(k.k->type != BCH_RESERVATION &&
+		unfixable_fsck_err_on(!(w.inode.i_flags & BCH_INODE_I_SIZE_DIRTY) &&
+			k.k->type != BCH_RESERVATION &&
 			k.k->p.offset > round_up(w.inode.i_size, PAGE_SIZE) >> 9, c,
 			"extent type %u offset %llu past end of inode %llu, i_size %llu",
 			k.k->type, k.k->p.offset, k.k->p.inode, w.inode.i_size);
@@ -475,7 +475,7 @@ static int bch2_write_extent(struct bch_write_op *op,
 		nonce = extent_nonce(op->version,
 				     crc_nonce,
 				     src_len >> 9,
-				     compression_type),
+				     fragment_compression_type),
 
 		bch2_encrypt_bio(c, csum_type, nonce, bio);
@@ -1077,20 +1077,26 @@ fsck_err:
 	return ret;
 }
 
-void bch2_journal_mark(struct bch_fs *c, struct list_head *list)
+int bch2_journal_mark(struct bch_fs *c, struct list_head *list)
 {
 	struct bkey_i *k, *n;
 	struct jset_entry *j;
 	struct journal_replay *r;
+	int ret;
 
 	list_for_each_entry(r, list, list)
 		for_each_jset_key(k, n, j, &r->j) {
 			enum bkey_type type = bkey_type(j->level, j->btree_id);
 			struct bkey_s_c k_s_c = bkey_i_to_s_c(k);
 
-			if (btree_type_has_ptrs(type))
-				bch2_btree_mark_key_initial(c, type, k_s_c);
+			if (btree_type_has_ptrs(type)) {
+				ret = bch2_btree_mark_key_initial(c, type, k_s_c);
+				if (ret)
+					return ret;
+			}
 		}
+
+	return 0;
 }
 
 static bool journal_entry_is_open(struct journal *j)
@@ -336,7 +336,7 @@ static inline bool journal_flushes_device(struct bch_dev *ca)
 }
 
 void bch2_journal_start(struct bch_fs *);
-void bch2_journal_mark(struct bch_fs *, struct list_head *);
+int bch2_journal_mark(struct bch_fs *, struct list_head *);
 void bch2_journal_entries_free(struct list_head *);
 int bch2_journal_read(struct bch_fs *, struct list_head *);
 int bch2_journal_replay(struct bch_fs *, struct list_head *);
@@ -727,7 +727,8 @@ static const char *__bch2_fs_start(struct bch_fs *c)
 	bch_verbose(c, "starting mark and sweep:");
 
 	err = "error in recovery";
-	if (bch2_initial_gc(c, &journal))
+	ret = bch2_initial_gc(c, &journal);
+	if (ret)
 		goto err;
 
 	if (c->opts.noreplay)
@@ -777,7 +778,9 @@ static const char *__bch2_fs_start(struct bch_fs *c)
 
 	bch_notice(c, "initializing new filesystem");
 
-	bch2_initial_gc(c, NULL);
+	ret = bch2_initial_gc(c, &journal);
+	if (ret)
+		goto err;
 
 	err = "unable to allocate journal buckets";
 	for_each_rw_member(ca, c, i)