From 92fc14824f9c1f8ce3666b11be9fab4d1739c88c Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Wed, 28 Aug 2019 13:22:29 -0400
Subject: [PATCH] Update bcachefs sources to 05ef7000f2 bcachefs: Switch
 reconstruct_alloc to a mount option

---
 .bcachefs_revision     |  2 +-
 libbcachefs/bcachefs.h |  4 +---
 libbcachefs/buckets.c  | 41 +++++++++++++++++++----------------------
 libbcachefs/fs.c       |  3 ++-
 libbcachefs/opts.h     |  5 +++++
 libbcachefs/recovery.c | 18 ++++++++++++------
 libbcachefs/replicas.c |  2 +-
 7 files changed, 41 insertions(+), 34 deletions(-)

diff --git a/.bcachefs_revision b/.bcachefs_revision
index 97cf88b4..32775186 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-bfb7133d71638b39411352729427c1bb14ca0b6e
+05ef7000f242c003918c8675a0b33670117057ed
diff --git a/libbcachefs/bcachefs.h b/libbcachefs/bcachefs.h
index ac797854..a186aa52 100644
--- a/libbcachefs/bcachefs.h
+++ b/libbcachefs/bcachefs.h
@@ -283,9 +283,7 @@ do {								\
 		"Force reads to use the reconstruct path, when reading"	\
 		"from erasure coded extents")				\
 	BCH_DEBUG_PARAM(test_restart_gc,				\
-		"Test restarting mark and sweep gc when bucket gens change")\
-	BCH_DEBUG_PARAM(test_reconstruct_alloc,				\
-		"Test reconstructing the alloc btree")
+		"Test restarting mark and sweep gc when bucket gens change")
 
 #define BCH_DEBUG_PARAMS_ALL() BCH_DEBUG_PARAMS_ALWAYS() BCH_DEBUG_PARAMS_DEBUG()
 
diff --git a/libbcachefs/buckets.c b/libbcachefs/buckets.c
index 16559e89..bffb247d 100644
--- a/libbcachefs/buckets.c
+++ b/libbcachefs/buckets.c
@@ -521,7 +521,6 @@ static inline void update_replicas(struct bch_fs *c,
 	int idx = bch2_replicas_entry_idx(c, r);
 
 	BUG_ON(idx < 0);
-	BUG_ON(!sectors);
 
 	switch (r->data_type) {
 	case BCH_DATA_BTREE:
@@ -570,8 +569,12 @@ static inline void update_replicas_list(struct btree_trans *trans,
 {
 	struct replicas_delta_list *d;
 	struct replicas_delta *n;
-	unsigned b = replicas_entry_bytes(r) + 8;
+	unsigned b;
+
+	if (!sectors)
+		return;
 
+	b = replicas_entry_bytes(r) + 8;
 	d = replicas_deltas_realloc(trans, b);
 
 	n = (void *) d->d + d->used;
@@ -1029,7 +1032,7 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
 					fs_usage, journal_seq, flags);
 
 		if (p.ptr.cached) {
-			if (disk_sectors && !stale)
+			if (!stale)
 				update_cached_sectors(c, fs_usage, p.ptr.dev,
 						      disk_sectors);
 		} else if (!p.ec_nr) {
@@ -1048,8 +1051,7 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
 		}
 	}
 
-	if (dirty_sectors)
-		update_replicas(c, fs_usage, &r.e, dirty_sectors);
+	update_replicas(c, fs_usage, &r.e, dirty_sectors);
 
 	return 0;
 }
@@ -1413,6 +1415,7 @@ static int bch2_trans_mark_pointer(struct btree_trans *trans,
 	struct bkey_s_c k;
 	struct bkey_alloc_unpacked u;
 	struct bkey_i_alloc *a;
+	unsigned old;
 	bool overflow;
 	int ret;
 
@@ -1441,9 +1444,9 @@
 	 * Unless we're already updating that key:
 	 */
 	if (k.k->type != KEY_TYPE_alloc) {
-		bch_err_ratelimited(c, "pointer to nonexistent bucket %u:%zu",
-				    p.ptr.dev,
-				    PTR_BUCKET_NR(ca, &p.ptr));
+		bch_err_ratelimited(c, "pointer to nonexistent bucket %llu:%llu",
+				    iter->pos.inode,
+				    iter->pos.offset);
 		ret = -1;
 		goto out;
 	}
@@ -1456,19 +1459,20 @@
 		goto out;
 	}
 
-	if (!p.ptr.cached)
+	if (!p.ptr.cached) {
+		old = u.dirty_sectors;
 		overflow = checked_add(u.dirty_sectors, sectors);
-	else
+	} else {
+		old = u.cached_sectors;
 		overflow = checked_add(u.cached_sectors, sectors);
+	}
 
 	u.data_type = u.dirty_sectors || u.cached_sectors
 		? data_type : 0;
 
 	bch2_fs_inconsistent_on(overflow, c,
 		"bucket sector count overflow: %u + %lli > U16_MAX",
-		!p.ptr.cached
-		? u.dirty_sectors
-		: u.cached_sectors, sectors);
+		old, sectors);
 
 	a = trans_update_key(trans, iter, BKEY_ALLOC_U64s_MAX);
 	ret = PTR_ERR_OR_ZERO(a);
@@ -1561,12 +1565,6 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
 			? sectors
 			: ptr_disk_sectors_delta(p, offset, sectors, flags);
 
-		/*
-		 * can happen due to rounding with compressed extents:
-		 */
-		if (!disk_sectors)
-			continue;
-
 		ret = bch2_trans_mark_pointer(trans, p, disk_sectors,
 					      data_type);
 		if (ret < 0)
@@ -1575,7 +1573,7 @@
 		stale = ret > 0;
 
 		if (p.ptr.cached) {
-			if (disk_sectors && !stale)
+			if (!stale)
 				update_cached_sectors_list(trans, p.ptr.dev,
 							   disk_sectors);
 		} else if (!p.ec_nr) {
@@ -1593,8 +1591,7 @@
 		}
 	}
 
-	if (dirty_sectors)
-		update_replicas_list(trans, &r.e, dirty_sectors);
+	update_replicas_list(trans, &r.e, dirty_sectors);
 
 	return 0;
 }
diff --git a/libbcachefs/fs.c b/libbcachefs/fs.c
index f9ee4ac2..16017079 100644
--- a/libbcachefs/fs.c
+++ b/libbcachefs/fs.c
@@ -1166,7 +1166,8 @@ retry:
 					offset_into_extent),
 			       &cur.k);
 		bch2_key_resize(&cur.k.k, sectors);
-		cur.k.k.p.offset = iter->pos.offset + cur.k.k.size;
+		cur.k.k.p = iter->pos;
+		cur.k.k.p.offset += cur.k.k.size;
 
 		if (have_extent) {
 			ret = bch2_fill_extent(c, info,
diff --git a/libbcachefs/opts.h b/libbcachefs/opts.h
index c6ec9f7e..97a782f4 100644
--- a/libbcachefs/opts.h
+++ b/libbcachefs/opts.h
@@ -258,6 +258,11 @@ enum opt_type {
 	  OPT_BOOL(),						\
 	  NO_SB_OPT,		false,				\
 	  NULL,		"Don\'t start filesystem, only open devices")	\
+	x(reconstruct_alloc,	u8,				\
+	  OPT_MOUNT,						\
+	  OPT_BOOL(),						\
+	  NO_SB_OPT,		false,				\
+	  NULL,		"Reconstruct alloc btree")		\
 	x(version_upgrade,	u8,				\
 	  OPT_MOUNT,						\
 	  OPT_BOOL(),						\
diff --git a/libbcachefs/recovery.c b/libbcachefs/recovery.c
index f2899ba9..c9558ccb 100644
--- a/libbcachefs/recovery.c
+++ b/libbcachefs/recovery.c
@@ -249,7 +249,13 @@ static int bch2_extent_replay_key(struct bch_fs *c, enum btree_id btree_id,
 		bch2_disk_reservation_init(c, 0);
 	struct bkey_i *split;
 	struct bpos atomic_end;
-	bool split_compressed = false;
+	/*
+	 * Some extents aren't equivalent - w.r.t. what the triggers do
+	 * - if they're split:
+	 */
+	bool remark_if_split = bch2_extent_is_compressed(bkey_i_to_s_c(k)) ||
+		k->k.type == KEY_TYPE_reflink_p;
+	bool remark = false;
 	int ret;
 
 	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
@@ -280,8 +286,8 @@ retry:
 		if (ret)
 			goto err;
 
-		if (!split_compressed &&
-		    bch2_extent_is_compressed(bkey_i_to_s_c(k)) &&
+		if (!remark &&
+		    remark_if_split &&
 		    bkey_cmp(atomic_end, k->k.p) < 0) {
 			ret = bch2_disk_reservation_add(c, &disk_res,
 					k->k.size *
@@ -289,7 +295,7 @@
 					BCH_DISK_RESERVATION_NOFAIL);
 			BUG_ON(ret);
 
-			split_compressed = true;
+			remark = true;
 		}
 
 		bkey_copy(split, k);
@@ -300,7 +306,7 @@ retry:
 		bch2_btree_iter_set_pos(iter, split->k.p);
 	} while (bkey_cmp(iter->pos, k->k.p) < 0);
 
-	if (split_compressed) {
+	if (remark) {
 		ret = bch2_trans_mark_key(&trans, bkey_i_to_s_c(k),
 					  0, -((s64) k->k.size),
 					  BCH_BUCKET_MARK_OVERWRITE) ?:
@@ -653,7 +659,7 @@ static int read_btree_roots(struct bch_fs *c)
 			continue;
 
 		if (i == BTREE_ID_ALLOC &&
-		    test_reconstruct_alloc(c)) {
+		    c->opts.reconstruct_alloc) {
 			c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
 			continue;
 		}
diff --git a/libbcachefs/replicas.c b/libbcachefs/replicas.c
index f84de35c..d0602725 100644
--- a/libbcachefs/replicas.c
+++ b/libbcachefs/replicas.c
@@ -80,7 +80,7 @@ static void extent_to_replicas(struct bkey_s_c k,
 			continue;
 
 		if (p.ec_nr) {
-			r->nr_devs = 0;
+			r->nr_required = 0;
 			break;
 		}
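
Note on the opts.h and recovery.c hunks above: the option table in opts.h is an x-macro list, so the single x(reconstruct_alloc, ...) entry added there is what generates both the field read as c->opts.reconstruct_alloc in read_btree_roots() and the mount-option metadata ("Reconstruct alloc btree"), replacing the debug-build-only test_reconstruct_alloc parameter removed from bcachefs.h. The listing below is a minimal, self-contained sketch of that x-macro pattern for illustration only; MY_OPTS, struct my_opts and parse_opt are invented names, not bcachefs APIs, and the real BCH_OPTS() entries carry more columns (type, where the option may be set, parse info, superblock field, default, help text).

/* opts_sketch.c - illustrative sketch of the x-macro option-table pattern */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* one x() entry per option, in the spirit of the BCH_OPTS() table */
#define MY_OPTS()							\
	x(degraded,		"Allow mounting in degraded mode")	\
	x(reconstruct_alloc,	"Reconstruct alloc btree")		\
	x(version_upgrade,	"Set superblock to latest version")

/* expand the list into one bool field per option */
struct my_opts {
#define x(_name, _help)	bool _name;
	MY_OPTS()
#undef x
};

/* expand the same list into a trivial "-o <name>" style parser */
static bool parse_opt(struct my_opts *opts, const char *name)
{
#define x(_name, _help)							\
	if (!strcmp(name, #_name)) {					\
		opts->_name = true;					\
		return true;						\
	}
	MY_OPTS()
#undef x
	return false;
}

int main(void)
{
	struct my_opts opts = { 0 };

	parse_opt(&opts, "reconstruct_alloc");

	/* mirrors the check added to read_btree_roots() */
	if (opts.reconstruct_alloc)
		printf("skip alloc btree root, reconstruct alloc info\n");
	return 0;
}

Because the new entry is declared with OPT_MOUNT and OPT_BOOL(), the behaviour should be selectable at mount time as an ordinary boolean option (something like -o reconstruct_alloc) rather than requiring a debug build.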