From 9bb2977a1ad31a61e080da0bd32000f5cdbc62a0 Mon Sep 17 00:00:00 2001
From: Kent Overstreet <kent.overstreet@linux.dev>
Date: Mon, 27 May 2024 11:00:22 -0400
Subject: [PATCH] Update bcachefs sources to 45845c32a41a fs: bcachefs: add
 missing MODULE_DESCRIPTION()

---
 .bcachefs_revision                  |   2 +-
 libbcachefs/btree_io.c              |  13 ++-
 libbcachefs/btree_io.h              |   6 +-
 libbcachefs/btree_key_cache.c       |  12 +--
 libbcachefs/btree_update_interior.c |   2 +-
 libbcachefs/fsck.c                  |  51 ++++--------
 libbcachefs/move.c                  |   2 +-
 libbcachefs/sb-downgrade.c          |  13 ++-
 libbcachefs/snapshot.c              | 120 ++++++++++++++--------------
 libbcachefs/snapshot.h              |  22 +----
 libbcachefs/super-io.c              |  12 +--
 libbcachefs/super.c                 |   2 +-
 12 files changed, 107 insertions(+), 150 deletions(-)

diff --git a/.bcachefs_revision b/.bcachefs_revision
index d5cdc669..0a34e57c 100644
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-254510a1c2691db5fdaccbafe0e1872fd7a2e4e6
+45845c32a41abe7bd765ca02e3d563b109b5f06c
diff --git a/libbcachefs/btree_io.c b/libbcachefs/btree_io.c
index 0b176d4c..9f42f2c3 100644
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -534,7 +534,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
 	printbuf_indent_add(out, 2);
 
 	prt_printf(out, "\nnode offset %u/%u",
-		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
+		   b->written, btree_ptr_sectors_written(&b->key));
 	if (i)
 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
 	if (k)
@@ -689,7 +689,6 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			 int write, bool have_retry, bool *saw_error)
 {
 	unsigned version = le16_to_cpu(i->version);
-	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
 	struct printbuf buf1 = PRINTBUF;
 	struct printbuf buf2 = PRINTBUF;
 	int ret = 0;
@@ -733,13 +732,11 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 		     btree_node_unsupported_version,
 		     "BSET_SEPARATE_WHITEOUTS no longer supported");
 
-	if (!write &&
-	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
+	if (btree_err_on(offset + sectors > btree_sectors(c),
 			 -BCH_ERR_btree_node_read_err_fixable,
 			 c, ca, b, i, NULL,
 			 bset_past_end_of_btree_node,
-			 "bset past end of btree node (offset %u len %u but written %zu)",
-			 offset, sectors, ptr_written ?: btree_sectors(c))) {
+			 "bset past end of btree node")) {
 		i->u64s = 0;
 		ret = 0;
 		goto out;
@@ -1005,7 +1002,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
 	unsigned u64s;
-	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
+	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
 	struct printbuf buf = PRINTBUF;
 	int ret = 0, retry_read = 0, write = READ;
 	u64 start_time = local_clock();
@@ -2136,7 +2133,7 @@ do_write:
 
 	if (!b->written &&
 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
-		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);
+		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
 
 	memset(data + bytes_to_write, 0,
 	       (sectors_to_write << 9) - bytes_to_write);
diff --git a/libbcachefs/btree_io.h b/libbcachefs/btree_io.h
index 63d76f5c..2b8b564f 100644
--- a/libbcachefs/btree_io.h
+++ b/libbcachefs/btree_io.h
@@ -27,10 +27,10 @@ static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b
 	atomic_dec(&c->btree_cache.dirty);
 }
 
-static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
+static inline unsigned btree_ptr_sectors_written(struct bkey_i *k)
 {
-	return k.k->type == KEY_TYPE_btree_ptr_v2
-		? le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors_written)
+	return k->k.type == KEY_TYPE_btree_ptr_v2
+		? le16_to_cpu(bkey_i_to_btree_ptr_v2(k)->v.sectors_written)
 		: 0;
 }
 
diff --git a/libbcachefs/btree_key_cache.c b/libbcachefs/btree_key_cache.c
index 75f5e6fe..34056aae 100644
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -424,18 +424,18 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 					goto err;
 				}
 
+				ret = bch2_trans_relock(trans);
+				if (ret) {
+					kfree(new_k);
+					goto err;
+				}
+
 				if (!bch2_btree_node_relock(trans, ck_path, 0)) {
 					kfree(new_k);
 					trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
 					ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
 					goto err;
 				}
-
-				ret = bch2_trans_relock(trans);
-				if (ret) {
-					kfree(new_k);
-					goto err;
-				}
 			}
 		}
 
diff --git a/libbcachefs/btree_update_interior.c b/libbcachefs/btree_update_interior.c
index 7647ccdc..60b8544c 100644
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -1359,7 +1359,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
 	unsigned long old, new, v;
 
 	BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
-	       !btree_ptr_sectors_written(bkey_i_to_s_c(insert)));
+	       !btree_ptr_sectors_written(insert));
 
 	if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)))
 		bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);
diff --git a/libbcachefs/fsck.c b/libbcachefs/fsck.c
index c8f57465..fd277bd5 100644
--- a/libbcachefs/fsck.c
+++ b/libbcachefs/fsck.c
@@ -77,21 +77,17 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
-			     POS(0, inode_nr),
-			     BTREE_ITER_all_snapshots);
-	k = bch2_btree_iter_peek(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
-
-	if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
-		ret = -BCH_ERR_ENOENT_inode;
-		goto err;
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
+				     BTREE_ITER_all_snapshots, k, ret) {
+		if (k.k->p.offset != inode_nr)
+			break;
+		if (!bkey_is_inode(k.k))
+			continue;
+		ret = bch2_inode_unpack(k, inode);
+		goto found;
 	}
-
-	ret = bch2_inode_unpack(k, inode);
-err:
+	ret = -BCH_ERR_ENOENT_inode;
+found:
 	bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
@@ -770,25 +766,6 @@ static int get_visible_inodes(struct btree_trans *trans,
 	return ret;
 }
 
-static int check_key_has_snapshot(struct btree_trans *trans,
-				  struct btree_iter *iter,
-				  struct bkey_s_c k)
-{
-	struct bch_fs *c = trans->c;
-	struct printbuf buf = PRINTBUF;
-	int ret = 0;
-
-	if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
-				bkey_in_missing_snapshot,
-				"key in missing snapshot: %s",
-				(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
-		ret = bch2_btree_delete_at(trans, iter,
-					   BTREE_UPDATE_internal_snapshot_node) ?: 1;
-fsck_err:
-	printbuf_exit(&buf);
-	return ret;
-}
-
 static int hash_redo_key(struct btree_trans *trans,
 			 const struct bch_hash_desc desc,
 			 struct bch_hash_info *hash_info,
@@ -983,7 +960,7 @@ static int check_inode(struct btree_trans *trans,
 	bool do_update = false;
 	int ret;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret < 0)
 		goto err;
 	if (ret)
@@ -1487,7 +1464,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret) {
 		ret = ret < 0 ? ret : 0;
 		goto out;
@@ -2010,7 +1987,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret) {
 		ret = ret < 0 ? ret : 0;
 		goto out;
@@ -2165,7 +2142,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
 	struct inode_walker_entry *i;
 	int ret;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret < 0)
 		return ret;
 	if (ret)
diff --git a/libbcachefs/move.c b/libbcachefs/move.c
index 0e587916..8171f947 100644
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -804,7 +804,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
 		if (!b)
 			goto next;
 
-		unsigned sectors = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
+		unsigned sectors = btree_ptr_sectors_written(&b->key);
 
 		ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
 		bch2_trans_iter_exit(trans, &iter);
diff --git a/libbcachefs/sb-downgrade.c b/libbcachefs/sb-downgrade.c
index 390a1bbd..3fb23e39 100644
--- a/libbcachefs/sb-downgrade.c
+++ b/libbcachefs/sb-downgrade.c
@@ -146,10 +146,17 @@ static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
 	for (const struct bch_sb_field_downgrade_entry *i = e->entries;
 	     (void *) i < vstruct_end(&e->field);
 	     i = downgrade_entry_next_c(i)) {
+		/*
+		 * Careful: sb_field_downgrade_entry is only 2 byte aligned, but
+		 * section sizes are 8 byte aligned - an empty entry spanning
+		 * the end of the section is allowed (and ignored):
+		 */
+		if ((void *) &i->errors[0] > vstruct_end(&e->field))
+			break;
+
 		if (flags & BCH_VALIDATE_write &&
-		    ((void *) &i->errors[0] > vstruct_end(&e->field) ||
-		     (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field))) {
-			prt_printf(err, "downgrade entry overruns end of superblock section)");
+		    (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field)) {
+			prt_printf(err, "downgrade entry overruns end of superblock section");
 			return -BCH_ERR_invalid_sb_downgrade;
 		}
 
diff --git a/libbcachefs/snapshot.c b/libbcachefs/snapshot.c
index 720c4080..51918acf 100644
--- a/libbcachefs/snapshot.c
+++ b/libbcachefs/snapshot.c
@@ -92,19 +92,10 @@ static int bch2_snapshot_tree_create(struct btree_trans *trans,
 
 /* Snapshot nodes: */
 
-void bch2_invalid_snapshot_id(struct bch_fs *c, u32 id)
-{
-	bch_err(c, "reference to invalid snapshot ID %u", id);
-
-	if (c->curr_recovery_pass == BCH_RECOVERY_PASS_NR)
-		bch2_inconsistent_error(c);
-}
-
-static bool __bch2_snapshot_is_ancestor_early(struct bch_fs *c, struct snapshot_table *t,
-					      u32 id, u32 ancestor)
+static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
 {
 	while (id && id < ancestor) {
-		const struct snapshot_t *s = __snapshot_t(c, t, id);
+		const struct snapshot_t *s = __snapshot_t(t, id);
 		id = s ? s->parent : 0;
 	}
 	return id == ancestor;
@@ -113,15 +104,15 @@ static bool __bch2_snapshot_is_ancestor_early(struct bch_fs *c, struct snapshot_
 static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
 {
 	rcu_read_lock();
-	bool ret = __bch2_snapshot_is_ancestor_early(c, rcu_dereference(c->snapshots), id, ancestor);
+	bool ret = __bch2_snapshot_is_ancestor_early(rcu_dereference(c->snapshots), id, ancestor);
 	rcu_read_unlock();
 
 	return ret;
 }
 
-static inline u32 get_ancestor_below(struct bch_fs *c, struct snapshot_table *t, u32 id, u32 ancestor)
+static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
 {
-	const struct snapshot_t *s = __snapshot_t(c, t, id);
+	const struct snapshot_t *s = __snapshot_t(t, id);
 
 	if (!s)
 		return 0;
@@ -134,9 +125,9 @@ static inline u32 get_ancestor_below(struct bch_fs *c, struct snapshot_table *t,
 	return s->parent;
 }
 
-static bool test_ancestor_bitmap(struct bch_fs *c, struct snapshot_table *t, u32 id, u32 ancestor)
+static bool test_ancestor_bitmap(struct snapshot_table *t, u32 id, u32 ancestor)
 {
-	const struct snapshot_t *s = __snapshot_t(c, t, id);
+	const struct snapshot_t *s = __snapshot_t(t, id);
 
 	if (!s)
 		return false;
@@ -151,18 +142,18 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
 	struct snapshot_table *t = rcu_dereference(c->snapshots);
 
 	if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
-		ret = __bch2_snapshot_is_ancestor_early(c, t, id, ancestor);
+		ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
 		goto out;
 	}
 
 	while (id && id < ancestor - IS_ANCESTOR_BITMAP)
-		id = get_ancestor_below(c, t, id, ancestor);
+		id = get_ancestor_below(t, id, ancestor);
 
 	ret = id && id < ancestor
-		? test_ancestor_bitmap(c, t, id, ancestor)
+		? test_ancestor_bitmap(t, id, ancestor)
 		: id == ancestor;
 
-	EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(c, t, id, ancestor));
+	EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
 out:
 	rcu_read_unlock();
 
@@ -330,7 +321,6 @@ static int __bch2_mark_snapshot(struct btree_trans *trans,
 		t->children[1]	= le32_to_cpu(s.v->children[1]);
 		t->subvol	= BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
 		t->tree		= le32_to_cpu(s.v->tree);
-		t->equiv	= id;
 
 		if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
 			t->depth  = le32_to_cpu(s.v->depth);
@@ -1052,6 +1042,25 @@ err:
 	return ret;
 }
 
+int bch2_check_key_has_snapshot(struct btree_trans *trans,
+				struct btree_iter *iter,
+				struct bkey_s_c k)
+{
+	struct bch_fs *c = trans->c;
+	struct printbuf buf = PRINTBUF;
+	int ret = 0;
+
+	if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
+			bkey_in_missing_snapshot,
+			"key in missing snapshot %s, delete?",
+			(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+		ret = bch2_btree_delete_at(trans, iter,
+					   BTREE_UPDATE_internal_snapshot_node) ?: 1;
+fsck_err:
+	printbuf_exit(&buf);
+	return ret;
+}
+
 /*
  * Mark a snapshot as deleted, for future cleanup:
  */
@@ -1361,35 +1370,39 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
  * that key to snapshot leaf nodes, where we can mutate it
  */
 
-static int snapshot_delete_key(struct btree_trans *trans,
+static int delete_dead_snapshots_process_key(struct btree_trans *trans,
 			       struct btree_iter *iter,
 			       struct bkey_s_c k,
 			       snapshot_id_list *deleted,
 			       snapshot_id_list *equiv_seen,
 			       struct bpos *last_pos)
 {
+	int ret = bch2_check_key_has_snapshot(trans, iter, k);
+	if (ret)
+		return ret < 0 ? ret : 0;
+
 	struct bch_fs *c = trans->c;
 	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+	if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
+		return 0;
 
 	if (!bkey_eq(k.k->p, *last_pos))
 		equiv_seen->nr = 0;
-	*last_pos = k.k->p;
 
-	if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
-	    snapshot_list_has_id(equiv_seen, equiv)) {
+	if (snapshot_list_has_id(deleted, k.k->p.snapshot))
 		return bch2_btree_delete_at(trans, iter,
 					    BTREE_UPDATE_internal_snapshot_node);
-	} else {
-		return snapshot_list_add(c, equiv_seen, equiv);
-	}
-}
 
-static int move_key_to_correct_snapshot(struct btree_trans *trans,
-					struct btree_iter *iter,
-					struct bkey_s_c k)
-{
-	struct bch_fs *c = trans->c;
-	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+	if (!bpos_eq(*last_pos, k.k->p) &&
+	    snapshot_list_has_id(equiv_seen, equiv))
+		return bch2_btree_delete_at(trans, iter,
+					    BTREE_UPDATE_internal_snapshot_node);
+
+	*last_pos = k.k->p;
+
+	ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
+	if (ret)
+		return ret;
 
 	/*
 	 * When we have a linear chain of snapshot nodes, we consider
@@ -1399,21 +1412,20 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
 	 *
 	 * If there are multiple keys in different snapshots at the same
 	 * position, we're only going to keep the one in the newest
-	 * snapshot - the rest have been overwritten and are redundant,
-	 * and for the key we're going to keep we need to move it to the
-	 * equivalance class ID if it's not there already.
+	 * snapshot (we delete the others above) - the rest have been
+	 * overwritten and are redundant, and for the key we're going to keep we
+	 * need to move it to the equivalance class ID if it's not there
+	 * already.
 	 */
 
 	if (equiv != k.k->p.snapshot) {
 		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
-		struct btree_iter new_iter;
-		int ret;
-
-		ret = PTR_ERR_OR_ZERO(new);
+		int ret = PTR_ERR_OR_ZERO(new);
 		if (ret)
 			return ret;
 
 		new->k.p.snapshot = equiv;
+		struct btree_iter new_iter;
 		bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
 				     BTREE_ITER_all_snapshots|
 				     BTREE_ITER_cached|
@@ -1548,7 +1560,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
 	struct btree_trans *trans;
 	snapshot_id_list deleted = { 0 };
 	snapshot_id_list deleted_interior = { 0 };
-	u32 id;
 	int ret = 0;
 
 	if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
@@ -1595,33 +1606,20 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
 	if (ret)
 		goto err;
 
-	for (id = 0; id < BTREE_ID_NR; id++) {
+	for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
 		struct bpos last_pos = POS_MIN;
 		snapshot_id_list equiv_seen = { 0 };
 		struct disk_reservation res = { 0 };
 
-		if (!btree_type_has_snapshots(id))
-			continue;
-
-		/*
-		 * deleted inodes btree is maintained by a trigger on the inodes
-		 * btree - no work for us to do here, and it's not safe to scan
-		 * it because we'll see out of date keys due to the btree write
-		 * buffer:
-		 */
-		if (id == BTREE_ID_deleted_inodes)
+		if (!btree_type_has_snapshots(btree))
 			continue;
 
 		ret = for_each_btree_key_commit(trans, iter,
-				id, POS_MIN,
+				btree, POS_MIN,
 				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
-				snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
-			for_each_btree_key_commit(trans, iter,
-				id, POS_MIN,
-				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
-				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
-				move_key_to_correct_snapshot(trans, &iter, k));
+				delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
+								  &equiv_seen, &last_pos));
 
 		bch2_disk_reservation_put(c, &res);
 		darray_exit(&equiv_seen);
diff --git a/libbcachefs/snapshot.h b/libbcachefs/snapshot.h
index 29e1c5a6..31b0ee03 100644
--- a/libbcachefs/snapshot.h
+++ b/libbcachefs/snapshot.h
@@ -32,7 +32,7 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
 	.min_val_size	= 24,		\
 })
 
-static inline struct snapshot_t *__snapshot_t_noerror(struct snapshot_table *t, u32 id)
+static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
 {
 	u32 idx = U32_MAX - id;
 
@@ -41,26 +41,9 @@ static inline struct snapshot_t *__snapshot_t_noerror(struct snapshot_table *t,
 		: NULL;
 }
 
-void bch2_invalid_snapshot_id(struct bch_fs *, u32);
-
-static inline struct snapshot_t *__snapshot_t(struct bch_fs *c, struct snapshot_table *t, u32 id)
-{
-	struct snapshot_t *s = __snapshot_t_noerror(t, id);
-	if (unlikely(!s || !s->equiv)) {
-		bch2_invalid_snapshot_id(c, id);
-		s = NULL;
-	}
-	return s;
-}
-
-static inline const struct snapshot_t *snapshot_t_noerror(struct bch_fs *c, u32 id)
-{
-	return __snapshot_t_noerror(rcu_dereference(c->snapshots), id);
-}
-
 static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
 {
-	return __snapshot_t(c, rcu_dereference(c->snapshots), id);
+	return __snapshot_t(rcu_dereference(c->snapshots), id);
 }
 
 static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
@@ -259,6 +242,7 @@ int bch2_snapshot_node_create(struct btree_trans *, u32,
 int bch2_check_snapshot_trees(struct bch_fs *);
 int bch2_check_snapshots(struct bch_fs *);
 int bch2_reconstruct_snapshots(struct bch_fs *);
+int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c);
 
 int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
 void bch2_delete_dead_snapshots_work(struct work_struct *);
diff --git a/libbcachefs/super-io.c b/libbcachefs/super-io.c
index f1bee6c5..d73a0222 100644
--- a/libbcachefs/super-io.c
+++ b/libbcachefs/super-io.c
@@ -1132,18 +1132,12 @@ bool bch2_check_version_downgrade(struct bch_fs *c)
 	 * c->sb will be checked before we write the superblock, so update it as
 	 * well:
	 */
-	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) {
+	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
 		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
-		c->sb.version_upgrade_complete = bcachefs_metadata_version_current;
-	}
-	if (c->sb.version > bcachefs_metadata_version_current) {
+	if (c->sb.version > bcachefs_metadata_version_current)
 		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
-		c->sb.version = bcachefs_metadata_version_current;
-	}
-	if (c->sb.version_min > bcachefs_metadata_version_current) {
+	if (c->sb.version_min > bcachefs_metadata_version_current)
 		c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
-		c->sb.version_min = bcachefs_metadata_version_current;
-	}
 	c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
 	return ret;
 }
diff --git a/libbcachefs/super.c b/libbcachefs/super.c
index 2206a8de..df2bea38 100644
--- a/libbcachefs/super.c
+++ b/libbcachefs/super.c
@@ -564,7 +564,7 @@ static void __bch2_fs_free(struct bch_fs *c)
 	BUG_ON(atomic_read(&c->journal_keys.ref));
 	bch2_fs_btree_write_buffer_exit(c);
 	percpu_free_rwsem(&c->mark_lock);
-	EBUG_ON(percpu_u64_get(c->online_reserved));
+	EBUG_ON(c->online_reserved && percpu_u64_get(c->online_reserved));
 	free_percpu(c->online_reserved);
 	darray_exit(&c->btree_roots_extra);