mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to 45845c32a41a fs: bcachefs: add missing MODULE_DESCRIPTION()
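
Notable changes in this sync:

- btree_ptr_sectors_written() now takes struct bkey_i * instead of struct bkey_s_c
- the error-checking __snapshot_t() wrapper and bch2_invalid_snapshot_id() are dropped; __snapshot_t() no longer takes a struct bch_fs *
- check_key_has_snapshot() moves from fsck.c to snapshot.c as the exported bch2_check_key_has_snapshot()
- snapshot_delete_key() and move_key_to_correct_snapshot() are merged into delete_dead_snapshots_process_key()
- lookup_first_inode() is rewritten to use for_each_btree_key_norestart()
- btree_key_cache_fill() now calls bch2_trans_relock() before bch2_btree_node_relock()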
parent 5a3011c48e
commit 9bb2977a1a
--- a/.bcachefs_revision
+++ b/.bcachefs_revision
@@ -1 +1 @@
-254510a1c2691db5fdaccbafe0e1872fd7a2e4e6
+45845c32a41abe7bd765ca02e3d563b109b5f06c
--- a/libbcachefs/btree_io.c
+++ b/libbcachefs/btree_io.c
@@ -534,7 +534,7 @@ static void btree_err_msg(struct printbuf *out, struct bch_fs *c,
 	printbuf_indent_add(out, 2);
 
 	prt_printf(out, "\nnode offset %u/%u",
-		   b->written, btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)));
+		   b->written, btree_ptr_sectors_written(&b->key));
 	if (i)
 		prt_printf(out, " bset u64s %u", le16_to_cpu(i->u64s));
 	if (k)
@@ -689,7 +689,6 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			 int write, bool have_retry, bool *saw_error)
 {
 	unsigned version = le16_to_cpu(i->version);
-	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
 	struct printbuf buf1 = PRINTBUF;
 	struct printbuf buf2 = PRINTBUF;
 	int ret = 0;
@@ -733,13 +732,11 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
 			 btree_node_unsupported_version,
 			 "BSET_SEPARATE_WHITEOUTS no longer supported");
 
-	if (!write &&
-	    btree_err_on(offset + sectors > (ptr_written ?: btree_sectors(c)),
+	if (btree_err_on(offset + sectors > btree_sectors(c),
 			 -BCH_ERR_btree_node_read_err_fixable,
 			 c, ca, b, i, NULL,
 			 bset_past_end_of_btree_node,
-			 "bset past end of btree node (offset %u len %u but written %zu)",
-			 offset, sectors, ptr_written ?: btree_sectors(c))) {
+			 "bset past end of btree node")) {
 		i->u64s = 0;
 		ret = 0;
 		goto out;
@@ -1005,7 +1002,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
 	unsigned u64s;
-	unsigned ptr_written = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
+	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
 	struct printbuf buf = PRINTBUF;
 	int ret = 0, retry_read = 0, write = READ;
 	u64 start_time = local_clock();
@@ -2136,7 +2133,7 @@ do_write:
 
 	if (!b->written &&
 	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
-		BUG_ON(btree_ptr_sectors_written(bkey_i_to_s_c(&b->key)) != sectors_to_write);
+		BUG_ON(btree_ptr_sectors_written(&b->key) != sectors_to_write);
 
 	memset(data + bytes_to_write, 0,
 	       (sectors_to_write << 9) - bytes_to_write);
--- a/libbcachefs/btree_io.h
+++ b/libbcachefs/btree_io.h
@@ -27,10 +27,10 @@ static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b
 	atomic_dec(&c->btree_cache.dirty);
 }
 
-static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)
+static inline unsigned btree_ptr_sectors_written(struct bkey_i *k)
 {
-	return k.k->type == KEY_TYPE_btree_ptr_v2
-		? le16_to_cpu(bkey_s_c_to_btree_ptr_v2(k).v->sectors_written)
+	return k->k.type == KEY_TYPE_btree_ptr_v2
+		? le16_to_cpu(bkey_i_to_btree_ptr_v2(k)->v.sectors_written)
 		: 0;
 }
 
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c
@@ -424,18 +424,18 @@ static int btree_key_cache_fill(struct btree_trans *trans,
 			goto err;
 		}
 
+		ret = bch2_trans_relock(trans);
+		if (ret) {
+			kfree(new_k);
+			goto err;
+		}
+
 		if (!bch2_btree_node_relock(trans, ck_path, 0)) {
 			kfree(new_k);
 			trace_and_count(trans->c, trans_restart_relock_key_cache_fill, trans, _THIS_IP_, ck_path);
 			ret = btree_trans_restart(trans, BCH_ERR_transaction_restart_key_cache_fill);
 			goto err;
 		}
-
-		ret = bch2_trans_relock(trans);
-		if (ret) {
-			kfree(new_k);
-			goto err;
-		}
 	}
 }
 
--- a/libbcachefs/btree_update_interior.c
+++ b/libbcachefs/btree_update_interior.c
@@ -1359,7 +1359,7 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as,
 	unsigned long old, new, v;
 
 	BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
-	       !btree_ptr_sectors_written(bkey_i_to_s_c(insert)));
+	       !btree_ptr_sectors_written(insert));
 
 	if (unlikely(!test_bit(JOURNAL_replay_done, &c->journal.flags)))
 		bch2_journal_key_overwritten(c, b->c.btree_id, b->c.level, insert->k.p);
--- a/libbcachefs/fsck.c
+++ b/libbcachefs/fsck.c
@@ -77,21 +77,17 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
 	struct bkey_s_c k;
 	int ret;
 
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
-			     POS(0, inode_nr),
-			     BTREE_ITER_all_snapshots);
-	k = bch2_btree_iter_peek(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
-
-	if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
-		ret = -BCH_ERR_ENOENT_inode;
-		goto err;
+	for_each_btree_key_norestart(trans, iter, BTREE_ID_inodes, POS(0, inode_nr),
+				     BTREE_ITER_all_snapshots, k, ret) {
+		if (k.k->p.offset != inode_nr)
+			break;
+		if (!bkey_is_inode(k.k))
+			continue;
+		ret = bch2_inode_unpack(k, inode);
+		goto found;
 	}
-
-	ret = bch2_inode_unpack(k, inode);
-err:
+	ret = -BCH_ERR_ENOENT_inode;
+found:
 	bch_err_msg(trans->c, ret, "fetching inode %llu", inode_nr);
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
@@ -770,25 +766,6 @@ static int get_visible_inodes(struct btree_trans *trans,
 	return ret;
 }
 
-static int check_key_has_snapshot(struct btree_trans *trans,
-				  struct btree_iter *iter,
-				  struct bkey_s_c k)
-{
-	struct bch_fs *c = trans->c;
-	struct printbuf buf = PRINTBUF;
-	int ret = 0;
-
-	if (mustfix_fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
-				bkey_in_missing_snapshot,
-				"key in missing snapshot: %s",
-				(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
-		ret = bch2_btree_delete_at(trans, iter,
-					   BTREE_UPDATE_internal_snapshot_node) ?: 1;
-fsck_err:
-	printbuf_exit(&buf);
-	return ret;
-}
-
 static int hash_redo_key(struct btree_trans *trans,
 			 const struct bch_hash_desc desc,
 			 struct bch_hash_info *hash_info,
@@ -983,7 +960,7 @@ static int check_inode(struct btree_trans *trans,
 	bool do_update = false;
 	int ret;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret < 0)
 		goto err;
 	if (ret)
@@ -1487,7 +1464,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret) {
 		ret = ret < 0 ? ret : 0;
 		goto out;
@@ -2010,7 +1987,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
 	struct printbuf buf = PRINTBUF;
 	int ret = 0;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret) {
 		ret = ret < 0 ? ret : 0;
 		goto out;
@@ -2165,7 +2142,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
 	struct inode_walker_entry *i;
 	int ret;
 
-	ret = check_key_has_snapshot(trans, iter, k);
+	ret = bch2_check_key_has_snapshot(trans, iter, k);
 	if (ret < 0)
 		return ret;
 	if (ret)
--- a/libbcachefs/move.c
+++ b/libbcachefs/move.c
@@ -804,7 +804,7 @@ int bch2_evacuate_bucket(struct moving_context *ctxt,
 		if (!b)
 			goto next;
 
-		unsigned sectors = btree_ptr_sectors_written(bkey_i_to_s_c(&b->key));
+		unsigned sectors = btree_ptr_sectors_written(&b->key);
 
 		ret = bch2_btree_node_rewrite(trans, &iter, b, 0);
 		bch2_trans_iter_exit(trans, &iter);
--- a/libbcachefs/sb-downgrade.c
+++ b/libbcachefs/sb-downgrade.c
@@ -146,10 +146,17 @@ static int bch2_sb_downgrade_validate(struct bch_sb *sb, struct bch_sb_field *f,
 	for (const struct bch_sb_field_downgrade_entry *i = e->entries;
 	     (void *) i < vstruct_end(&e->field);
 	     i = downgrade_entry_next_c(i)) {
+		/*
+		 * Careful: sb_field_downgrade_entry is only 2 byte aligned, but
+		 * section sizes are 8 byte aligned - an empty entry spanning
+		 * the end of the section is allowed (and ignored):
+		 */
+		if ((void *) &i->errors[0] > vstruct_end(&e->field))
+			break;
+
 		if (flags & BCH_VALIDATE_write &&
-		    ((void *) &i->errors[0] > vstruct_end(&e->field) ||
-		     (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field))) {
-			prt_printf(err, "downgrade entry overruns end of superblock section)");
+		    (void *) downgrade_entry_next_c(i) > vstruct_end(&e->field)) {
+			prt_printf(err, "downgrade entry overruns end of superblock section");
 			return -BCH_ERR_invalid_sb_downgrade;
 		}
 
--- a/libbcachefs/snapshot.c
+++ b/libbcachefs/snapshot.c
@@ -92,19 +92,10 @@ static int bch2_snapshot_tree_create(struct btree_trans *trans,
 
 /* Snapshot nodes: */
 
-void bch2_invalid_snapshot_id(struct bch_fs *c, u32 id)
-{
-	bch_err(c, "reference to invalid snapshot ID %u", id);
-
-	if (c->curr_recovery_pass == BCH_RECOVERY_PASS_NR)
-		bch2_inconsistent_error(c);
-}
-
-static bool __bch2_snapshot_is_ancestor_early(struct bch_fs *c, struct snapshot_table *t,
-					      u32 id, u32 ancestor)
+static bool __bch2_snapshot_is_ancestor_early(struct snapshot_table *t, u32 id, u32 ancestor)
 {
 	while (id && id < ancestor) {
-		const struct snapshot_t *s = __snapshot_t(c, t, id);
+		const struct snapshot_t *s = __snapshot_t(t, id);
 		id = s ? s->parent : 0;
 	}
 	return id == ancestor;
@@ -113,15 +104,15 @@ static bool __bch2_snapshot_is_ancestor_early(struct bch_fs *c, struct snapshot_
 static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
 {
 	rcu_read_lock();
-	bool ret = __bch2_snapshot_is_ancestor_early(c, rcu_dereference(c->snapshots), id, ancestor);
+	bool ret = __bch2_snapshot_is_ancestor_early(rcu_dereference(c->snapshots), id, ancestor);
 	rcu_read_unlock();
 
 	return ret;
 }
 
-static inline u32 get_ancestor_below(struct bch_fs *c, struct snapshot_table *t, u32 id, u32 ancestor)
+static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ancestor)
 {
-	const struct snapshot_t *s = __snapshot_t(c, t, id);
+	const struct snapshot_t *s = __snapshot_t(t, id);
 	if (!s)
 		return 0;
 
@@ -134,9 +125,9 @@ static inline u32 get_ancestor_below(struct bch_fs *c, struct snapshot_table *t,
 	return s->parent;
 }
 
-static bool test_ancestor_bitmap(struct bch_fs *c, struct snapshot_table *t, u32 id, u32 ancestor)
+static bool test_ancestor_bitmap(struct snapshot_table *t, u32 id, u32 ancestor)
 {
-	const struct snapshot_t *s = __snapshot_t(c, t, id);
+	const struct snapshot_t *s = __snapshot_t(t, id);
 	if (!s)
 		return false;
 
@@ -151,18 +142,18 @@ bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
 	struct snapshot_table *t = rcu_dereference(c->snapshots);
 
 	if (unlikely(c->recovery_pass_done < BCH_RECOVERY_PASS_check_snapshots)) {
-		ret = __bch2_snapshot_is_ancestor_early(c, t, id, ancestor);
+		ret = __bch2_snapshot_is_ancestor_early(t, id, ancestor);
 		goto out;
 	}
 
 	while (id && id < ancestor - IS_ANCESTOR_BITMAP)
-		id = get_ancestor_below(c, t, id, ancestor);
+		id = get_ancestor_below(t, id, ancestor);
 
 	ret = id && id < ancestor
-		? test_ancestor_bitmap(c, t, id, ancestor)
+		? test_ancestor_bitmap(t, id, ancestor)
 		: id == ancestor;
 
-	EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(c, t, id, ancestor));
+	EBUG_ON(ret != __bch2_snapshot_is_ancestor_early(t, id, ancestor));
 out:
 	rcu_read_unlock();
 
@@ -330,7 +321,6 @@ static int __bch2_mark_snapshot(struct btree_trans *trans,
 		t->children[1]	= le32_to_cpu(s.v->children[1]);
 		t->subvol	= BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
 		t->tree		= le32_to_cpu(s.v->tree);
-		t->equiv	= id;
 
 		if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
 			t->depth	= le32_to_cpu(s.v->depth);
@@ -1052,6 +1042,25 @@ err:
 	return ret;
 }
 
+int bch2_check_key_has_snapshot(struct btree_trans *trans,
+				struct btree_iter *iter,
+				struct bkey_s_c k)
+{
+	struct bch_fs *c = trans->c;
+	struct printbuf buf = PRINTBUF;
+	int ret = 0;
+
+	if (fsck_err_on(!bch2_snapshot_equiv(c, k.k->p.snapshot), c,
+			bkey_in_missing_snapshot,
+			"key in missing snapshot %s, delete?",
+			(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
+		ret = bch2_btree_delete_at(trans, iter,
+					   BTREE_UPDATE_internal_snapshot_node) ?: 1;
+fsck_err:
+	printbuf_exit(&buf);
+	return ret;
+}
+
 /*
  * Mark a snapshot as deleted, for future cleanup:
  */
@@ -1361,35 +1370,39 @@ int bch2_snapshot_node_create(struct btree_trans *trans, u32 parent,
  * that key to snapshot leaf nodes, where we can mutate it
  */
 
-static int snapshot_delete_key(struct btree_trans *trans,
+static int delete_dead_snapshots_process_key(struct btree_trans *trans,
 			       struct btree_iter *iter,
 			       struct bkey_s_c k,
 			       snapshot_id_list *deleted,
 			       snapshot_id_list *equiv_seen,
 			       struct bpos *last_pos)
 {
+	int ret = bch2_check_key_has_snapshot(trans, iter, k);
+	if (ret)
+		return ret < 0 ? ret : 0;
+
 	struct bch_fs *c = trans->c;
 	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+	if (!equiv) /* key for invalid snapshot node, but we chose not to delete */
+		return 0;
 
 	if (!bkey_eq(k.k->p, *last_pos))
 		equiv_seen->nr = 0;
-	*last_pos = k.k->p;
 
-	if (snapshot_list_has_id(deleted, k.k->p.snapshot) ||
-	    snapshot_list_has_id(equiv_seen, equiv)) {
+	if (snapshot_list_has_id(deleted, k.k->p.snapshot))
 		return bch2_btree_delete_at(trans, iter,
 					    BTREE_UPDATE_internal_snapshot_node);
-	} else {
-		return snapshot_list_add(c, equiv_seen, equiv);
-	}
-}
 
-static int move_key_to_correct_snapshot(struct btree_trans *trans,
-					struct btree_iter *iter,
-					struct bkey_s_c k)
-{
-	struct bch_fs *c = trans->c;
-	u32 equiv = bch2_snapshot_equiv(c, k.k->p.snapshot);
+	if (!bpos_eq(*last_pos, k.k->p) &&
+	    snapshot_list_has_id(equiv_seen, equiv))
+		return bch2_btree_delete_at(trans, iter,
+					    BTREE_UPDATE_internal_snapshot_node);
+
+	*last_pos = k.k->p;
+
+	ret = snapshot_list_add_nodup(c, equiv_seen, equiv);
+	if (ret)
+		return ret;
 
 	/*
 	 * When we have a linear chain of snapshot nodes, we consider
@@ -1399,21 +1412,20 @@ static int move_key_to_correct_snapshot(struct btree_trans *trans,
 	 *
 	 * If there are multiple keys in different snapshots at the same
 	 * position, we're only going to keep the one in the newest
-	 * snapshot - the rest have been overwritten and are redundant,
-	 * and for the key we're going to keep we need to move it to the
-	 * equivalance class ID if it's not there already.
+	 * snapshot (we delete the others above) - the rest have been
+	 * overwritten and are redundant, and for the key we're going to keep we
+	 * need to move it to the equivalance class ID if it's not there
+	 * already.
 	 */
 	if (equiv != k.k->p.snapshot) {
 		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
-		struct btree_iter new_iter;
-		int ret;
-
-		ret = PTR_ERR_OR_ZERO(new);
+		int ret = PTR_ERR_OR_ZERO(new);
 		if (ret)
 			return ret;
 
 		new->k.p.snapshot = equiv;
 
+		struct btree_iter new_iter;
 		bch2_trans_iter_init(trans, &new_iter, iter->btree_id, new->k.p,
 				     BTREE_ITER_all_snapshots|
 				     BTREE_ITER_cached|
@@ -1548,7 +1560,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
 	struct btree_trans *trans;
 	snapshot_id_list deleted = { 0 };
 	snapshot_id_list deleted_interior = { 0 };
-	u32 id;
 	int ret = 0;
 
 	if (!test_and_clear_bit(BCH_FS_need_delete_dead_snapshots, &c->flags))
@@ -1595,33 +1606,20 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
 	if (ret)
 		goto err;
 
-	for (id = 0; id < BTREE_ID_NR; id++) {
+	for (unsigned btree = 0; btree < BTREE_ID_NR; btree++) {
 		struct bpos last_pos = POS_MIN;
 		snapshot_id_list equiv_seen = { 0 };
 		struct disk_reservation res = { 0 };
 
-		if (!btree_type_has_snapshots(id))
-			continue;
-
-		/*
-		 * deleted inodes btree is maintained by a trigger on the inodes
-		 * btree - no work for us to do here, and it's not safe to scan
-		 * it because we'll see out of date keys due to the btree write
-		 * buffer:
-		 */
-		if (id == BTREE_ID_deleted_inodes)
+		if (!btree_type_has_snapshots(btree))
 			continue;
 
 		ret = for_each_btree_key_commit(trans, iter,
-				id, POS_MIN,
+				btree, POS_MIN,
 				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
 				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
-				snapshot_delete_key(trans, &iter, k, &deleted, &equiv_seen, &last_pos)) ?:
-			for_each_btree_key_commit(trans, iter,
-				id, POS_MIN,
-				BTREE_ITER_prefetch|BTREE_ITER_all_snapshots, k,
-				&res, NULL, BCH_TRANS_COMMIT_no_enospc,
-				move_key_to_correct_snapshot(trans, &iter, k));
+				delete_dead_snapshots_process_key(trans, &iter, k, &deleted,
+								  &equiv_seen, &last_pos));
 
 		bch2_disk_reservation_put(c, &res);
 		darray_exit(&equiv_seen);
--- a/libbcachefs/snapshot.h
+++ b/libbcachefs/snapshot.h
@@ -32,7 +32,7 @@ int bch2_mark_snapshot(struct btree_trans *, enum btree_id, unsigned,
 	.min_val_size	= 24,			\
 })
 
-static inline struct snapshot_t *__snapshot_t_noerror(struct snapshot_table *t, u32 id)
+static inline struct snapshot_t *__snapshot_t(struct snapshot_table *t, u32 id)
 {
 	u32 idx = U32_MAX - id;
 
@@ -41,26 +41,9 @@ static inline struct snapshot_t *__snapshot_t_noerror(struct snapshot_table *t,
 		: NULL;
 }
 
-void bch2_invalid_snapshot_id(struct bch_fs *, u32);
-
-static inline struct snapshot_t *__snapshot_t(struct bch_fs *c, struct snapshot_table *t, u32 id)
-{
-	struct snapshot_t *s = __snapshot_t_noerror(t, id);
-	if (unlikely(!s || !s->equiv)) {
-		bch2_invalid_snapshot_id(c, id);
-		s = NULL;
-	}
-	return s;
-}
-
-static inline const struct snapshot_t *snapshot_t_noerror(struct bch_fs *c, u32 id)
-{
-	return __snapshot_t_noerror(rcu_dereference(c->snapshots), id);
-}
-
 static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
 {
-	return __snapshot_t(c, rcu_dereference(c->snapshots), id);
+	return __snapshot_t(rcu_dereference(c->snapshots), id);
 }
 
 static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
@@ -259,6 +242,7 @@ int bch2_snapshot_node_create(struct btree_trans *, u32,
 int bch2_check_snapshot_trees(struct bch_fs *);
 int bch2_check_snapshots(struct bch_fs *);
 int bch2_reconstruct_snapshots(struct bch_fs *);
+int bch2_check_key_has_snapshot(struct btree_trans *, struct btree_iter *, struct bkey_s_c);
 
 int bch2_snapshot_node_set_deleted(struct btree_trans *, u32);
 void bch2_delete_dead_snapshots_work(struct work_struct *);
--- a/libbcachefs/super-io.c
+++ b/libbcachefs/super-io.c
@@ -1132,18 +1132,12 @@ bool bch2_check_version_downgrade(struct bch_fs *c)
 	 * c->sb will be checked before we write the superblock, so update it as
 	 * well:
 	 */
-	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current) {
+	if (BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb) > bcachefs_metadata_version_current)
 		SET_BCH_SB_VERSION_UPGRADE_COMPLETE(c->disk_sb.sb, bcachefs_metadata_version_current);
-		c->sb.version_upgrade_complete = bcachefs_metadata_version_current;
-	}
-	if (c->sb.version > bcachefs_metadata_version_current) {
+	if (c->sb.version > bcachefs_metadata_version_current)
 		c->disk_sb.sb->version = cpu_to_le16(bcachefs_metadata_version_current);
-		c->sb.version = bcachefs_metadata_version_current;
-	}
-	if (c->sb.version_min > bcachefs_metadata_version_current) {
+	if (c->sb.version_min > bcachefs_metadata_version_current)
 		c->disk_sb.sb->version_min = cpu_to_le16(bcachefs_metadata_version_current);
-		c->sb.version_min = bcachefs_metadata_version_current;
-	}
 	c->disk_sb.sb->compat[0] &= cpu_to_le64((1ULL << BCH_COMPAT_NR) - 1);
 	return ret;
 }
--- a/libbcachefs/super.c
+++ b/libbcachefs/super.c
@@ -564,7 +564,7 @@ static void __bch2_fs_free(struct bch_fs *c)
 	BUG_ON(atomic_read(&c->journal_keys.ref));
 	bch2_fs_btree_write_buffer_exit(c);
 	percpu_free_rwsem(&c->mark_lock);
-	EBUG_ON(percpu_u64_get(c->online_reserved));
+	EBUG_ON(c->online_reserved && percpu_u64_get(c->online_reserved));
 	free_percpu(c->online_reserved);
 
 	darray_exit(&c->btree_roots_extra);