Update bcachefs sources to ee560a3929 bcachefs: Print version, options earlier in startup path

parent a7b0ba44f0
commit 44fc32e7ef
@@ -1 +1 @@
-e14d7c7195b974bbaf400f9c3f2bdaa94fc8d372
+ee560a3929f32350ed7e04550ad009c58ab73d5e
@@ -134,31 +134,15 @@ static noinline int backpointer_mod_err(struct btree_trans *trans,
 }
 
 int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *trans,
-				struct bpos bucket,
+				struct bkey_i_backpointer *bp_k,
 				struct bch_backpointer bp,
 				struct bkey_s_c orig_k,
 				bool insert)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_i_backpointer *bp_k;
 	struct btree_iter bp_iter;
 	struct bkey_s_c k;
 	int ret;
 
-	bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
-	ret = PTR_ERR_OR_ZERO(bp_k);
-	if (ret)
-		return ret;
-
-	bkey_backpointer_init(&bp_k->k_i);
-	bp_k->k.p = bucket_pos_to_bp(c, bucket, bp.bucket_offset);
-	bp_k->v = bp;
-
-	if (!insert) {
-		bp_k->k.type = KEY_TYPE_deleted;
-		set_bkey_val_u64s(&bp_k->k, 0);
-	}
-
 	k = bch2_bkey_get_iter(trans, &bp_iter, BTREE_ID_backpointers,
 			       bp_k->k.p,
 			       BTREE_ITER_INTENT|
@@ -477,7 +461,7 @@ missing:
 	prt_printf(&buf, "\nbp pos ");
 	bch2_bpos_to_text(&buf, bp_iter.pos);
 
-	if (c->sb.version < bcachefs_metadata_version_backpointers ||
+	if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointers ||
 	    c->opts.reconstruct_alloc ||
 	    fsck_err(c, "%s", buf.buf))
 		ret = bch2_bucket_backpointer_mod(trans, bucket, bp, orig_k, true);
@@ -54,7 +54,7 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
 	return ret;
 }
 
-int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bpos,
+int bch2_bucket_backpointer_mod_nowritebuffer(struct btree_trans *, struct bkey_i_backpointer *,
 				struct bch_backpointer, struct bkey_s_c, bool);
 
 static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
@@ -67,9 +67,6 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
 	struct bkey_i_backpointer *bp_k;
 	int ret;
 
-	if (unlikely(bch2_backpointers_no_use_write_buffer))
-		return bch2_bucket_backpointer_mod_nowritebuffer(trans, bucket, bp, orig_k, insert);
-
 	bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(struct bkey_i_backpointer));
 	ret = PTR_ERR_OR_ZERO(bp_k);
 	if (ret)
@@ -84,6 +81,9 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
 		set_bkey_val_u64s(&bp_k->k, 0);
 	}
 
+	if (unlikely(bch2_backpointers_no_use_write_buffer))
+		return bch2_bucket_backpointer_mod_nowritebuffer(trans, bp_k, bp, orig_k, insert);
+
 	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i);
 }
 
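Taken together, the two hunks above move construction of the backpointer key out of the slow path: bch2_bucket_backpointer_mod() now builds the bkey_i_backpointer first and only then decides whether to hand it to the no-write-buffer path or to the write buffer. A condensed sketch of the resulting flow, using only the helpers visible in the diff (error handling abbreviated):

/*
 * Sketch of the call order implied by the hunks above, not a literal
 * copy of the final function body.
 */
static inline int backpointer_mod_sketch(struct btree_trans *trans,
					 struct bpos bucket,
					 struct bch_backpointer bp,
					 struct bkey_s_c orig_k,
					 bool insert)
{
	struct bkey_i_backpointer *bp_k;
	int ret;

	/* Build the key once, in the common path: */
	bp_k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*bp_k));
	ret = PTR_ERR_OR_ZERO(bp_k);
	if (ret)
		return ret;

	bkey_backpointer_init(&bp_k->k_i);
	bp_k->k.p = bucket_pos_to_bp(trans->c, bucket, bp.bucket_offset);
	bp_k->v = bp;

	if (!insert) {
		bp_k->k.type = KEY_TYPE_deleted;
		set_bkey_val_u64s(&bp_k->k, 0);
	}

	/* Then pick the update mechanism, passing the finished key: */
	if (unlikely(bch2_backpointers_no_use_write_buffer))
		return bch2_bucket_backpointer_mod_nowritebuffer(trans, bp_k, bp, orig_k, insert);

	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i);
}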
@@ -563,7 +563,6 @@ enum {
 	BCH_FS_CLEAN_SHUTDOWN,
 
 	/* fsck passes: */
-	BCH_FS_TOPOLOGY_REPAIR_DONE,
 	BCH_FS_FSCK_DONE,
 	BCH_FS_INITIAL_GC_UNFIXED, /* kill when we enumerate fsck errors */
 	BCH_FS_NEED_ANOTHER_GC,
@@ -666,6 +665,7 @@ enum bch_write_ref {
 	x(stripes_read, PASS_ALWAYS) \
 	x(initialize_subvolumes, 0) \
 	x(snapshots_read, PASS_ALWAYS) \
+	x(check_topology, 0) \
 	x(check_allocations, PASS_FSCK) \
 	x(set_may_go_rw, PASS_ALWAYS|PASS_SILENT) \
 	x(journal_replay, PASS_ALWAYS) \
@@ -677,11 +677,11 @@ enum bch_write_ref {
 	x(check_alloc_to_lru_refs, PASS_FSCK) \
 	x(fs_freespace_init, PASS_ALWAYS|PASS_SILENT) \
 	x(bucket_gens_init, 0) \
-	x(fs_upgrade_for_subvolumes, 0) \
 	x(check_snapshot_trees, PASS_FSCK) \
 	x(check_snapshots, PASS_FSCK) \
 	x(check_subvols, PASS_FSCK) \
-	x(delete_dead_snapshots, PASS_FSCK|PASS_UNCLEAN|PASS_SILENT) \
+	x(delete_dead_snapshots, PASS_FSCK|PASS_UNCLEAN) \
+	x(fs_upgrade_for_subvolumes, 0) \
 	x(check_inodes, PASS_FSCK|PASS_UNCLEAN) \
 	x(check_extents, PASS_FSCK) \
 	x(check_dirents, PASS_FSCK) \
@@ -1179,6 +1179,19 @@ static inline bool bch2_dev_exists2(const struct bch_fs *c, unsigned dev)
 	return dev < c->sb.nr_devices && c->devs[dev];
 }
 
+/*
+ * For when we need to rewind recovery passes and run a pass we skipped:
+ */
+static inline int bch2_run_explicit_recovery_pass(struct bch_fs *c,
+						  enum bch_recovery_pass pass)
+{
+	BUG_ON(c->curr_recovery_pass < pass);
+
+	c->recovery_passes_explicit |= BIT_ULL(pass);
+	c->curr_recovery_pass = pass;
+	return -BCH_ERR_restart_recovery;
+}
+
 #define BKEY_PADDED_ONSTACK(key, pad) \
 	struct { struct bkey_i key; __u64 key ## _pad[pad]; }
 
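This new helper is the mechanism that replaces the BCH_FS_TOPOLOGY_REPAIR_DONE flag: a pass that discovers damage marks the needed pass as explicit, rewinds curr_recovery_pass, and returns -BCH_ERR_restart_recovery so its caller unwinds back to the recovery driver. A hedged sketch of how a driver loop could consume that return value follows; the loop itself and the names bch2_run_recovery_pass() and BCH_RECOVERY_PASS_NR are assumptions for illustration, not code from this commit.

/*
 * Hypothetical driver loop, for illustration only: the pass runner and
 * the terminating enum value are assumed, not taken from this diff.
 * The point is that -BCH_ERR_restart_recovery does not abort recovery;
 * it re-enters the loop at the rewound c->curr_recovery_pass.
 */
static int run_recovery_passes_sketch(struct bch_fs *c)
{
	int ret = 0;

	while (c->curr_recovery_pass < BCH_RECOVERY_PASS_NR) {
		ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
		if (ret == -BCH_ERR_restart_recovery)
			continue;	/* curr_recovery_pass was rewound by the helper */
		if (ret)
			break;
		c->curr_recovery_pass++;
	}

	return ret;
}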
@ -97,6 +97,7 @@ static inline int bch2_mark_key(struct btree_trans *trans,
|
||||
enum btree_update_flags {
|
||||
__BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE = __BTREE_ITER_FLAGS_END,
|
||||
__BTREE_UPDATE_NOJOURNAL,
|
||||
__BTREE_UPDATE_PREJOURNAL,
|
||||
__BTREE_UPDATE_KEY_CACHE_RECLAIM,
|
||||
|
||||
__BTREE_TRIGGER_NORUN, /* Don't run triggers at all */
|
||||
@ -111,6 +112,7 @@ enum btree_update_flags {
|
||||
|
||||
#define BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE (1U << __BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE)
|
||||
#define BTREE_UPDATE_NOJOURNAL (1U << __BTREE_UPDATE_NOJOURNAL)
|
||||
#define BTREE_UPDATE_PREJOURNAL (1U << __BTREE_UPDATE_PREJOURNAL)
|
||||
#define BTREE_UPDATE_KEY_CACHE_RECLAIM (1U << __BTREE_UPDATE_KEY_CACHE_RECLAIM)
|
||||
|
||||
#define BTREE_TRIGGER_NORUN (1U << __BTREE_TRIGGER_NORUN)
|
||||
|
@ -40,6 +40,12 @@
|
||||
#define DROP_THIS_NODE 10
|
||||
#define DROP_PREV_NODE 11
|
||||
|
||||
static bool should_restart_for_topology_repair(struct bch_fs *c)
|
||||
{
|
||||
return c->opts.fix_errors != FSCK_FIX_no &&
|
||||
!(c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
|
||||
}
|
||||
|
||||
static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
|
||||
{
|
||||
preempt_disable();
|
||||
@ -96,9 +102,9 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
" cur %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
buf1.buf, buf2.buf) &&
|
||||
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
|
||||
should_restart_for_topology_repair(c)) {
|
||||
bch_info(c, "Halting mark and sweep to start topology repair pass");
|
||||
ret = -BCH_ERR_need_topology_repair;
|
||||
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
|
||||
goto err;
|
||||
} else {
|
||||
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
|
||||
@ -124,9 +130,9 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
" expected %s",
|
||||
bch2_btree_ids[b->c.btree_id], b->c.level,
|
||||
buf1.buf, buf2.buf) &&
|
||||
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
|
||||
should_restart_for_topology_repair(c)) {
|
||||
bch_info(c, "Halting mark and sweep to start topology repair pass");
|
||||
ret = -BCH_ERR_need_topology_repair;
|
||||
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
|
||||
goto err;
|
||||
} else {
|
||||
set_bit(BCH_FS_INITIAL_GC_UNFIXED, &c->flags);
|
||||
@ -520,7 +526,7 @@ fsck_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_repair_topology(struct bch_fs *c)
|
||||
int bch2_check_topology(struct bch_fs *c)
|
||||
{
|
||||
struct btree_trans trans;
|
||||
struct btree *b;
|
||||
@ -969,9 +975,9 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
|
||||
b->c.level - 1,
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
|
||||
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags)) {
|
||||
ret = -BCH_ERR_need_topology_repair;
|
||||
should_restart_for_topology_repair(c)) {
|
||||
bch_info(c, "Halting mark and sweep to start topology repair pass");
|
||||
ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
|
||||
goto fsck_err;
|
||||
} else {
|
||||
/* Continue marking when opted to not
|
||||
@ -1808,32 +1814,8 @@ again:
|
||||
|
||||
bch2_mark_superblocks(c);
|
||||
|
||||
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
|
||||
(BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb) &&
|
||||
c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations &&
|
||||
c->opts.fix_errors != FSCK_FIX_no)) {
|
||||
bch_info(c, "Starting topology repair pass");
|
||||
ret = bch2_repair_topology(c);
|
||||
if (ret)
|
||||
goto out;
|
||||
bch_info(c, "Topology repair pass done");
|
||||
|
||||
set_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags);
|
||||
}
|
||||
|
||||
ret = bch2_gc_btrees(c, initial, metadata_only);
|
||||
|
||||
if (ret == -BCH_ERR_need_topology_repair &&
|
||||
!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags) &&
|
||||
c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_allocations) {
|
||||
set_bit(BCH_FS_NEED_ANOTHER_GC, &c->flags);
|
||||
SET_BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb, true);
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
if (ret == -BCH_ERR_need_topology_repair)
|
||||
ret = -BCH_ERR_fsck_errors_not_fixed;
|
||||
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
|
@@ -4,6 +4,7 @@
 
 #include "btree_types.h"
 
+int bch2_check_topology(struct bch_fs *);
 int bch2_gc(struct bch_fs *, bool, bool);
 int bch2_gc_gens(struct bch_fs *);
 void bch2_gc_thread_stop(struct bch_fs *);
@@ -612,7 +612,7 @@ static int __btree_err(enum btree_err_type type,
 	case BTREE_ERR_BAD_NODE:
 		bch2_print_string_as_lines(KERN_ERR, out.buf);
 		bch2_topology_error(c);
-		ret = -BCH_ERR_need_topology_repair;
+		ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
 		break;
 	case BTREE_ERR_INCOMPATIBLE:
 		bch2_print_string_as_lines(KERN_ERR, out.buf);
@ -1568,7 +1568,8 @@ void bch2_btree_node_read(struct bch_fs *c, struct btree *b,
|
||||
btree_pos_to_text(&buf, c, b);
|
||||
bch_err(c, "%s", buf.buf);
|
||||
|
||||
if (test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags))
|
||||
if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
|
||||
c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
|
||||
bch2_fatal_error(c);
|
||||
|
||||
set_btree_node_read_error(b);
|
||||
|
@@ -1080,7 +1080,7 @@ void bch2_btree_key_cache_exit(void)
 
 int __init bch2_btree_key_cache_init(void)
 {
-	bch2_key_cache = KMEM_CACHE(bkey_cached, 0);
+	bch2_key_cache = KMEM_CACHE(bkey_cached, SLAB_RECLAIM_ACCOUNT);
 	if (!bch2_key_cache)
 		return -ENOMEM;
 
@ -390,6 +390,7 @@ struct btree_insert_entry {
|
||||
u8 old_btree_u64s;
|
||||
struct bkey_i *k;
|
||||
struct btree_path *path;
|
||||
u64 seq;
|
||||
/* key being overwritten: */
|
||||
struct bkey old_k;
|
||||
const struct bch_val *old_v;
|
||||
|
@ -72,6 +72,8 @@ int bch2_btree_delete_range_trans(struct btree_trans *, enum btree_id,
|
||||
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
|
||||
struct bpos, struct bpos, unsigned, u64 *);
|
||||
|
||||
int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);
|
||||
|
||||
int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
|
||||
struct btree *, unsigned);
|
||||
void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
|
||||
@ -111,6 +113,8 @@ int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
|
||||
|
||||
int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
|
||||
struct bkey_i *, enum btree_update_flags);
|
||||
int __must_check bch2_trans_update_seq(struct btree_trans *, u64, struct btree_iter *,
|
||||
struct bkey_i *, enum btree_update_flags);
|
||||
int __must_check bch2_trans_update_buffered(struct btree_trans *,
|
||||
enum btree_id, struct bkey_i *);
|
||||
|
||||
|
@ -66,7 +66,8 @@ static void verify_update_old_key(struct btree_trans *trans, struct btree_insert
|
||||
|
||||
static int __must_check
|
||||
bch2_trans_update_by_path(struct btree_trans *, struct btree_path *,
|
||||
struct bkey_i *, enum btree_update_flags);
|
||||
struct bkey_i *, enum btree_update_flags,
|
||||
unsigned long ip);
|
||||
|
||||
static inline int btree_insert_entry_cmp(const struct btree_insert_entry *l,
|
||||
const struct btree_insert_entry *r)
|
||||
@ -290,12 +291,6 @@ inline void bch2_btree_insert_key_leaf(struct btree_trans *trans,
|
||||
bch2_trans_node_reinit_iter(trans, b);
|
||||
}
|
||||
|
||||
static void btree_insert_key_leaf(struct btree_trans *trans,
|
||||
struct btree_insert_entry *insert)
|
||||
{
|
||||
bch2_btree_insert_key_leaf(trans, insert->path, insert->k, trans->journal_res.seq);
|
||||
}
|
||||
|
||||
/* Cached btree updates: */
|
||||
|
||||
/* Normal update interface: */
|
||||
@ -752,9 +747,14 @@ bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
|
||||
trans_for_each_update(trans, i) {
|
||||
i->k->k.needs_whiteout = false;
|
||||
|
||||
if (!i->cached)
|
||||
btree_insert_key_leaf(trans, i);
|
||||
else if (!i->key_cache_already_flushed)
|
||||
if (!i->cached) {
|
||||
u64 seq = trans->journal_res.seq;
|
||||
|
||||
if (i->flags & BTREE_UPDATE_PREJOURNAL)
|
||||
seq = i->seq;
|
||||
|
||||
bch2_btree_insert_key_leaf(trans, i->path, i->k, seq);
|
||||
} else if (!i->key_cache_already_flushed)
|
||||
bch2_btree_insert_key_cached(trans, flags, i);
|
||||
else {
|
||||
bch2_btree_key_cache_drop(trans, i->path);
|
||||
@ -1495,7 +1495,7 @@ int bch2_trans_update_extent(struct btree_trans *trans,
|
||||
|
||||
ret = bch2_trans_update_by_path(trans, iter.path, update,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
|
||||
flags);
|
||||
flags, _RET_IP_);
|
||||
if (ret)
|
||||
goto err;
|
||||
goto out;
|
||||
@ -1533,11 +1533,6 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __must_check
|
||||
bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *path,
|
||||
struct bkey_i *k, enum btree_update_flags flags,
|
||||
unsigned long ip);
|
||||
|
||||
static noinline int flush_new_cached_update(struct btree_trans *trans,
|
||||
struct btree_path *path,
|
||||
struct btree_insert_entry *i,
|
||||
@ -1568,25 +1563,34 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
|
||||
i->flags |= BTREE_TRIGGER_NORUN;
|
||||
|
||||
btree_path_set_should_be_locked(btree_path);
|
||||
ret = bch2_trans_update_by_path_trace(trans, btree_path, i->k, flags, ip);
|
||||
ret = bch2_trans_update_by_path(trans, btree_path, i->k, flags, ip);
|
||||
out:
|
||||
bch2_path_put(trans, btree_path, true);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int __must_check
|
||||
bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *path,
|
||||
struct bkey_i *k, enum btree_update_flags flags,
|
||||
unsigned long ip)
|
||||
bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
|
||||
struct bkey_i *k, enum btree_update_flags flags,
|
||||
unsigned long ip)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree_insert_entry *i, n;
|
||||
u64 seq = 0;
|
||||
int cmp;
|
||||
|
||||
EBUG_ON(!path->should_be_locked);
|
||||
EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
|
||||
EBUG_ON(!bpos_eq(k->k.p, path->pos));
|
||||
|
||||
/*
|
||||
* The transaction journal res hasn't been allocated at this point.
|
||||
* That occurs at commit time. Reuse the seq field to pass in the seq
|
||||
* of a prejournaled key.
|
||||
*/
|
||||
if (flags & BTREE_UPDATE_PREJOURNAL)
|
||||
seq = trans->journal_res.seq;
|
||||
|
||||
n = (struct btree_insert_entry) {
|
||||
.flags = flags,
|
||||
.bkey_type = __btree_node_type(path->level, path->btree_id),
|
||||
@ -1595,6 +1599,7 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
|
||||
.cached = path->cached,
|
||||
.path = path,
|
||||
.k = k,
|
||||
.seq = seq,
|
||||
.ip_allocated = ip,
|
||||
};
|
||||
|
||||
@ -1622,6 +1627,7 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
|
||||
i->cached = n.cached;
|
||||
i->k = n.k;
|
||||
i->path = n.path;
|
||||
i->seq = n.seq;
|
||||
i->ip_allocated = n.ip_allocated;
|
||||
} else {
|
||||
array_insert_item(trans->updates, trans->nr_updates,
|
||||
@ -1656,13 +1662,6 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int __must_check
|
||||
bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
|
||||
struct bkey_i *k, enum btree_update_flags flags)
|
||||
{
|
||||
return bch2_trans_update_by_path_trace(trans, path, k, flags, _RET_IP_);
|
||||
}
|
||||
|
||||
int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
|
||||
struct bkey_i *k, enum btree_update_flags flags)
|
||||
{
|
||||
@ -1723,7 +1722,19 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
|
||||
path = iter->key_cache_path;
|
||||
}
|
||||
|
||||
return bch2_trans_update_by_path(trans, path, k, flags);
|
||||
return bch2_trans_update_by_path(trans, path, k, flags, _RET_IP_);
|
||||
}
|
||||
|
||||
/*
|
||||
* Add a transaction update for a key that has already been journaled.
|
||||
*/
|
||||
int __must_check bch2_trans_update_seq(struct btree_trans *trans, u64 seq,
|
||||
struct btree_iter *iter, struct bkey_i *k,
|
||||
enum btree_update_flags flags)
|
||||
{
|
||||
trans->journal_res.seq = seq;
|
||||
return bch2_trans_update(trans, iter, k, flags|BTREE_UPDATE_NOJOURNAL|
|
||||
BTREE_UPDATE_PREJOURNAL);
|
||||
}
|
||||
|
||||
int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
|
||||
@ -1985,6 +1996,24 @@ int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int bch2_btree_bit_mod(struct btree_trans *trans, enum btree_id btree,
|
||||
struct bpos pos, bool set)
|
||||
{
|
||||
struct bkey_i *k;
|
||||
int ret = 0;
|
||||
|
||||
k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
|
||||
ret = PTR_ERR_OR_ZERO(k);
|
||||
if (unlikely(ret))
|
||||
return ret;
|
||||
|
||||
bkey_init(&k->k);
|
||||
k->k.type = set ? KEY_TYPE_set : KEY_TYPE_deleted;
|
||||
k->k.p = pos;
|
||||
|
||||
return bch2_trans_update_buffered(trans, btree, k);
|
||||
}
|
||||
|
||||
static int __bch2_trans_log_msg(darray_u64 *entries, const char *fmt, va_list args)
|
||||
{
|
||||
struct printbuf buf = PRINTBUF;
|
||||
|
@ -75,7 +75,7 @@ static int bch2_btree_write_buffer_flush_one(struct btree_trans *trans,
|
||||
}
|
||||
return 0;
|
||||
trans_commit:
|
||||
return bch2_trans_update(trans, iter, &wb->k, 0) ?:
|
||||
return bch2_trans_update_seq(trans, wb->journal_seq, iter, &wb->k, 0) ?:
|
||||
bch2_trans_commit(trans, NULL, NULL,
|
||||
commit_flags|
|
||||
BTREE_INSERT_NOCHECK_RW|
|
||||
@ -103,6 +103,32 @@ static union btree_write_buffer_state btree_write_buffer_switch(struct btree_wri
|
||||
return old;
|
||||
}
|
||||
|
||||
/*
|
||||
* Update a btree with a write buffered key using the journal seq of the
|
||||
* original write buffer insert.
|
||||
*
|
||||
* It is not safe to rejournal the key once it has been inserted into the write
|
||||
* buffer because that may break recovery ordering. For example, the key may
|
||||
* have already been modified in the active write buffer in a seq that comes
|
||||
* before the current transaction. If we were to journal this key again and
|
||||
* crash, recovery would process updates in the wrong order.
|
||||
*/
|
||||
static int
|
||||
btree_write_buffered_insert(struct btree_trans *trans,
|
||||
struct btree_write_buffered_key *wb)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
int ret;
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, wb->btree, bkey_start_pos(&wb->k.k),
|
||||
BTREE_ITER_CACHED|BTREE_ITER_INTENT);
|
||||
|
||||
ret = bch2_btree_iter_traverse(&iter) ?:
|
||||
bch2_trans_update_seq(trans, wb->journal_seq, &iter, &wb->k, 0);
|
||||
bch2_trans_iter_exit(trans, &iter);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int __bch2_btree_write_buffer_flush(struct btree_trans *trans, unsigned commit_flags,
|
||||
bool locked)
|
||||
{
|
||||
@ -238,7 +264,7 @@ slowpath:
|
||||
commit_flags|
|
||||
BTREE_INSERT_NOFAIL|
|
||||
BTREE_INSERT_JOURNAL_RECLAIM,
|
||||
__bch2_btree_insert(trans, i->btree, &i->k, 0));
|
||||
btree_write_buffered_insert(trans, i));
|
||||
if (bch2_fs_fatal_err_on(ret, c, "%s: insert error %s", __func__, bch2_err_str(ret)))
|
||||
break;
|
||||
}
|
||||
|
@ -102,18 +102,6 @@ void bch2_dev_usage_read_fast(struct bch_dev *ca, struct bch_dev_usage *usage)
|
||||
} while (read_seqcount_retry(&c->usage_lock, seq));
|
||||
}
|
||||
|
||||
static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
|
||||
unsigned journal_seq,
|
||||
bool gc)
|
||||
{
|
||||
percpu_rwsem_assert_held(&c->mark_lock);
|
||||
BUG_ON(!gc && !journal_seq);
|
||||
|
||||
return this_cpu_ptr(gc
|
||||
? c->usage_gc
|
||||
: c->usage[journal_seq & JOURNAL_BUF_MASK]);
|
||||
}
|
||||
|
||||
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
|
||||
{
|
||||
ssize_t offset = v - (u64 *) c->usage_base;
|
||||
@ -460,7 +448,7 @@ static int __replicas_deltas_realloc(struct btree_trans *trans, unsigned more,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
|
||||
int bch2_replicas_deltas_realloc(struct btree_trans *trans, unsigned more)
|
||||
{
|
||||
return allocate_dropping_locks_errcode(trans,
|
||||
__replicas_deltas_realloc(trans, more, _gfp));
|
||||
@ -479,7 +467,7 @@ static inline int update_replicas_list(struct btree_trans *trans,
|
||||
return 0;
|
||||
|
||||
b = replicas_entry_bytes(r) + 8;
|
||||
ret = replicas_deltas_realloc(trans, b);
|
||||
ret = bch2_replicas_deltas_realloc(trans, b);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
@ -1137,38 +1125,6 @@ int bch2_mark_stripe(struct btree_trans *trans,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bch2_mark_inode(struct btree_trans *trans,
|
||||
enum btree_id btree_id, unsigned level,
|
||||
struct bkey_s_c old, struct bkey_s_c new,
|
||||
unsigned flags)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct bch_fs_usage *fs_usage;
|
||||
u64 journal_seq = trans->journal_res.seq;
|
||||
|
||||
if (flags & BTREE_TRIGGER_INSERT) {
|
||||
struct bch_inode_v3 *v = (struct bch_inode_v3 *) new.v;
|
||||
|
||||
BUG_ON(!journal_seq);
|
||||
BUG_ON(new.k->type != KEY_TYPE_inode_v3);
|
||||
|
||||
v->bi_journal_seq = cpu_to_le64(journal_seq);
|
||||
}
|
||||
|
||||
if (flags & BTREE_TRIGGER_GC) {
|
||||
percpu_down_read(&c->mark_lock);
|
||||
preempt_disable();
|
||||
|
||||
fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
|
||||
fs_usage->nr_inodes += bkey_is_inode(new.k);
|
||||
fs_usage->nr_inodes -= bkey_is_inode(old.k);
|
||||
|
||||
preempt_enable();
|
||||
percpu_up_read(&c->mark_lock);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bch2_mark_reservation(struct btree_trans *trans,
|
||||
enum btree_id btree_id, unsigned level,
|
||||
struct bkey_s_c old, struct bkey_s_c new,
|
||||
@ -1272,7 +1228,7 @@ int bch2_mark_reflink_p(struct btree_trans *trans,
|
||||
|
||||
BUG_ON(!(flags & BTREE_TRIGGER_GC));
|
||||
|
||||
if (c->sb.version >= bcachefs_metadata_version_reflink_p_fix) {
|
||||
if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_reflink_p_fix) {
|
||||
idx -= le32_to_cpu(p.v->front_pad);
|
||||
end += le32_to_cpu(p.v->back_pad);
|
||||
}
|
||||
@ -1715,27 +1671,6 @@ int bch2_trans_mark_stripe(struct btree_trans *trans,
|
||||
return ret;
|
||||
}
|
||||
|
||||
int bch2_trans_mark_inode(struct btree_trans *trans,
|
||||
enum btree_id btree_id, unsigned level,
|
||||
struct bkey_s_c old,
|
||||
struct bkey_i *new,
|
||||
unsigned flags)
|
||||
{
|
||||
int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);
|
||||
|
||||
if (nr) {
|
||||
int ret = replicas_deltas_realloc(trans, 0);
|
||||
struct replicas_delta_list *d = trans->fs_usage_deltas;
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
d->nr_inodes += nr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bch2_trans_mark_reservation(struct btree_trans *trans,
|
||||
enum btree_id btree_id, unsigned level,
|
||||
struct bkey_s_c old,
|
||||
@ -1754,7 +1689,7 @@ int bch2_trans_mark_reservation(struct btree_trans *trans,
|
||||
sectors = -sectors;
|
||||
sectors *= replicas;
|
||||
|
||||
ret = replicas_deltas_realloc(trans, 0);
|
||||
ret = bch2_replicas_deltas_realloc(trans, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
|
@ -249,6 +249,20 @@ bch2_fs_usage_read_short(struct bch_fs *);
|
||||
|
||||
/* key/bucket marking: */
|
||||
|
||||
static inline struct bch_fs_usage *fs_usage_ptr(struct bch_fs *c,
|
||||
unsigned journal_seq,
|
||||
bool gc)
|
||||
{
|
||||
percpu_rwsem_assert_held(&c->mark_lock);
|
||||
BUG_ON(!gc && !journal_seq);
|
||||
|
||||
return this_cpu_ptr(gc
|
||||
? c->usage_gc
|
||||
: c->usage[journal_seq & JOURNAL_BUF_MASK]);
|
||||
}
|
||||
|
||||
int bch2_replicas_deltas_realloc(struct btree_trans *, unsigned);
|
||||
|
||||
void bch2_fs_usage_initialize(struct bch_fs *);
|
||||
|
||||
int bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
|
||||
@ -261,8 +275,6 @@ int bch2_mark_extent(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s_c, unsigned);
|
||||
int bch2_mark_stripe(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s_c, unsigned);
|
||||
int bch2_mark_inode(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s_c, unsigned);
|
||||
int bch2_mark_reservation(struct btree_trans *, enum btree_id, unsigned,
|
||||
struct bkey_s_c, struct bkey_s_c, unsigned);
|
||||
int bch2_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned,
|
||||
@ -270,7 +282,6 @@ int bch2_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned,
|
||||
|
||||
int bch2_trans_mark_extent(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
|
||||
int bch2_trans_mark_stripe(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
|
||||
int bch2_trans_mark_inode(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
|
||||
int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
|
||||
int bch2_trans_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
|
||||
|
||||
|
@ -240,7 +240,8 @@ int bch2_bio_uncompress_inplace(struct bch_fs *c, struct bio *bio,
|
||||
data = __bounce_alloc(c, dst_len, WRITE);
|
||||
|
||||
if (__bio_uncompress(c, bio, data.b, *crc)) {
|
||||
bch_err(c, "error rewriting existing data: decompression error");
|
||||
if (!c->opts.no_data_io)
|
||||
bch_err(c, "error rewriting existing data: decompression error");
|
||||
bio_unmap_or_unbounce(c, data);
|
||||
return -EIO;
|
||||
}
|
||||
|
@@ -157,8 +157,7 @@
 	x(BCH_ERR_fsck, fsck_errors_not_fixed) \
 	x(BCH_ERR_fsck, fsck_repair_unimplemented) \
 	x(BCH_ERR_fsck, fsck_repair_impossible) \
-	x(0, need_snapshot_cleanup) \
-	x(0, need_topology_repair) \
+	x(0, restart_recovery) \
 	x(0, unwritten_extent_update) \
 	x(EINVAL, device_state_not_allowed) \
 	x(EINVAL, member_info_missing) \
@@ -171,6 +170,7 @@
 	x(EINVAL, device_already_online) \
 	x(EINVAL, insufficient_devices_to_start) \
 	x(EINVAL, invalid) \
+	x(EINVAL, internal_fsck_err) \
 	x(EROFS, erofs_trans_commit) \
 	x(EROFS, erofs_no_writes) \
 	x(EROFS, erofs_journal_err) \
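The two error-table hunks above edit an x-macro list: need_snapshot_cleanup and need_topology_repair go away (their former users now call bch2_run_explicit_recovery_pass()), while restart_recovery and internal_fsck_err are added. For readers unfamiliar with the pattern, a generic sketch of how such a table expands into an enum follows; the real macro and enum names in errcode.h are not shown in this diff and are only illustrative here.

/*
 * Illustrative x-macro expansion only; the actual names used by
 * errcode.h are not part of this diff.
 */
#define EXAMPLE_ERRCODES()		\
	x(0,      restart_recovery)	\
	x(EINVAL, internal_fsck_err)

enum example_errcode {
	EXAMPLE_ERR_START = 2048,	/* assumed private-errno base */
#define x(class, err) EXAMPLE_ERR_##err,
	EXAMPLE_ERRCODES()
#undef x
	EXAMPLE_ERR_MAX
};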
@@ -27,9 +27,6 @@ bool bch2_inconsistent_error(struct bch_fs *c)
 
 void bch2_topology_error(struct bch_fs *c)
 {
-	if (!test_bit(BCH_FS_TOPOLOGY_REPAIR_DONE, &c->flags))
-		return;
-
 	set_bit(BCH_FS_TOPOLOGY_ERROR, &c->flags);
 	if (test_bit(BCH_FS_FSCK_DONE, &c->flags))
 		bch2_inconsistent_error(c);
@@ -1926,7 +1926,7 @@ int __init bch2_vfs_init(void)
 {
 	int ret = -ENOMEM;
 
-	bch2_inode_cache = KMEM_CACHE(bch_inode_info, 0);
+	bch2_inode_cache = KMEM_CACHE(bch_inode_info, SLAB_RECLAIM_ACCOUNT);
 	if (!bch2_inode_cache)
 		goto err;
 
@ -471,28 +471,6 @@ static inline void snapshots_seen_init(struct snapshots_seen *s)
|
||||
memset(s, 0, sizeof(*s));
|
||||
}
|
||||
|
||||
static int snapshots_seen_add(struct bch_fs *c, struct snapshots_seen *s, u32 id)
|
||||
{
|
||||
struct snapshots_seen_entry *i, n = { id, id };
|
||||
int ret;
|
||||
|
||||
darray_for_each(s->ids, i) {
|
||||
if (n.equiv < i->equiv)
|
||||
break;
|
||||
|
||||
if (i->equiv == n.equiv) {
|
||||
bch_err(c, "%s(): adding duplicate snapshot", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
ret = darray_insert_item(&s->ids, i - s->ids.data, n);
|
||||
if (ret)
|
||||
bch_err(c, "error reallocating snapshots_seen table (size %zu)",
|
||||
s->ids.size);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
|
||||
enum btree_id btree_id, struct bpos pos)
|
||||
{
|
||||
@ -505,27 +483,31 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
|
||||
if (!bkey_eq(s->pos, pos))
|
||||
s->ids.nr = 0;
|
||||
|
||||
pos.snapshot = n.equiv;
|
||||
s->pos = pos;
|
||||
s->pos.snapshot = n.equiv;
|
||||
|
||||
darray_for_each(s->ids, i)
|
||||
if (i->equiv == n.equiv) {
|
||||
if (fsck_err_on(i->id != n.id, c,
|
||||
"snapshot deletion did not run correctly:\n"
|
||||
" duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
|
||||
bch2_btree_ids[btree_id],
|
||||
pos.inode, pos.offset,
|
||||
i->id, n.id, n.equiv))
|
||||
return -BCH_ERR_need_snapshot_cleanup;
|
||||
|
||||
darray_for_each(s->ids, i) {
|
||||
if (i->id == n.id)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* We currently don't rigorously track for snapshot cleanup
|
||||
* needing to be run, so it shouldn't be a fsck error yet:
|
||||
*/
|
||||
if (i->equiv == n.equiv) {
|
||||
bch_err(c, "snapshot deletion did not finish:\n"
|
||||
" duplicate keys in btree %s at %llu:%llu snapshots %u, %u (equiv %u)\n",
|
||||
bch2_btree_ids[btree_id],
|
||||
pos.inode, pos.offset,
|
||||
i->id, n.id, n.equiv);
|
||||
return bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_delete_dead_snapshots);
|
||||
}
|
||||
}
|
||||
|
||||
ret = darray_push(&s->ids, n);
|
||||
if (ret)
|
||||
bch_err(c, "error reallocating snapshots_seen table (size %zu)",
|
||||
s->ids.size);
|
||||
fsck_err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -541,13 +523,13 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
|
||||
ssize_t i;
|
||||
u32 top = seen->ids.nr ? seen->ids.data[seen->ids.nr - 1].equiv : 0;
|
||||
|
||||
BUG_ON(id > ancestor);
|
||||
BUG_ON(!bch2_snapshot_is_equiv(c, id));
|
||||
BUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
|
||||
EBUG_ON(id > ancestor);
|
||||
EBUG_ON(!bch2_snapshot_is_equiv(c, id));
|
||||
EBUG_ON(!bch2_snapshot_is_equiv(c, ancestor));
|
||||
|
||||
/* @ancestor should be the snapshot most recently added to @seen */
|
||||
BUG_ON(ancestor != seen->pos.snapshot);
|
||||
BUG_ON(ancestor != top);
|
||||
EBUG_ON(ancestor != seen->pos.snapshot);
|
||||
EBUG_ON(ancestor != top);
|
||||
|
||||
if (id == ancestor)
|
||||
return true;
|
||||
@ -555,11 +537,20 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
|
||||
if (!bch2_snapshot_is_ancestor(c, id, ancestor))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* We know that @id is a descendant of @ancestor, we're checking if
|
||||
* we've seen a key that overwrote @ancestor - i.e. also a descendent of
|
||||
* @ascestor and with @id as a descendent.
|
||||
*
|
||||
* But we already know that we're scanning IDs between @id and @ancestor
|
||||
* numerically, since snapshot ID lists are kept sorted, so if we find
|
||||
* an id that's an ancestor of @id we're done:
|
||||
*/
|
||||
|
||||
for (i = seen->ids.nr - 2;
|
||||
i >= 0 && seen->ids.data[i].equiv >= id;
|
||||
--i)
|
||||
if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv) &&
|
||||
bch2_snapshot_is_ancestor(c, seen->ids.data[i].equiv, ancestor))
|
||||
if (bch2_snapshot_is_ancestor(c, id, seen->ids.data[i].equiv))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
@ -606,12 +597,14 @@ static int ref_visible2(struct bch_fs *c,
|
||||
struct inode_walker_entry {
|
||||
struct bch_inode_unpacked inode;
|
||||
u32 snapshot;
|
||||
bool seen_this_pos;
|
||||
u64 count;
|
||||
};
|
||||
|
||||
struct inode_walker {
|
||||
bool first_this_inode;
|
||||
u64 cur_inum;
|
||||
bool recalculate_sums;
|
||||
struct bpos last_pos;
|
||||
|
||||
DARRAY(struct inode_walker_entry) inodes;
|
||||
};
|
||||
@ -648,9 +641,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
|
||||
u32 restart_count = trans->restart_count;
|
||||
int ret;
|
||||
|
||||
if (w->cur_inum == inum)
|
||||
return 0;
|
||||
|
||||
w->recalculate_sums = false;
|
||||
w->inodes.nr = 0;
|
||||
|
||||
for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
|
||||
@ -666,8 +657,7 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
w->cur_inum = inum;
|
||||
w->first_this_inode = true;
|
||||
w->first_this_inode = true;
|
||||
|
||||
if (trans_was_restarted(trans, restart_count))
|
||||
return -BCH_ERR_transaction_restart_nested;
|
||||
@ -676,8 +666,8 @@ static int get_inodes_all_snapshots(struct btree_trans *trans,
|
||||
}
|
||||
|
||||
static struct inode_walker_entry *
|
||||
lookup_inode_for_snapshot(struct bch_fs *c,
|
||||
struct inode_walker *w, u32 snapshot)
|
||||
lookup_inode_for_snapshot(struct bch_fs *c, struct inode_walker *w,
|
||||
u32 snapshot, bool is_whiteout)
|
||||
{
|
||||
struct inode_walker_entry *i;
|
||||
|
||||
@ -691,35 +681,50 @@ lookup_inode_for_snapshot(struct bch_fs *c,
|
||||
found:
|
||||
BUG_ON(snapshot > i->snapshot);
|
||||
|
||||
if (snapshot != i->snapshot) {
|
||||
if (snapshot != i->snapshot && !is_whiteout) {
|
||||
struct inode_walker_entry new = *i;
|
||||
size_t pos;
|
||||
int ret;
|
||||
|
||||
new.snapshot = snapshot;
|
||||
new.count = 0;
|
||||
|
||||
bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
|
||||
w->cur_inum, snapshot, i->snapshot);
|
||||
w->last_pos.inode, snapshot, i->snapshot);
|
||||
|
||||
while (i > w->inodes.data && i[-1].snapshot > snapshot)
|
||||
--i;
|
||||
|
||||
ret = darray_insert_item(&w->inodes, i - w->inodes.data, new);
|
||||
pos = i - w->inodes.data;
|
||||
ret = darray_insert_item(&w->inodes, pos, new);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
i = w->inodes.data + pos;
|
||||
}
|
||||
|
||||
return i;
|
||||
}
|
||||
|
||||
static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
|
||||
struct inode_walker *w, struct bpos pos)
|
||||
struct inode_walker *w, struct bpos pos,
|
||||
bool is_whiteout)
|
||||
{
|
||||
int ret = get_inodes_all_snapshots(trans, w, pos.inode);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
if (w->last_pos.inode != pos.inode) {
|
||||
int ret = get_inodes_all_snapshots(trans, w, pos.inode);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
} else if (bkey_cmp(w->last_pos, pos)) {
|
||||
struct inode_walker_entry *i;
|
||||
|
||||
return lookup_inode_for_snapshot(trans->c, w, pos.snapshot);
|
||||
darray_for_each(w->inodes, i)
|
||||
i->seen_this_pos = false;
|
||||
|
||||
}
|
||||
|
||||
w->last_pos = pos;
|
||||
|
||||
return lookup_inode_for_snapshot(trans->c, w, pos.snapshot, is_whiteout);
|
||||
}
|
||||
|
||||
static int __get_visible_inodes(struct btree_trans *trans,
|
||||
@ -1034,47 +1039,6 @@ int bch2_check_inodes(struct bch_fs *c)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Checking for overlapping extents needs to be reimplemented
|
||||
*/
|
||||
#if 0
|
||||
static int fix_overlapping_extent(struct btree_trans *trans,
|
||||
struct bkey_s_c k, struct bpos cut_at)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_i *u;
|
||||
int ret;
|
||||
|
||||
u = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
ret = PTR_ERR_OR_ZERO(u);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_reassemble(u, k);
|
||||
bch2_cut_front(cut_at, u);
|
||||
|
||||
|
||||
/*
|
||||
* We don't want to go through the extent_handle_overwrites path:
|
||||
*
|
||||
* XXX: this is going to screw up disk accounting, extent triggers
|
||||
* assume things about extent overwrites - we should be running the
|
||||
* triggers manually here
|
||||
*/
|
||||
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents, u->k.p,
|
||||
BTREE_ITER_INTENT|BTREE_ITER_NOT_EXTENTS);
|
||||
|
||||
BUG_ON(iter.flags & BTREE_ITER_IS_EXTENTS);
|
||||
ret = bch2_btree_iter_traverse(&iter) ?:
|
||||
bch2_trans_update(trans, &iter, u, BTREE_TRIGGER_NORUN) ?:
|
||||
bch2_trans_commit(trans, NULL, NULL,
|
||||
BTREE_INSERT_NOFAIL|
|
||||
BTREE_INSERT_LAZY_RW);
|
||||
bch2_trans_iter_exit(trans, &iter);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
static struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans,
|
||||
struct btree_iter *iter,
|
||||
struct bpos pos)
|
||||
@ -1128,19 +1092,20 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
|
||||
if (i->inode.bi_sectors == i->count)
|
||||
continue;
|
||||
|
||||
count2 = bch2_count_inode_sectors(trans, w->cur_inum, i->snapshot);
|
||||
count2 = bch2_count_inode_sectors(trans, w->last_pos.inode, i->snapshot);
|
||||
|
||||
if (w->recalculate_sums)
|
||||
i->count = count2;
|
||||
|
||||
if (i->count != count2) {
|
||||
bch_err(c, "fsck counted i_sectors wrong: got %llu should be %llu",
|
||||
i->count, count2);
|
||||
i->count = count2;
|
||||
if (i->inode.bi_sectors == i->count)
|
||||
continue;
|
||||
bch_err(c, "fsck counted i_sectors wrong for inode %llu:%u: got %llu should be %llu",
|
||||
w->last_pos.inode, i->snapshot, i->count, count2);
|
||||
return -BCH_ERR_internal_fsck_err;
|
||||
}
|
||||
|
||||
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SECTORS_DIRTY), c,
|
||||
"inode %llu:%u has incorrect i_sectors: got %llu, should be %llu",
|
||||
w->cur_inum, i->snapshot,
|
||||
w->last_pos.inode, i->snapshot,
|
||||
i->inode.bi_sectors, i->count)) {
|
||||
i->inode.bi_sectors = i->count;
|
||||
ret = write_inode(trans, &i->inode, i->snapshot);
|
||||
@ -1162,85 +1127,40 @@ struct extent_end {
|
||||
struct snapshots_seen seen;
|
||||
};
|
||||
|
||||
typedef DARRAY(struct extent_end) extent_ends;
|
||||
struct extent_ends {
|
||||
struct bpos last_pos;
|
||||
DARRAY(struct extent_end) e;
|
||||
};
|
||||
|
||||
static int get_print_extent(struct btree_trans *trans, struct bpos pos, struct printbuf *out)
|
||||
static void extent_ends_reset(struct extent_ends *extent_ends)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
int ret;
|
||||
|
||||
k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_extents, pos,
|
||||
BTREE_ITER_SLOTS|
|
||||
BTREE_ITER_ALL_SNAPSHOTS|
|
||||
BTREE_ITER_NOT_EXTENTS);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bch2_bkey_val_to_text(out, trans->c, k);
|
||||
bch2_trans_iter_exit(trans, &iter);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int check_overlapping_extents(struct btree_trans *trans,
|
||||
struct snapshots_seen *seen,
|
||||
extent_ends *extent_ends,
|
||||
struct bkey_s_c k,
|
||||
struct btree_iter *iter)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct extent_end *i;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret = 0;
|
||||
|
||||
darray_for_each(*extent_ends, i) {
|
||||
/* duplicate, due to transaction restart: */
|
||||
if (i->offset == k.k->p.offset &&
|
||||
i->snapshot == k.k->p.snapshot)
|
||||
continue;
|
||||
darray_for_each(extent_ends->e, i)
|
||||
snapshots_seen_exit(&i->seen);
|
||||
|
||||
if (!ref_visible2(c,
|
||||
k.k->p.snapshot, seen,
|
||||
i->snapshot, &i->seen))
|
||||
continue;
|
||||
|
||||
if (i->offset <= bkey_start_offset(k.k))
|
||||
continue;
|
||||
|
||||
printbuf_reset(&buf);
|
||||
prt_str(&buf, "overlapping extents:\n ");
|
||||
bch2_bkey_val_to_text(&buf, c, k);
|
||||
prt_str(&buf, "\n ");
|
||||
|
||||
ret = get_print_extent(trans, SPOS(k.k->p.inode, i->offset, i->snapshot), &buf);
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (fsck_err(c, "%s", buf.buf)) {
|
||||
struct bkey_i *update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
|
||||
if ((ret = PTR_ERR_OR_ZERO(update)))
|
||||
goto err;
|
||||
bkey_reassemble(update, k);
|
||||
ret = bch2_trans_update_extent(trans, iter, update,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
err:
|
||||
fsck_err:
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
extent_ends->e.nr = 0;
|
||||
}
|
||||
|
||||
static int extent_ends_at(extent_ends *extent_ends,
|
||||
static void extent_ends_exit(struct extent_ends *extent_ends)
|
||||
{
|
||||
extent_ends_reset(extent_ends);
|
||||
darray_exit(&extent_ends->e);
|
||||
}
|
||||
|
||||
static void extent_ends_init(struct extent_ends *extent_ends)
|
||||
{
|
||||
memset(extent_ends, 0, sizeof(*extent_ends));
|
||||
}
|
||||
|
||||
static int extent_ends_at(struct bch_fs *c,
|
||||
struct extent_ends *extent_ends,
|
||||
struct snapshots_seen *seen,
|
||||
struct bkey_s_c k)
|
||||
{
|
||||
struct extent_end *i, n = (struct extent_end) {
|
||||
.snapshot = k.k->p.snapshot,
|
||||
.offset = k.k->p.offset,
|
||||
.snapshot = k.k->p.snapshot,
|
||||
.seen = *seen,
|
||||
};
|
||||
|
||||
@ -1250,7 +1170,7 @@ static int extent_ends_at(extent_ends *extent_ends,
|
||||
if (!n.seen.ids.data)
|
||||
return -BCH_ERR_ENOMEM_fsck_extent_ends_at;
|
||||
|
||||
darray_for_each(*extent_ends, i) {
|
||||
darray_for_each(extent_ends->e, i) {
|
||||
if (i->snapshot == k.k->p.snapshot) {
|
||||
snapshots_seen_exit(&i->seen);
|
||||
*i = n;
|
||||
@ -1261,136 +1181,238 @@ static int extent_ends_at(extent_ends *extent_ends,
|
||||
break;
|
||||
}
|
||||
|
||||
return darray_insert_item(extent_ends, i - extent_ends->data, n);
|
||||
return darray_insert_item(&extent_ends->e, i - extent_ends->e.data, n);
|
||||
}
|
||||
|
||||
static void extent_ends_reset(extent_ends *extent_ends)
|
||||
static int overlapping_extents_found(struct btree_trans *trans,
|
||||
enum btree_id btree,
|
||||
struct bpos pos1, struct bkey pos2,
|
||||
bool *fixed)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
u32 snapshot = min(pos1.snapshot, pos2.p.snapshot);
|
||||
int ret;
|
||||
|
||||
BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
|
||||
|
||||
prt_str(&buf, "\n ");
|
||||
bch2_bpos_to_text(&buf, pos1);
|
||||
prt_str(&buf, "\n ");
|
||||
|
||||
bch2_bkey_to_text(&buf, &pos2);
|
||||
prt_str(&buf, "\n ");
|
||||
|
||||
bch2_trans_iter_init(trans, &iter, btree, SPOS(pos1.inode, pos1.offset - 1, snapshot), 0);
|
||||
k = bch2_btree_iter_peek_upto(&iter, POS(pos1.inode, U64_MAX));
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
bch2_bkey_val_to_text(&buf, c, k);
|
||||
|
||||
if (!bpos_eq(pos1, k.k->p)) {
|
||||
bch_err(c, "%s: error finding first overlapping extent when repairing%s",
|
||||
__func__, buf.buf);
|
||||
ret = -BCH_ERR_internal_fsck_err;
|
||||
goto err;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
bch2_btree_iter_advance(&iter);
|
||||
|
||||
k = bch2_btree_iter_peek_upto(&iter, POS(pos1.inode, U64_MAX));
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (bkey_ge(k.k->p, pos2.p))
|
||||
break;
|
||||
|
||||
}
|
||||
|
||||
prt_str(&buf, "\n ");
|
||||
bch2_bkey_val_to_text(&buf, c, k);
|
||||
|
||||
if (bkey_gt(k.k->p, pos2.p) ||
|
||||
pos2.size != k.k->size) {
|
||||
bch_err(c, "%s: error finding seconding overlapping extent when repairing%s",
|
||||
__func__, buf.buf);
|
||||
ret = -BCH_ERR_internal_fsck_err;
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (fsck_err(c, "overlapping extents%s", buf.buf)) {
|
||||
struct bpos update_pos = pos1.snapshot < pos2.p.snapshot ? pos1 : pos2.p;
|
||||
struct btree_iter update_iter;
|
||||
|
||||
struct bkey_i *update = bch2_bkey_get_mut(trans, &update_iter,
|
||||
btree, update_pos,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
bch2_trans_iter_exit(trans, &update_iter);
|
||||
if ((ret = PTR_ERR_OR_ZERO(update)))
|
||||
goto err;
|
||||
|
||||
*fixed = true;
|
||||
}
|
||||
fsck_err:
|
||||
err:
|
||||
bch2_trans_iter_exit(trans, &iter);
|
||||
printbuf_exit(&buf);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int check_overlapping_extents(struct btree_trans *trans,
|
||||
struct snapshots_seen *seen,
|
||||
struct extent_ends *extent_ends,
|
||||
struct bkey_s_c k,
|
||||
u32 equiv,
|
||||
struct btree_iter *iter)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct extent_end *i;
|
||||
bool fixed = false;
|
||||
int ret = 0;
|
||||
|
||||
darray_for_each(*extent_ends, i)
|
||||
snapshots_seen_exit(&i->seen);
|
||||
/* transaction restart, running again */
|
||||
if (bpos_eq(extent_ends->last_pos, k.k->p))
|
||||
return 0;
|
||||
|
||||
extent_ends->nr = 0;
|
||||
if (extent_ends->last_pos.inode != k.k->p.inode)
|
||||
extent_ends_reset(extent_ends);
|
||||
|
||||
darray_for_each(extent_ends->e, i) {
|
||||
if (i->offset <= bkey_start_offset(k.k))
|
||||
continue;
|
||||
|
||||
if (!ref_visible2(c,
|
||||
k.k->p.snapshot, seen,
|
||||
i->snapshot, &i->seen))
|
||||
continue;
|
||||
|
||||
ret = overlapping_extents_found(trans, iter->btree_id,
|
||||
SPOS(iter->pos.inode,
|
||||
i->offset,
|
||||
i->snapshot),
|
||||
*k.k, &fixed);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
ret = extent_ends_at(c, extent_ends, seen, k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
extent_ends->last_pos = k.k->p;
|
||||
err:
|
||||
return ret ?: fixed;
|
||||
}
|
||||
|
||||
static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
struct bkey_s_c k,
|
||||
struct inode_walker *inode,
|
||||
struct snapshots_seen *s,
|
||||
extent_ends *extent_ends)
|
||||
struct extent_ends *extent_ends)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct inode_walker_entry *i;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
struct bpos equiv;
|
||||
struct bpos equiv = k.k->p;
|
||||
int ret = 0;
|
||||
|
||||
equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
|
||||
|
||||
ret = check_key_has_snapshot(trans, iter, k);
|
||||
if (ret) {
|
||||
ret = ret < 0 ? ret : 0;
|
||||
goto out;
|
||||
}
|
||||
|
||||
equiv = k.k->p;
|
||||
equiv.snapshot = bch2_snapshot_equiv(c, k.k->p.snapshot);
|
||||
if (inode->last_pos.inode != k.k->p.inode) {
|
||||
ret = check_i_sectors(trans, inode);
|
||||
if (ret)
|
||||
goto err;
|
||||
}
|
||||
|
||||
i = walk_inode(trans, inode, equiv, k.k->type == KEY_TYPE_whiteout);
|
||||
ret = PTR_ERR_OR_ZERO(i);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = snapshots_seen_update(c, s, iter->btree_id, k.k->p);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (k.k->type == KEY_TYPE_whiteout)
|
||||
goto out;
|
||||
if (k.k->type != KEY_TYPE_whiteout) {
|
||||
if (fsck_err_on(!i, c,
|
||||
"extent in missing inode:\n %s",
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
|
||||
goto delete;
|
||||
|
||||
if (inode->cur_inum != k.k->p.inode) {
|
||||
ret = check_i_sectors(trans, inode);
|
||||
if (ret)
|
||||
if (fsck_err_on(i &&
|
||||
!S_ISREG(i->inode.bi_mode) &&
|
||||
!S_ISLNK(i->inode.bi_mode), c,
|
||||
"extent in non regular inode mode %o:\n %s",
|
||||
i->inode.bi_mode,
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
|
||||
goto delete;
|
||||
|
||||
ret = check_overlapping_extents(trans, s, extent_ends, k,
|
||||
equiv.snapshot, iter);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
|
||||
extent_ends_reset(extent_ends);
|
||||
}
|
||||
|
||||
BUG_ON(!iter->path->should_be_locked);
|
||||
|
||||
ret = check_overlapping_extents(trans, s, extent_ends, k, iter);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
ret = extent_ends_at(extent_ends, s, k);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
i = walk_inode(trans, inode, equiv);
|
||||
ret = PTR_ERR_OR_ZERO(i);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (fsck_err_on(!i, c,
|
||||
"extent in missing inode:\n %s",
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
ret = bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (!i)
|
||||
goto out;
|
||||
|
||||
if (fsck_err_on(!S_ISREG(i->inode.bi_mode) &&
|
||||
!S_ISLNK(i->inode.bi_mode), c,
|
||||
"extent in non regular inode mode %o:\n %s",
|
||||
i->inode.bi_mode,
|
||||
(printbuf_reset(&buf),
|
||||
bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
ret = bch2_btree_delete_at(trans, iter,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
goto out;
|
||||
if (ret)
|
||||
inode->recalculate_sums = true;
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check inodes in reverse order, from oldest snapshots to newest, so
|
||||
* that we emit the fewest number of whiteouts necessary:
|
||||
* Check inodes in reverse order, from oldest snapshots to newest,
|
||||
* starting from the inode that matches this extent's snapshot. If we
|
||||
* didn't have one, iterate over all inodes:
|
||||
*/
|
||||
for (i = inode->inodes.data + inode->inodes.nr - 1;
|
||||
i >= inode->inodes.data;
|
||||
if (!i)
|
||||
i = inode->inodes.data + inode->inodes.nr - 1;
|
||||
|
||||
for (;
|
||||
inode->inodes.data && i >= inode->inodes.data;
|
||||
--i) {
|
||||
if (i->snapshot > equiv.snapshot ||
|
||||
!key_visible_in_snapshot(c, s, i->snapshot, equiv.snapshot))
|
||||
continue;
|
||||
|
||||
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
|
||||
k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
|
||||
!bkey_extent_is_reservation(k), c,
|
||||
"extent type past end of inode %llu:%u, i_size %llu\n %s",
|
||||
i->inode.bi_inum, i->snapshot, i->inode.bi_size,
|
||||
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
struct btree_iter iter2;
|
||||
if (k.k->type != KEY_TYPE_whiteout) {
|
||||
if (fsck_err_on(!(i->inode.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
|
||||
k.k->p.offset > round_up(i->inode.bi_size, block_bytes(c)) >> 9 &&
|
||||
!bkey_extent_is_reservation(k), c,
|
||||
"extent type past end of inode %llu:%u, i_size %llu\n %s",
|
||||
i->inode.bi_inum, i->snapshot, i->inode.bi_size,
|
||||
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
|
||||
struct btree_iter iter2;
|
||||
|
||||
bch2_trans_copy_iter(&iter2, iter);
|
||||
bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
|
||||
ret = bch2_btree_iter_traverse(&iter2) ?:
|
||||
bch2_btree_delete_at(trans, &iter2,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
bch2_trans_iter_exit(trans, &iter2);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (i->snapshot != equiv.snapshot) {
|
||||
ret = snapshots_seen_add(c, s, i->snapshot);
|
||||
bch2_trans_copy_iter(&iter2, iter);
|
||||
bch2_btree_iter_set_snapshot(&iter2, i->snapshot);
|
||||
ret = bch2_btree_iter_traverse(&iter2) ?:
|
||||
bch2_btree_delete_at(trans, &iter2,
|
||||
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
bch2_trans_iter_exit(trans, &iter2);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
iter->k.type = KEY_TYPE_whiteout;
|
||||
}
|
||||
|
||||
if (bkey_extent_is_allocation(k.k))
|
||||
i->count += k.k->size;
|
||||
}
|
||||
|
||||
i->seen_this_pos = true;
|
||||
}
|
||||
|
||||
if (bkey_extent_is_allocation(k.k))
|
||||
for_each_visible_inode(c, s, inode, equiv.snapshot, i)
|
||||
i->count += k.k->size;
|
||||
#if 0
|
||||
bch2_bkey_buf_reassemble(&prev, c, k);
|
||||
#endif
|
||||
|
||||
out:
|
||||
err:
|
||||
fsck_err:
|
||||
@ -1399,6 +1421,9 @@ fsck_err:
|
||||
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
delete:
|
||||
ret = bch2_btree_delete_at(trans, iter, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -1412,11 +1437,12 @@ int bch2_check_extents(struct bch_fs *c)
|
||||
struct btree_trans trans;
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
extent_ends extent_ends = { 0 };
|
||||
struct extent_ends extent_ends;
|
||||
struct disk_reservation res = { 0 };
|
||||
int ret = 0;
|
||||
|
||||
snapshots_seen_init(&s);
|
||||
extent_ends_init(&extent_ends);
|
||||
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
|
||||
|
||||
ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_extents,
|
||||
@ -1426,11 +1452,11 @@ int bch2_check_extents(struct bch_fs *c)
|
||||
BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL, ({
|
||||
bch2_disk_reservation_put(c, &res);
|
||||
check_extent(&trans, &iter, k, &w, &s, &extent_ends);
|
||||
}));
|
||||
})) ?:
|
||||
check_i_sectors(&trans, &w);
|
||||
|
||||
bch2_disk_reservation_put(c, &res);
|
||||
extent_ends_reset(&extent_ends);
|
||||
darray_exit(&extent_ends);
|
||||
extent_ends_exit(&extent_ends);
|
||||
inode_walker_exit(&w);
|
||||
bch2_trans_exit(&trans);
|
||||
snapshots_seen_exit(&s);
|
||||
@ -1452,7 +1478,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
|
||||
if (i->inode.bi_nlink == i->count)
|
||||
continue;
|
||||
|
||||
count2 = bch2_count_subdirs(trans, w->cur_inum, i->snapshot);
|
||||
count2 = bch2_count_subdirs(trans, w->last_pos.inode, i->snapshot);
|
||||
if (count2 < 0)
|
||||
return count2;
|
||||
|
||||
@ -1466,7 +1492,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
|
||||
|
||||
if (fsck_err_on(i->inode.bi_nlink != i->count, c,
|
||||
"directory %llu:%u with wrong i_nlink: got %u, should be %llu",
|
||||
w->cur_inum, i->snapshot, i->inode.bi_nlink, i->count)) {
|
||||
w->last_pos.inode, i->snapshot, i->inode.bi_nlink, i->count)) {
|
||||
i->inode.bi_nlink = i->count;
|
||||
ret = write_inode(trans, &i->inode, i->snapshot);
|
||||
if (ret)
|
||||
@ -1630,7 +1656,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
if (k.k->type == KEY_TYPE_whiteout)
|
||||
goto out;
|
||||
|
||||
if (dir->cur_inum != k.k->p.inode) {
|
||||
if (dir->last_pos.inode != k.k->p.inode) {
|
||||
ret = check_subdir_count(trans, dir);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -1638,7 +1664,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
|
||||
BUG_ON(!iter->path->should_be_locked);
|
||||
|
||||
i = walk_inode(trans, dir, equiv);
|
||||
i = walk_inode(trans, dir, equiv, k.k->type == KEY_TYPE_whiteout);
|
||||
ret = PTR_ERR_OR_ZERO(i);
|
||||
if (ret < 0)
|
||||
goto err;
|
||||
@ -1815,7 +1841,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i = walk_inode(trans, inode, k.k->p);
|
||||
i = walk_inode(trans, inode, k.k->p, k.k->type == KEY_TYPE_whiteout);
|
||||
ret = PTR_ERR_OR_ZERO(i);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
@ -519,6 +519,59 @@ void bch2_inode_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
__bch2_inode_unpacked_to_text(out, &inode);
}

int bch2_trans_mark_inode(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old,
struct bkey_i *new,
unsigned flags)
{
int nr = bkey_is_inode(&new->k) - bkey_is_inode(old.k);

if (nr) {
int ret = bch2_replicas_deltas_realloc(trans, 0);
struct replicas_delta_list *d = trans->fs_usage_deltas;

if (ret)
return ret;

d->nr_inodes += nr;
}

return 0;
}

int bch2_mark_inode(struct btree_trans *trans,
enum btree_id btree_id, unsigned level,
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
struct bch_fs *c = trans->c;
struct bch_fs_usage *fs_usage;
u64 journal_seq = trans->journal_res.seq;

if (flags & BTREE_TRIGGER_INSERT) {
struct bch_inode_v3 *v = (struct bch_inode_v3 *) new.v;

BUG_ON(!journal_seq);
BUG_ON(new.k->type != KEY_TYPE_inode_v3);

v->bi_journal_seq = cpu_to_le64(journal_seq);
}

if (flags & BTREE_TRIGGER_GC) {
percpu_down_read(&c->mark_lock);
preempt_disable();

fs_usage = fs_usage_ptr(c, journal_seq, flags & BTREE_TRIGGER_GC);
fs_usage->nr_inodes += bkey_is_inode(new.k);
fs_usage->nr_inodes -= bkey_is_inode(old.k);

preempt_enable();
percpu_up_read(&c->mark_lock);
}
return 0;
}

int bch2_inode_generation_invalid(const struct bch_fs *c, struct bkey_s_c k,
enum bkey_invalid_flags flags,
struct printbuf *err)
@ -16,6 +16,11 @@ int bch2_inode_v3_invalid(const struct bch_fs *, struct bkey_s_c,
enum bkey_invalid_flags, struct printbuf *);
void bch2_inode_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);

int bch2_trans_mark_inode(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_i *, unsigned);
int bch2_mark_inode(struct btree_trans *, enum btree_id, unsigned,
struct bkey_s_c, struct bkey_s_c, unsigned);

#define bch2_bkey_ops_inode ((struct bkey_ops) { \
.key_invalid = bch2_inode_invalid, \
.val_to_text = bch2_inode_to_text, \
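
A side note on the bch2_trans_mark_inode() hunk above (an illustration, not part of the diff): the nr_inodes accounting reduces to "is the new key an inode" minus "was the old key an inode", so creates add one, deletes subtract one, and overwrites are a no-op. A minimal standalone C sketch of that delta, with invented helper names:

#include <stdio.h>

/* stand-in for bkey_is_inode(): 1 if the key carries an inode, else 0 */
static int key_is_inode(int is_inode)
{
	return !!is_inode;
}

/* the delta applied to the nr_inodes counter by the trigger above */
static int inode_delta(int old_is_inode, int new_is_inode)
{
	return key_is_inode(new_is_inode) - key_is_inode(old_is_inode);
}

int main(void)
{
	printf("create:    %+d\n", inode_delta(0, 1));	/* +1 */
	printf("delete:    %+d\n", inode_delta(1, 0));	/* -1 */
	printf("overwrite: %+d\n", inode_delta(1, 1));	/*  0 */
	return 0;
}
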
@ -1082,7 +1082,8 @@ static enum prep_encoded_ret {
op->incompressible)) {
if (!crc_is_compressed(op->crc) &&
op->csum_type != op->crc.csum_type &&
bch2_write_rechecksum(c, op, op->csum_type))
bch2_write_rechecksum(c, op, op->csum_type) &&
!c->opts.no_data_io)
return PREP_ENCODED_CHECKSUM_ERR;

return PREP_ENCODED_DO_WRITE;
@ -1102,7 +1103,7 @@ static enum prep_encoded_ret {
csum = bch2_checksum_bio(c, op->crc.csum_type,
extent_nonce(op->version, op->crc),
bio);
if (bch2_crc_cmp(op->crc.csum, csum))
if (bch2_crc_cmp(op->crc.csum, csum) && !c->opts.no_data_io)
return PREP_ENCODED_CHECKSUM_ERR;

if (bch2_bio_uncompress_inplace(c, bio, &op->crc))
@ -1120,7 +1121,8 @@ static enum prep_encoded_ret {
*/
if ((op->crc.live_size != op->crc.uncompressed_size ||
op->crc.csum_type != op->csum_type) &&
bch2_write_rechecksum(c, op, op->csum_type))
bch2_write_rechecksum(c, op, op->csum_type) &&
!c->opts.no_data_io)
return PREP_ENCODED_CHECKSUM_ERR;

/*
@ -2416,7 +2418,8 @@ static void __bch2_read_endio(struct work_struct *work)
if (ret)
goto decrypt_err;

if (bch2_bio_uncompress(c, src, dst, dst_iter, crc))
if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
!c->opts.no_data_io)
goto decompression_err;
} else {
/* don't need to decrypt the entire bio: */
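
The io.c hunks above all apply one pattern: a checksum or decompression failure is only treated as an error when the no_data_io option is off, so a run that skips real data I/O does not trip over garbage payloads. A minimal standalone sketch of that guard (hypothetical names, not bcachefs code):

#include <stdbool.h>
#include <stdio.h>

struct opts {
	bool no_data_io;	/* data reads/writes are being skipped entirely */
};

/* treat a checksum mismatch as fatal only when data I/O actually happened */
static int verify_csum(bool csum_mismatch, const struct opts *opts)
{
	if (csum_mismatch && !opts->no_data_io)
		return -1;
	return 0;
}

int main(void)
{
	struct opts o = { .no_data_io = true };

	printf("%d\n", verify_csum(true, &o));	/* 0: mismatch ignored */
	o.no_data_io = false;
	printf("%d\n", verify_csum(true, &o));	/* -1: real checksum error */
	return 0;
}
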
@ -41,28 +41,12 @@ void bch2_lru_pos_to_text(struct printbuf *out, struct bpos lru)
}

static int __bch2_lru_set(struct btree_trans *trans, u16 lru_id,
u64 dev_bucket, u64 time, unsigned key_type)
u64 dev_bucket, u64 time, bool set)
{
struct bkey_i *k;
int ret = 0;

if (!time)
return 0;

k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
ret = PTR_ERR_OR_ZERO(k);
if (unlikely(ret))
return ret;

bkey_init(&k->k);
k->k.type = key_type;
k->k.p = lru_pos(lru_id, dev_bucket, time);

EBUG_ON(lru_pos_id(k->k.p) != lru_id);
EBUG_ON(lru_pos_time(k->k.p) != time);
EBUG_ON(k->k.p.offset != dev_bucket);

return bch2_trans_update_buffered(trans, BTREE_ID_lru, k);
return time
? bch2_btree_bit_mod(trans, BTREE_ID_lru,
lru_pos(lru_id, dev_bucket, time), set)
: 0;
}

int bch2_lru_del(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
@ -5,13 +5,6 @@
#define LRU_TIME_BITS 48
#define LRU_TIME_MAX ((1ULL << LRU_TIME_BITS) - 1)

static inline struct bpos lru_pos(u16 lru_id, u64 dev_bucket, u64 time)
{
EBUG_ON(time > LRU_TIME_MAX);

return POS(((u64) lru_id << LRU_TIME_BITS)|time, dev_bucket);
}

static inline u64 lru_pos_id(struct bpos pos)
{
return pos.inode >> LRU_TIME_BITS;
@ -22,6 +15,18 @@ static inline u64 lru_pos_time(struct bpos pos)
return pos.inode & ~(~0ULL << LRU_TIME_BITS);
}

static inline struct bpos lru_pos(u16 lru_id, u64 dev_bucket, u64 time)
{
struct bpos pos = POS(((u64) lru_id << LRU_TIME_BITS)|time, dev_bucket);

EBUG_ON(time > LRU_TIME_MAX);
EBUG_ON(lru_pos_id(pos) != lru_id);
EBUG_ON(lru_pos_time(pos) != time);
EBUG_ON(pos.offset != dev_bucket);

return pos;
}

#define BCH_LRU_TYPES() \
x(read) \
x(fragmentation)
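
A note on the lru_pos() helper shown above (illustration only, not part of the diff): the 16-bit lru_id and 48-bit time share the key's inode field and dev_bucket lands in offset, so lru_pos_id() and lru_pos_time() are plain shifts and masks. A standalone round-trip sketch of that packing, with stand-in types rather than the real struct bpos:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define LRU_TIME_BITS	48
#define LRU_TIME_MAX	((1ULL << LRU_TIME_BITS) - 1)

struct pos { uint64_t inode, offset; };	/* stand-in for struct bpos */

static struct pos lru_pos(uint16_t lru_id, uint64_t dev_bucket, uint64_t time)
{
	assert(time <= LRU_TIME_MAX);
	return (struct pos) {
		.inode	= ((uint64_t) lru_id << LRU_TIME_BITS) | time,
		.offset	= dev_bucket,
	};
}

static uint64_t lru_pos_id(struct pos p)	{ return p.inode >> LRU_TIME_BITS; }
static uint64_t lru_pos_time(struct pos p)	{ return p.inode & ~(~0ULL << LRU_TIME_BITS); }

int main(void)
{
	struct pos p = lru_pos(3, 12345, 99);

	/* prints "3 99 12345": all three fields round-trip */
	printf("%llu %llu %llu\n",
	       (unsigned long long) lru_pos_id(p),
	       (unsigned long long) lru_pos_time(p),
	       (unsigned long long) p.offset);
	return 0;
}
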
@ -562,7 +562,7 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
int ret;

ret = bch2_snapshot_tree_lookup(trans,
snapshot_t(c, k.k->p.snapshot)->tree, &s_t);
bch2_snapshot_tree(c, k.k->p.snapshot), &s_t);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
"%s: snapshot tree %u not found", __func__,
snapshot_t(c, k.k->p.snapshot)->tree);
@ -1262,20 +1262,16 @@ static int bch2_run_recovery_pass(struct bch_fs *c, enum bch_recovery_pass pass)
static int bch2_run_recovery_passes(struct bch_fs *c)
{
int ret = 0;
again:

while (c->curr_recovery_pass < ARRAY_SIZE(recovery_passes)) {
ret = bch2_run_recovery_pass(c, c->curr_recovery_pass);
if (bch2_err_matches(ret, BCH_ERR_restart_recovery))
continue;
if (ret)
break;
c->curr_recovery_pass++;
}

if (bch2_err_matches(ret, BCH_ERR_need_snapshot_cleanup)) {
set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
c->curr_recovery_pass = BCH_RECOVERY_PASS_delete_dead_snapshots;
goto again;
}

return ret;
}

@ -1453,6 +1449,11 @@ use_clean:
if (ret)
goto err;

if (c->opts.fsck &&
(IS_ENABLED(CONFIG_BCACHEFS_DEBUG) ||
BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb)))
c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_check_topology);

ret = bch2_run_recovery_passes(c);
if (ret)
goto err;
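
The reworked bch2_run_recovery_passes() above is a plain cursor loop: a pass that returns the restart error is re-run, any other error aborts, and success advances to the next pass. A toy sketch of that control flow (names and error codes invented for the example, not bcachefs code):

#include <stdio.h>

enum { PASS_OK = 0, PASS_RESTART = 1, PASS_FATAL = 2 };

/* pretend pass 1 asks to be re-run exactly once */
static int run_pass(int pass, int *restarts_left)
{
	if (pass == 1 && (*restarts_left)-- > 0)
		return PASS_RESTART;
	return PASS_OK;
}

int main(void)
{
	const int nr_passes = 3;
	int restarts_left = 1, pass = 0, ret = 0;

	while (pass < nr_passes) {
		ret = run_pass(pass, &restarts_left);
		if (ret == PASS_RESTART)
			continue;		/* re-run the same pass */
		if (ret)
			break;			/* fatal: stop recovery */
		printf("pass %d done\n", pass);
		pass++;
	}
	return ret;
}
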
@ -25,20 +25,25 @@ static inline u32 get_ancestor_below(struct snapshot_table *t, u32 id, u32 ances
return s->parent;
}

bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
bool __bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
{
struct snapshot_table *t;
bool ret;

EBUG_ON(c->curr_recovery_pass <= BCH_RECOVERY_PASS_check_snapshots);

rcu_read_lock();
t = rcu_dereference(c->snapshots);

while (id && id < ancestor)
while (id && id < ancestor - IS_ANCESTOR_BITMAP)
id = get_ancestor_below(t, id, ancestor);

ret = id && id < ancestor
? test_bit(ancestor - id - 1, __snapshot_t(t, id)->is_ancestor)
: id == ancestor;
rcu_read_unlock();

return id == ancestor;
return ret;
}

static bool bch2_snapshot_is_ancestor_early(struct bch_fs *c, u32 id, u32 ancestor)
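
The __bch2_snapshot_is_ancestor() rework above works together with the new is_ancestor bitmap in struct snapshot_t: each node records which of the next IS_ANCESTOR_BITMAP higher-numbered ids are its ancestors, so a query only walks until the target is inside that window and then finishes with a single bit test (ancestors have higher ids, matching the walk direction in the code above). A simplified standalone sketch of the idea, using plain parent pointers instead of the skip list and an overflow-safe comparison for small demo ids; all names here are stand-ins, not bcachefs code:

#include <stdbool.h>
#include <stdio.h>

#define IS_ANCESTOR_BITMAP	128
#define MAX_ID			64

struct node {
	unsigned parent;					/* 0 == no parent */
	unsigned char is_ancestor[IS_ANCESTOR_BITMAP / 8];	/* ancestors in (id, id + 128] */
};

static struct node nodes[MAX_ID + 1];

static void set_ancestor_bit(struct node *n, unsigned bit)
{
	n->is_ancestor[bit / 8] |= 1U << (bit % 8);
}

static bool test_ancestor_bit(const struct node *n, unsigned bit)
{
	return n->is_ancestor[bit / 8] & (1U << (bit % 8));
}

/* on insert, record every ancestor that falls inside the bitmap window */
static void add_node(unsigned id, unsigned parent)
{
	unsigned p;

	nodes[id].parent = parent;
	for (p = parent; p && p - id - 1 < IS_ANCESTOR_BITMAP; p = nodes[p].parent)
		set_ancestor_bit(&nodes[id], p - id - 1);
}

static bool is_ancestor(unsigned id, unsigned ancestor)
{
	/* walk up only until the ancestor is inside the bitmap window;
	 * written as id + window < ancestor to avoid unsigned wrap here */
	while (id && id + IS_ANCESTOR_BITMAP < ancestor)
		id = nodes[id].parent;

	return id && id < ancestor
		? test_ancestor_bit(&nodes[id], ancestor - id - 1)
		: id == ancestor;
}

int main(void)
{
	add_node(30, 0);	/* root */
	add_node(20, 30);
	add_node(10, 20);
	add_node(15, 30);	/* sibling branch */

	printf("%d %d %d %d\n",
	       is_ancestor(10, 30),	/* 1: via 20 */
	       is_ancestor(10, 20),	/* 1: direct parent */
	       is_ancestor(10, 15),	/* 0: different branch */
	       is_ancestor(30, 10));	/* 0: wrong direction */
	return 0;
}
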
@ -189,6 +194,13 @@ void bch2_snapshot_to_text(struct printbuf *out, struct bch_fs *c,
le32_to_cpu(s.v->children[1]),
le32_to_cpu(s.v->subvol),
le32_to_cpu(s.v->tree));

if (bkey_val_bytes(k.k) > offsetof(struct bch_snapshot, depth))
prt_printf(out, " depth %u skiplist %u %u %u",
le32_to_cpu(s.v->depth),
le32_to_cpu(s.v->skip[0]),
le32_to_cpu(s.v->skip[1]),
le32_to_cpu(s.v->skip[2]));
}

int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
@ -263,11 +275,12 @@ int bch2_mark_snapshot(struct btree_trans *trans,
{
struct bch_fs *c = trans->c;
struct snapshot_t *t;
u32 id = new.k->p.offset;
int ret = 0;

mutex_lock(&c->snapshot_table_lock);

t = snapshot_t_mut(c, new.k->p.offset);
t = snapshot_t_mut(c, id);
if (!t) {
ret = -BCH_ERR_ENOMEM_mark_snapshot;
goto err;
@ -275,25 +288,36 @@ int bch2_mark_snapshot(struct btree_trans *trans,

if (new.k->type == KEY_TYPE_snapshot) {
struct bkey_s_c_snapshot s = bkey_s_c_to_snapshot(new);
u32 parent = id;

t->parent = le32_to_cpu(s.v->parent);
t->skip[0] = le32_to_cpu(s.v->skip[0]);
t->skip[1] = le32_to_cpu(s.v->skip[1]);
t->skip[2] = le32_to_cpu(s.v->skip[2]);
t->depth = le32_to_cpu(s.v->depth);
t->children[0] = le32_to_cpu(s.v->children[0]);
t->children[1] = le32_to_cpu(s.v->children[1]);
t->subvol = BCH_SNAPSHOT_SUBVOL(s.v) ? le32_to_cpu(s.v->subvol) : 0;
t->tree = le32_to_cpu(s.v->tree);

if (BCH_SNAPSHOT_DELETED(s.v))
if (bkey_val_bytes(s.k) > offsetof(struct bch_snapshot, depth)) {
t->depth = le32_to_cpu(s.v->depth);
t->skip[0] = le32_to_cpu(s.v->skip[0]);
t->skip[1] = le32_to_cpu(s.v->skip[1]);
t->skip[2] = le32_to_cpu(s.v->skip[2]);
} else {
t->depth = 0;
t->skip[0] = 0;
t->skip[1] = 0;
t->skip[2] = 0;
}

while ((parent = bch2_snapshot_parent_early(c, parent)) &&
parent - id - 1 < IS_ANCESTOR_BITMAP)
__set_bit(parent - id - 1, t->is_ancestor);

if (BCH_SNAPSHOT_DELETED(s.v)) {
set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
c->recovery_passes_explicit |= BIT_ULL(BCH_RECOVERY_PASS_delete_dead_snapshots);
}
} else {
t->parent = 0;
t->children[0] = 0;
t->children[1] = 0;
t->subvol = 0;
t->tree = 0;
memset(t, 0, sizeof(*t));
}
err:
mutex_unlock(&c->snapshot_table_lock);
@ -573,7 +597,7 @@ static int snapshot_tree_ptr_good(struct btree_trans *trans,
return bch2_snapshot_is_ancestor_early(trans->c, snap_id, le32_to_cpu(s_t.root_snapshot));
}

static u32 snapshot_rand_ancestor_get(struct bch_fs *c, u32 id)
static u32 snapshot_skiplist_get(struct bch_fs *c, u32 id)
{
const struct snapshot_t *s;

@ -589,8 +613,7 @@ static u32 snapshot_rand_ancestor_get(struct bch_fs *c, u32 id)
return id;
}

static int snapshot_rand_ancestor_good(struct btree_trans *trans,
struct bch_snapshot s)
static int snapshot_skiplist_good(struct btree_trans *trans, struct bch_snapshot s)
{
struct bch_snapshot a;
unsigned i;
@ -778,10 +801,10 @@ static int check_snapshot(struct btree_trans *trans,

real_depth = bch2_snapshot_depth(c, parent_id);

if (fsck_err_on(le32_to_cpu(s.depth) != real_depth, c,
"snapshot with incorrect depth fields, should be %u:\n %s",
real_depth,
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
if (le32_to_cpu(s.depth) != real_depth &&
(c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
fsck_err(c, "snapshot with incorrect depth field, should be %u:\n %s",
real_depth, (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
@ -791,19 +814,21 @@ static int check_snapshot(struct btree_trans *trans,
s = u->v;
}

ret = snapshot_rand_ancestor_good(trans, s);
ret = snapshot_skiplist_good(trans, s);
if (ret < 0)
goto err;

if (fsck_err_on(!ret, c, "snapshot with bad rand_ancestor field:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
if (!ret &&
(c->sb.version_upgrade_complete < bcachefs_metadata_version_snapshot_skiplists ||
fsck_err(c, "snapshot with bad skiplist field:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))) {
u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u);
if (ret)
goto err;

for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
u->v.skip[i] = cpu_to_le32(snapshot_rand_ancestor_get(c, parent_id));
u->v.skip[i] = cpu_to_le32(snapshot_skiplist_get(c, parent_id));

bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_int);
s = u->v;
@ -1096,7 +1121,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
n->v.depth = cpu_to_le32(depth);

for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
n->v.skip[j] = cpu_to_le32(snapshot_rand_ancestor_get(c, parent));
n->v.skip[j] = cpu_to_le32(snapshot_skiplist_get(c, parent));

bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_int);
SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
@ -1255,9 +1280,6 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
u32 i, id;
int ret = 0;

if (!test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
return 0;

if (!test_bit(BCH_FS_STARTED, &c->flags)) {
ret = bch2_fs_read_write_early(c);
if (ret) {
@ -1352,7 +1374,8 @@ static void bch2_delete_dead_snapshots_work(struct work_struct *work)
{
struct bch_fs *c = container_of(work, struct bch_fs, snapshot_delete_work);

bch2_delete_dead_snapshots(c);
if (test_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags))
bch2_delete_dead_snapshots(c);
bch2_write_ref_put(c, BCH_WRITE_REF_delete_dead_snapshots);
}
|
@ -42,6 +42,15 @@ static inline const struct snapshot_t *snapshot_t(struct bch_fs *c, u32 id)
|
||||
return __snapshot_t(rcu_dereference(c->snapshots), id);
|
||||
}
|
||||
|
||||
static inline u32 bch2_snapshot_tree(struct bch_fs *c, u32 id)
|
||||
{
|
||||
rcu_read_lock();
|
||||
id = snapshot_t(c, id)->tree;
|
||||
rcu_read_unlock();
|
||||
|
||||
return id;
|
||||
}
|
||||
|
||||
static inline u32 __bch2_snapshot_parent_early(struct bch_fs *c, u32 id)
|
||||
{
|
||||
return snapshot_t(c, id)->parent;
|
||||
@ -157,7 +166,14 @@ static inline u32 bch2_snapshot_sibling(struct bch_fs *c, u32 id)
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool bch2_snapshot_is_ancestor(struct bch_fs *, u32, u32);
|
||||
bool __bch2_snapshot_is_ancestor(struct bch_fs *, u32, u32);
|
||||
|
||||
static inline bool bch2_snapshot_is_ancestor(struct bch_fs *c, u32 id, u32 ancestor)
|
||||
{
|
||||
return id == ancestor
|
||||
? true
|
||||
: __bch2_snapshot_is_ancestor(c, id, ancestor);
|
||||
}
|
||||
|
||||
static inline bool bch2_snapshot_has_children(struct bch_fs *c, u32 id)
|
||||
{
|
||||
|
@ -6,6 +6,8 @@

typedef DARRAY(u32) snapshot_id_list;

#define IS_ANCESTOR_BITMAP 128

struct snapshot_t {
u32 parent;
u32 skip[3];
@ -14,6 +16,7 @@ struct snapshot_t {
u32 subvol; /* Nonzero only if a subvolume points to this node: */
u32 tree;
u32 equiv;
unsigned long is_ancestor[BITS_TO_LONGS(IS_ANCESTOR_BITMAP)];
};

struct snapshot_table {
@ -418,6 +418,9 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
SET_BCH_SB_JOURNAL_FLUSH_DELAY(sb, 1000);
if (!BCH_SB_JOURNAL_RECLAIM_DELAY(sb))
SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 1000);

if (!BCH_SB_VERSION_UPGRADE_COMPLETE(sb))
SET_BCH_SB_VERSION_UPGRADE_COMPLETE(sb, le16_to_cpu(sb->version));
}

for (opt_id = 0; opt_id < bch2_opts_nr; opt_id++) {
@ -492,7 +495,7 @@ static void bch2_sb_update(struct bch_fs *c)
c->sb.user_uuid = src->user_uuid;
c->sb.version = le16_to_cpu(src->version);
c->sb.version_min = le16_to_cpu(src->version_min);
c->sb.version_upgrade_complete = BCH_SB_VERSION_UPGRADE_COMPLETE(src) ?: c->sb.version;
c->sb.version_upgrade_complete = BCH_SB_VERSION_UPGRADE_COMPLETE(src);
c->sb.nr_devices = src->nr_devices;
c->sb.clean = BCH_SB_CLEAN(src);
c->sb.encryption_type = BCH_SB_ENCRYPTION_TYPE(src);
@ -885,7 +885,7 @@ static void print_mount_opts(struct bch_fs *c)
struct printbuf p = PRINTBUF;
bool first = true;

prt_str(&p, "mounted version ");
prt_str(&p, "mounting version ");
bch2_version_to_text(&p, c->sb.version);

if (c->opts.read_only) {
@ -921,6 +921,8 @@ int bch2_fs_start(struct bch_fs *c)
unsigned i;
int ret;

print_mount_opts(c);

down_write(&c->state_lock);

BUG_ON(test_bit(BCH_FS_STARTED, &c->flags));
@ -974,7 +976,6 @@ int bch2_fs_start(struct bch_fs *c)
goto err;
}

print_mount_opts(c);
ret = 0;
out:
up_write(&c->state_lock);