mirror of https://github.com/koverstreet/bcachefs-tools.git

Update bcachefs sources to ed2a5f4260 bcachefs: Add a missing bch2_btree_path_traverse() call

commit a06dee6da2, parent 787768043d
@@ -1 +1 @@
-de3b30303e8a52dcbf738065efb4cf183fdbf1c1
+ed2a5f4260b65f3d613dcd76a97ac091bc88a126
@@ -1096,6 +1096,75 @@ TRACE_EVENT(trans_restart_key_cache_key_realloced,
 		  __entry->new_u64s)
 );
 
+DECLARE_EVENT_CLASS(node_lock_fail,
+	TP_PROTO(unsigned long trans_ip,
+		 unsigned long caller_ip,
+		 enum btree_id btree_id,
+		 struct bpos *pos,
+		 unsigned level, u32 iter_seq, struct btree *b, u32 node_seq),
+	TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+		level, iter_seq, b, node_seq),
+
+	TP_STRUCT__entry(
+		__field(unsigned long,	trans_ip)
+		__field(unsigned long,	caller_ip)
+		__field(u8,		btree_id)
+		__field(u64,		pos_inode)
+		__field(u64,		pos_offset)
+		__field(u32,		pos_snapshot)
+		__field(u32,		level)
+		__field(u32,		iter_seq)
+		__array(char,		node, 24)
+		__field(u32,		node_seq)
+	),
+
+	TP_fast_assign(
+		__entry->trans_ip	= trans_ip;
+		__entry->caller_ip	= caller_ip;
+		__entry->btree_id	= btree_id;
+		__entry->pos_inode	= pos->inode;
+		__entry->pos_offset	= pos->offset;
+		__entry->pos_snapshot	= pos->snapshot;
+		__entry->level		= level;
+		__entry->iter_seq	= iter_seq;
+		if (IS_ERR(b))
+			strscpy(__entry->node, bch2_err_str(PTR_ERR(b)), sizeof(__entry->node));
+		else
+			scnprintf(__entry->node, sizeof(__entry->node), "%px", b);
+		__entry->node_seq	= node_seq;
+	),
+
+	TP_printk("%ps %pS btree %u pos %llu:%llu:%u level %u iter seq %u node %s node seq %u",
+		  (void *) __entry->trans_ip,
+		  (void *) __entry->caller_ip,
+		  __entry->btree_id,
+		  __entry->pos_inode,
+		  __entry->pos_offset,
+		  __entry->pos_snapshot,
+		  __entry->level, __entry->iter_seq,
+		  __entry->node, __entry->node_seq)
+);
+
+DEFINE_EVENT(node_lock_fail, node_upgrade_fail,
+	TP_PROTO(unsigned long trans_ip,
+		 unsigned long caller_ip,
+		 enum btree_id btree_id,
+		 struct bpos *pos,
+		 unsigned level, u32 iter_seq, struct btree *b, u32 node_seq),
+	TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+		level, iter_seq, b, node_seq)
+);
+
+DEFINE_EVENT(node_lock_fail, node_relock_fail,
+	TP_PROTO(unsigned long trans_ip,
+		 unsigned long caller_ip,
+		 enum btree_id btree_id,
+		 struct bpos *pos,
+		 unsigned level, u32 iter_seq, struct btree *b, u32 node_seq),
+	TP_ARGS(trans_ip, caller_ip, btree_id, pos,
+		level, iter_seq, b, node_seq)
+);
+
 #endif /* _TRACE_BCACHE_H */
 
 /* This part must be outside protection */
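Note on the hunk above: DECLARE_EVENT_CLASS() defines the record layout, assignment logic, and format string once; each DEFINE_EVENT() then stamps out a real tracepoint reusing that template, which is why node_upgrade_fail and node_relock_fail only repeat the prototype. A minimal sketch of how a caller fires one of these events — it mirrors the call site added to btree_path_get_locks() further down, so only the surrounding locals are assumed:

	/* Sketch: firing the class-based tracepoint on a relock failure. */
	if (!bch2_btree_node_relock(trans, path, l))
		trace_node_relock_fail(0, _RET_IP_,
				       path->btree_id, &path->pos,
				       l, path->l[l].lock_seq,
				       path->l[l].b,
				       is_btree_node(path, l)
				       ? path->l[l].b->c.lock.state.seq
				       : 0);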
@@ -386,7 +386,6 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 {
 	struct bch_alloc_v4 _a;
 	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
-	const struct bch_backpointer *bps;
 	unsigned i;
 
 	prt_newline(out);
@@ -413,33 +412,41 @@ void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c
 	prt_newline(out);
 	prt_printf(out, "io_time[WRITE] %llu", a->io_time[WRITE]);
 	prt_newline(out);
-	prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a));
-	printbuf_indent_add(out, 2);
-
-	bps = alloc_v4_backpointers_c(a);
-	for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a); i++) {
-		prt_newline(out);
-		bch2_backpointer_to_text(out, &bps[i]);
-	}
-
-	printbuf_indent_sub(out, 4);
+	if (k.k->type == KEY_TYPE_alloc_v4) {
+		struct bkey_s_c_alloc_v4 a_raw = bkey_s_c_to_alloc_v4(k);
+		const struct bch_backpointer *bps = alloc_v4_backpointers_c(a_raw.v);
+
+		prt_printf(out, "bp_start %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a_raw.v));
+		prt_newline(out);
+
+		prt_printf(out, "backpointers: %llu", BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v));
+		printbuf_indent_add(out, 2);
+
+		for (i = 0; i < BCH_ALLOC_V4_NR_BACKPOINTERS(a_raw.v); i++) {
+			prt_newline(out);
+			bch2_backpointer_to_text(out, &bps[i]);
+		}
+
+		printbuf_indent_sub(out, 2);
+	}
+
+	printbuf_indent_sub(out, 2);
 }
 
 void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
 {
 	if (k.k->type == KEY_TYPE_alloc_v4) {
-		int d;
+		void *src, *dst;
 
 		*out = *bkey_s_c_to_alloc_v4(k).v;
 
-		d = (int) BCH_ALLOC_V4_U64s -
-			(int) (BCH_ALLOC_V4_BACKPOINTERS_START(out) ?: BCH_ALLOC_V4_U64s_V0);
-		if (unlikely(d > 0)) {
-			memset((u64 *) out + BCH_ALLOC_V4_BACKPOINTERS_START(out),
-			       0,
-			       d * sizeof(u64));
-			SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
-		}
+		src = alloc_v4_backpointers(out);
+		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
+		dst = alloc_v4_backpointers(out);
+
+		if (src < dst)
+			memset(src, 0, dst - src);
 	} else {
 		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
 
@@ -465,20 +472,20 @@ static noinline struct bkey_i_alloc_v4 *
 __bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
 {
 	struct bkey_i_alloc_v4 *ret;
-	unsigned bytes = k.k->type == KEY_TYPE_alloc_v4
-		? bkey_bytes(k.k)
-		: sizeof(struct bkey_i_alloc_v4);
-
-	/*
-	 * Reserve space for one more backpointer here:
-	 * Not sketchy at doing it this way, nope...
-	 */
-	ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
-	if (IS_ERR(ret))
-		return ret;
 
 	if (k.k->type == KEY_TYPE_alloc_v4) {
-		struct bch_backpointer *src, *dst;
+		struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
+		unsigned bytes = sizeof(struct bkey_i_alloc_v4) +
+			BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) *
+			sizeof(struct bch_backpointer);
+		void *src, *dst;
+
+		/*
+		 * Reserve space for one more backpointer here:
+		 * Not sketchy at doing it this way, nope...
+		 */
+		ret = bch2_trans_kmalloc(trans, bytes + sizeof(struct bch_backpointer));
+		if (IS_ERR(ret))
+			return ret;
 
 		bkey_reassemble(&ret->k_i, k);
 
@@ -488,9 +495,15 @@ __bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
 
 		memmove(dst, src, BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v) *
 			sizeof(struct bch_backpointer));
-		memset(src, 0, dst - src);
+		if (src < dst)
+			memset(src, 0, dst - src);
 		set_alloc_v4_u64s(ret);
 	} else {
+		ret = bch2_trans_kmalloc(trans, sizeof(struct bkey_i_alloc_v4) +
+					 sizeof(struct bch_backpointer));
+		if (IS_ERR(ret))
+			return ret;
+
 		bkey_alloc_v4_init(&ret->k_i);
 		ret->k.p = k.k->p;
 		bch2_alloc_to_v4(k, &ret->v);
@@ -508,10 +521,8 @@ static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_
 	 */
 	struct bkey_i_alloc_v4 *ret =
 		bch2_trans_kmalloc_nomemzero(trans, bkey_bytes(k.k) + sizeof(struct bch_backpointer));
-	if (!IS_ERR(ret)) {
+	if (!IS_ERR(ret))
 		bkey_reassemble(&ret->k_i, k);
-		memset((void *) ret + bkey_bytes(k.k), 0, sizeof(struct bch_backpointer));
-	}
 	return ret;
 }
 
@@ -1244,7 +1255,15 @@ static int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
 	}
 
 	if (need_update) {
-		ret = bch2_trans_update(trans, bucket_gens_iter, &g.k_i, 0);
+		struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));
+
+		ret = PTR_ERR_OR_ZERO(k);
+		if (ret)
+			goto err;
+
+		memcpy(k, &g, sizeof(g));
+
+		ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);
 		if (ret)
 			goto err;
 	}
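Why the extra allocation above: bch2_trans_update() keeps the pointer it is handed until the transaction commits, so the key must live in memory that outlives this function — presumably why the on-stack `&g.k_i` is replaced by a transaction-owned copy. The idiom, condensed with this hunk's own locals:

	struct bkey_i *k = bch2_trans_kmalloc(trans, sizeof(g));

	ret = PTR_ERR_OR_ZERO(k);	/* bch2_trans_kmalloc() returns an ERR_PTR on failure */
	if (ret)
		goto err;

	memcpy(k, &g, sizeof(g));	/* stack key -> transaction-owned copy */
	ret = bch2_trans_update(trans, bucket_gens_iter, k, 0);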
@@ -1370,7 +1389,7 @@ static int bch2_check_bucket_gens_key(struct btree_trans *trans,
 	k = bch2_trans_kmalloc(trans, sizeof(g));
 	ret = PTR_ERR_OR_ZERO(k);
 	if (ret)
-		return ret;
+		goto out;
 
 	memcpy(k, &g, sizeof(g));
 	ret = bch2_trans_update(trans, iter, k, 0);
@@ -1422,7 +1441,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
 					&freespace_iter,
 					&bucket_gens_iter);
 			if (ret)
-				break;
+				goto bkey_err;
 		} else {
 			next = k.k->p;
 
@@ -112,8 +112,6 @@ struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *, struct bkey_s
 
 int bch2_bucket_io_time_reset(struct btree_trans *, unsigned, size_t, int);
 
-#define ALLOC_SCAN_BATCH(ca)	max_t(size_t, 1, (ca)->mi.nbuckets >> 9)
-
 int bch2_alloc_v1_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 int bch2_alloc_v2_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
 int bch2_alloc_v3_invalid(const struct bch_fs *, struct bkey_s_c, int, struct printbuf *);
@@ -261,11 +261,10 @@ btree:
 	prt_printf(&buf, "for ");
 	bch2_bkey_val_to_text(&buf, c, orig_k);
 
-	if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
-		bch_err(c, "%s", buf.buf);
-	} else {
+	bch_err(c, "%s", buf.buf);
+	if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+		bch2_inconsistent_error(c);
 		ret = -EIO;
-		bch2_trans_inconsistent(trans, "%s", buf.buf);
 	}
 	printbuf_exit(&buf);
 	goto err;
@@ -283,7 +282,6 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 			       struct bkey_s_c orig_k)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_dev *ca;
 	struct bch_backpointer *bps = alloc_v4_backpointers(&a->v);
 	unsigned i, nr = BCH_ALLOC_V4_NR_BACKPOINTERS(&a->v);
 	struct bkey_i_backpointer *bp_k;
@@ -317,11 +315,10 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 		prt_printf(&buf, "for ");
 		bch2_bkey_val_to_text(&buf, c, orig_k);
 
-		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
-			bch_err(c, "%s", buf.buf);
-		else {
-			bch2_trans_inconsistent(trans, "%s", buf.buf);
-			printbuf_exit(&buf);
+		bch_err(c, "%s", buf.buf);
+		printbuf_exit(&buf);
+		if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+			bch2_inconsistent_error(c);
 			return -EIO;
 		}
 	}
@@ -334,18 +331,9 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 	}
 
 	/* Overflow: use backpointer btree */
-	bp_k = bch2_trans_kmalloc(trans, sizeof(*bp_k));
-	ret = PTR_ERR_OR_ZERO(bp_k);
-	if (ret)
-		return ret;
 
-	ca = bch_dev_bkey_exists(c, a->k.p.inode);
-
-	bkey_backpointer_init(&bp_k->k_i);
-	bp_k->k.p = bucket_pos_to_bp(c, a->k.p, bp.bucket_offset);
-	bp_k->v = bp;
-
-	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers, bp_k->k.p,
+	bch2_trans_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
+			     bucket_pos_to_bp(c, a->k.p, bp.bucket_offset),
 			     BTREE_ITER_INTENT|
 			     BTREE_ITER_SLOTS|
 			     BTREE_ITER_WITH_UPDATES);
@@ -369,16 +357,22 @@ int bch2_bucket_backpointer_add(struct btree_trans *trans,
 		prt_printf(&buf, "for ");
 		bch2_bkey_val_to_text(&buf, c, orig_k);
 
-		if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags))
-			bch_err(c, "%s", buf.buf);
-		else {
-			bch2_trans_inconsistent(trans, "%s", buf.buf);
-			printbuf_exit(&buf);
+		bch_err(c, "%s", buf.buf);
+		printbuf_exit(&buf);
+		if (test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
+			bch2_inconsistent_error(c);
 			ret = -EIO;
 			goto err;
 		}
 	}
 
+	bp_k = bch2_bkey_alloc(trans, &bp_iter, backpointer);
+	ret = PTR_ERR_OR_ZERO(bp_k);
+	if (ret)
+		goto err;
+
+	bp_k->v = bp;
+
 	ret = bch2_trans_update(trans, &bp_iter, &bp_k->k_i, 0);
 err:
 	bch2_trans_iter_exit(trans, &bp_iter);
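Net effect of the two hunks above: instead of hand-rolling allocate/init/position for the overflow backpointer key, the iterator is positioned directly at bucket_pos_to_bp() and bch2_bkey_alloc() produces the key, with every error path funneling through err: so the iterator is always released. Roughly what the helper stands in for here, inferred from the removed lines (a sketch, not the helper's actual body):

	/* Sketch: what bch2_bkey_alloc(trans, &bp_iter, backpointer) replaces. */
	bp_k = bch2_trans_kmalloc(trans, sizeof(*bp_k));
	if (!IS_ERR(bp_k)) {
		bkey_backpointer_init(&bp_k->k_i);
		bp_k->k.p = bp_iter.pos;	/* key takes the iterator's position */
	}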
@@ -2077,6 +2077,11 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 						  iter->update_path, pos,
 						  iter->flags & BTREE_ITER_INTENT,
 						  _THIS_IP_);
+		ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
+		if (unlikely(ret)) {
+			k = bkey_s_c_err(ret);
+			goto out_no_locked;
+		}
 	}
 
 	/*
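This hunk is the fix the commit message names: bch2_btree_iter_peek_upto() repositions iter->update_path but previously never traversed it, leaving a path whose locks and per-level node pointers were not valid for the new position. The invariant, reduced to a sketch (hypothetical locals, for illustration only):

	/* A freshly (re)positioned btree_path must be traversed — locks taken,
	 * node pointers filled in — before anyone uses it. */
	path = bch2_btree_path_set_pos(trans, path, new_pos, intent, _THIS_IP_);
	ret = bch2_btree_path_traverse(trans, path, flags);
	if (unlikely(ret))
		return bkey_s_c_err(ret);	/* surface the error as a bkey, as above */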
@@ -359,8 +359,18 @@ static inline bool btree_path_get_locks(struct btree_trans *trans,
 
 		if (!(upgrade
 		      ? bch2_btree_node_upgrade(trans, path, l)
-		      : bch2_btree_node_relock(trans, path, l)))
+		      : bch2_btree_node_relock(trans, path, l))) {
+			(upgrade
+			 ? trace_node_upgrade_fail
+			 : trace_node_relock_fail)(0, _RET_IP_,
+					path->btree_id, &path->pos,
+					l, path->l[l].lock_seq,
+					path->l[l].b,
+					is_btree_node(path, l)
+					? path->l[l].b->c.lock.state.seq
+					: 0);
 			fail_idx = l;
+		}
 
 		l++;
 	} while (l < path->locks_want);
@@ -233,7 +233,7 @@ struct btree_path {
 	/* btree_iter_copy starts here: */
 	struct bpos		pos;
 
-	enum btree_id		btree_id:4;
+	enum btree_id		btree_id:5;
 	bool			cached:1;
 	bool			preserve:1;
 	enum btree_path_uptodate uptodate:2;
@@ -243,7 +243,7 @@ struct btree_path {
 	 */
 	bool			should_be_locked:1;
 	unsigned		level:3,
-				locks_want:4;
+				locks_want:3;
 	u8			nodes_locked;
 
 	struct btree_path_level {
@@ -277,7 +277,7 @@ struct btree_iter {
 	struct btree_path	*update_path;
 	struct btree_path	*key_cache_path;
 
-	enum btree_id		btree_id:4;
+	enum btree_id		btree_id:8;
 	unsigned		min_depth:3;
 	unsigned		advanced:1;
 
@@ -702,15 +702,6 @@ struct btree_root {
 	s8			error;
 };
 
-enum btree_insert_ret {
-	BTREE_INSERT_OK,
-	/* leaf node needs to be split */
-	BTREE_INSERT_BTREE_NODE_FULL,
-	BTREE_INSERT_NEED_MARK_REPLICAS,
-	BTREE_INSERT_NEED_JOURNAL_RES,
-	BTREE_INSERT_NEED_JOURNAL_RECLAIM,
-};
-
 enum btree_gc_coalesce_fail_reason {
 	BTREE_GC_COALESCE_FAIL_RESERVE_GET,
 	BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
@@ -324,7 +324,7 @@ static __always_inline int bch2_trans_journal_res_get(struct btree_trans *trans,
 				   flags|
 				   (trans->flags & JOURNAL_WATERMARK_MASK));
 
-	return ret == -EAGAIN ? BTREE_INSERT_NEED_JOURNAL_RES : ret;
+	return ret == -EAGAIN ? -BCH_ERR_btree_insert_need_journal_res : ret;
 }
 
 #define JSET_ENTRY_LOG_U64s	4
@@ -343,23 +343,20 @@ static void journal_transaction_name(struct btree_trans *trans)
 		strncpy(l->d, trans->fn, JSET_ENTRY_LOG_U64s * sizeof(u64));
 }
 
-static inline enum btree_insert_ret
-btree_key_can_insert(struct btree_trans *trans,
-		     struct btree *b,
-		     unsigned u64s)
+static inline int btree_key_can_insert(struct btree_trans *trans,
+				       struct btree *b, unsigned u64s)
 {
 	struct bch_fs *c = trans->c;
 
 	if (!bch2_btree_node_insert_fits(c, b, u64s))
-		return BTREE_INSERT_BTREE_NODE_FULL;
+		return -BCH_ERR_btree_insert_btree_node_full;
 
-	return BTREE_INSERT_OK;
+	return 0;
 }
 
-static enum btree_insert_ret
-btree_key_can_insert_cached(struct btree_trans *trans,
-			    struct btree_path *path,
-			    unsigned u64s)
+static int btree_key_can_insert_cached(struct btree_trans *trans,
+				       struct btree_path *path,
+				       unsigned u64s)
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_cached *ck = (void *) path->l[0].b;
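With enum btree_insert_ret gone, these helpers now return 0 or a negative BCH_ERR_* code, so call sites can propagate the value directly and classify it later through the error-code hierarchy. A hypothetical caller under that convention (illustration only, not lifted from this file):

	ret = btree_key_can_insert(trans, b, u64s);
	if (ret) {
		/* every btree_insert_* code matches its parent class */
		WARN_ON(!bch2_err_matches(ret, BCH_ERR_btree_insert_fail));
		return ret;
	}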
@@ -372,7 +369,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	if (!test_bit(BKEY_CACHED_DIRTY, &ck->flags) &&
 	    bch2_btree_key_cache_must_wait(c) &&
 	    !(trans->flags & BTREE_INSERT_JOURNAL_RECLAIM))
-		return BTREE_INSERT_NEED_JOURNAL_RECLAIM;
+		return -BCH_ERR_btree_insert_need_journal_reclaim;
 
 	/*
 	 * bch2_varint_decode can read past the end of the buffer by at most 7
@@ -381,7 +378,7 @@ btree_key_can_insert_cached(struct btree_trans *trans,
 	u64s += 1;
 
 	if (u64s <= ck->u64s)
-		return BTREE_INSERT_OK;
+		return 0;
 
 	new_u64s = roundup_pow_of_two(u64s);
 	new_k = krealloc(ck->k, new_u64s * sizeof(u64), GFP_NOFS);
@@ -671,7 +668,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 
 	if (trans->fs_usage_deltas &&
 	    bch2_trans_fs_usage_apply(trans, trans->fs_usage_deltas))
-		return BTREE_INSERT_NEED_MARK_REPLICAS;
+		return -BCH_ERR_btree_insert_need_mark_replicas;
 
 	trans_for_each_update(trans, i)
 		if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
@@ -900,12 +897,12 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 
 	switch (ret) {
-	case BTREE_INSERT_BTREE_NODE_FULL:
+	case -BCH_ERR_btree_insert_btree_node_full:
 		ret = bch2_btree_split_leaf(trans, i->path, trans->flags);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			trace_and_count(c, trans_restart_btree_node_split, trans, trace_ip, i->path);
 		break;
-	case BTREE_INSERT_NEED_MARK_REPLICAS:
+	case -BCH_ERR_btree_insert_need_mark_replicas:
 		bch2_trans_unlock(trans);
 
 		ret = bch2_replicas_delta_list_mark(c, trans->fs_usage_deltas);
@@ -916,7 +913,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		if (ret)
 			trace_and_count(c, trans_restart_mark_replicas, trans, trace_ip);
 		break;
-	case BTREE_INSERT_NEED_JOURNAL_RES:
+	case -BCH_ERR_btree_insert_need_journal_res:
 		bch2_trans_unlock(trans);
 
 		if ((trans->flags & BTREE_INSERT_JOURNAL_RECLAIM) &&
@@ -933,7 +930,7 @@ int bch2_trans_commit_error(struct btree_trans *trans,
 		if (ret)
 			trace_and_count(c, trans_restart_journal_res_get, trans, trace_ip);
 		break;
-	case BTREE_INSERT_NEED_JOURNAL_RECLAIM:
+	case -BCH_ERR_btree_insert_need_journal_reclaim:
 		bch2_trans_unlock(trans);
 
 		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
@@ -53,6 +53,11 @@
 	x(BCH_ERR_no_btree_node,	no_btree_node_down)			\
 	x(BCH_ERR_no_btree_node,	no_btree_node_init)			\
 	x(BCH_ERR_no_btree_node,	no_btree_node_cached)			\
+	x(0,				btree_insert_fail)			\
+	x(BCH_ERR_btree_insert_fail,	btree_insert_btree_node_full)		\
+	x(BCH_ERR_btree_insert_fail,	btree_insert_need_mark_replicas)	\
+	x(BCH_ERR_btree_insert_fail,	btree_insert_need_journal_res)		\
+	x(BCH_ERR_btree_insert_fail,	btree_insert_need_journal_reclaim)	\
 	x(0,				backpointer_to_overwritten_btree_node)	\
 	x(0,				lock_fail_root_changed)			\
 	x(0,				journal_reclaim_would_deadlock)		\
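Background on the table above: errcode.h declares private error codes as a tree, where each x(parent, name) entry records the class a code belongs to — so the old enum btree_insert_ret values become real negative error codes that still "match" their class. A hedged sketch of the matching walk (modeled on bcachefs's bch2_err_matches(); the real table and parent array are generated from these x() entries, so treat this as illustrative):

	bool __bch2_err_matches(int err, int class)
	{
		err = abs(err);

		/* walk up the parent chain recorded by the x(parent, name) table */
		while (err >= BCH_ERR_START && err != class)
			err = bch2_errcode_parents[err - BCH_ERR_START];

		return err == class;
	}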
@@ -269,6 +269,8 @@ static int bch2_inode_unpack_v3(struct bkey_s_c k,
+static noinline int bch2_inode_unpack_slowpath(struct bkey_s_c k,
+					       struct bch_inode_unpacked *unpacked)
 {
 	memset(unpacked, 0, sizeof(*unpacked));
 
 	switch (k.k->type) {
 	case KEY_TYPE_inode: {
 		struct bkey_s_c_inode inode = bkey_s_c_to_inode(k);
@@ -129,12 +129,12 @@ search:
 	if (!*idx)
 		*idx = __bch2_journal_key_search(keys, btree_id, level, pos);
 
-	while (*idx < keys->nr &&
-	       (k = idx_to_key(keys, *idx),
-		k->btree_id == btree_id &&
-		k->level == level &&
-		bpos_le(k->k->k.p, end_pos))) {
-		if (bpos_ge(k->k->k.p, pos) && !k->overwritten)
+	while ((k = *idx < keys->nr ? idx_to_key(keys, *idx) : NULL)) {
+		if (__journal_key_cmp(btree_id, level, end_pos, k) < 0)
+			return NULL;
+
+		if (__journal_key_cmp(btree_id, level, pos, k) <= 0 &&
+		    !k->overwritten)
 			return k->k;
 
 		(*idx)++;
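The rewritten loop above funnels both bounds checks through one comparator: it returns NULL as soon as the cursor passes end_pos, and it skips keys before pos or marked overwritten. A sketch of the comparator it relies on (modeled on the __journal_key_cmp() helper; cmp_int()/bpos_cmp() chain lexicographically over btree id, level, then position):

	static int __journal_key_cmp(enum btree_id l_btree_id, unsigned l_level,
				     struct bpos l_pos, const struct journal_key *r)
	{
		return  cmp_int(l_btree_id, r->btree_id) ?:
			cmp_int(l_level,    r->level) ?:
			bpos_cmp(l_pos,     r->k->k.p);
	}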
@@ -922,6 +922,7 @@ static bool btree_id_is_alloc(enum btree_id id)
 	case BTREE_ID_backpointers:
 	case BTREE_ID_need_discard:
 	case BTREE_ID_freespace:
+	case BTREE_ID_bucket_gens:
 		return true;
 	default:
 		return false;
@@ -804,6 +804,11 @@ int bch2_write_super(struct bch_fs *c)
 	closure_init_stack(cl);
 	memset(&sb_written, 0, sizeof(sb_written));
 
+	if (c->opts.version_upgrade) {
+		c->disk_sb.sb->magic = BCHFS_MAGIC;
+		c->disk_sb.sb->layout.magic = BCHFS_MAGIC;
+	}
+
 	le64_add_cpu(&c->disk_sb.sb->seq, 1);
 
 	if (test_bit(BCH_FS_ERROR, &c->flags))