Mirror of https://github.com/koverstreet/bcachefs-tools.git, synced 2025-01-22 00:04:31 +03:00
Update bcachefs sources to 71a5b27e017d bcachefs: Make backpointer fsck wb flush check more rigorous
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 3bd4653767
commit f27b135285
@@ -1 +1 @@
676dd269f0f8d59f730ba8e96474448576e4f677
71a5b27e017df6ebae391da58857b22fdc406276
Makefile (9 changed lines)
@@ -218,10 +218,11 @@ update-bcachefs-sources:
	git add include/linux/kmemleak.h
	cp $(LINUX_DIR)/lib/math/int_sqrt.c linux/
	git add linux/int_sqrt.c
	cp $(LINUX_DIR)/lib/math/mean_and_variance.c linux/
	git add linux/mean_and_variance.c
	cp $(LINUX_DIR)/include/linux/mean_and_variance.h include/linux/
	git add include/linux/mean_and_variance.h
	rm libbcachefs/mean_and_variance_test.c
#	cp $(LINUX_DIR)/lib/math/mean_and_variance.c linux/
#	git add linux/mean_and_variance.c
#	cp $(LINUX_DIR)/include/linux/mean_and_variance.h include/linux/
#	git add include/linux/mean_and_variance.h
	cp $(LINUX_DIR)/scripts/Makefile.compiler ./
	git add Makefile.compiler
	$(RM) libbcachefs/*.mod.c
@@ -3,6 +3,7 @@
#include "bbpos.h"
#include "alloc_background.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
@@ -404,18 +405,13 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
	return ret;
}

struct bpos_level {
	unsigned	level;
	struct bpos	pos;
};

static int check_bp_exists(struct btree_trans *trans,
			   struct bpos bucket,
			   struct bch_backpointer bp,
			   struct bkey_s_c orig_k,
			   struct bpos bucket_start,
			   struct bpos bucket_end,
			   struct bpos_level *last_flushed)
			   struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct btree_iter bp_iter = { NULL };
@@ -439,14 +435,19 @@ static int check_bp_exists(struct btree_trans *trans,

	if (bp_k.k->type != KEY_TYPE_backpointer ||
	    memcmp(bkey_s_c_to_backpointer(bp_k).v, &bp, sizeof(bp))) {
		if (last_flushed->level != bp.level ||
		    !bpos_eq(last_flushed->pos, orig_k.k->p)) {
		if (!bpos_eq(orig_k.k->p, last_flushed->k->k.p) ||
		    bkey_bytes(orig_k.k) != bkey_bytes(&last_flushed->k->k) ||
		    memcmp(orig_k.v, &last_flushed->k->v, bkey_val_bytes(orig_k.k))) {
			if (bp.level) {
				bch2_trans_unlock(trans);
				bch2_btree_interior_updates_flush(c);
			}

			ret = bch2_btree_write_buffer_flush_sync(trans);
			if (ret)
				goto err;

			last_flushed->level = bp.level;
			last_flushed->pos = orig_k.k->p;
			bch2_bkey_buf_reassemble(last_flushed, c, orig_k);
			ret = -BCH_ERR_transaction_restart_write_buffer_flush;
			goto out;
		}
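The two hunks above are the substance of the change: check_bp_exists() used to remember only the level and position of the last key that forced a write-buffer flush, and it now keeps a full copy of that key in a bkey_buf, flushing again whenever the current key differs in position, size, or value. Below is a minimal, standalone sketch of the old and new tests; the types and the needs_flush_old()/needs_flush_new() helpers are invented stand-ins for illustration, not bcachefs's bpos/bkey/bkey_buf API.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for bcachefs's key types (illustration only). */
struct pos { unsigned long long inode, offset; };
struct key { struct pos p; unsigned val_bytes; unsigned char val[16]; };

static bool pos_eq(struct pos a, struct pos b)
{
	return a.inode == b.inode && a.offset == b.offset;
}

/* Old check: only the level and position of the last flushed key were compared. */
static bool needs_flush_old(struct pos last_pos, unsigned last_level,
			    const struct key *k, unsigned level)
{
	return last_level != level || !pos_eq(last_pos, k->p);
}

/* New check: compare the whole cached key - position, size and value. */
static bool needs_flush_new(const struct key *last, const struct key *k)
{
	return !pos_eq(k->p, last->p) ||
	       k->val_bytes != last->val_bytes ||
	       memcmp(k->val, last->val, k->val_bytes);
}

int main(void)
{
	struct key last = { .p = { 1, 4096 }, .val_bytes = 4, .val = "aaaa" };
	struct key cur  = last;

	cur.val[0] = 'b';	/* same position and size, different value */

	printf("old: %d  new: %d\n",
	       needs_flush_old(last.p, 0, &cur, 0),
	       needs_flush_new(&last, &cur));
	return 0;
}

Run as-is, the old test reports no flush needed for a key whose value changed at the same position, while the new test still forces one; that is the extra rigor the commit title refers to.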
@@ -477,7 +478,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
			   enum btree_id btree, unsigned level,
			   struct bpos bucket_start,
			   struct bpos bucket_end,
			   struct bpos_level *last_flushed,
			   struct bkey_buf *last_flushed,
			   struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
@@ -511,7 +512,7 @@ static int check_btree_root_to_backpointers(struct btree_trans *trans,
			   enum btree_id btree_id,
			   struct bpos bucket_start,
			   struct bpos bucket_end,
			   struct bpos_level *last_flushed,
			   struct bkey_buf *last_flushed,
			   int *level)
{
	struct bch_fs *c = trans->c;
@@ -616,10 +617,13 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
	struct btree_iter iter;
	enum btree_id btree_id;
	struct bkey_s_c k;
	struct bkey_buf last_flushed;
	int ret = 0;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	for (btree_id = 0; btree_id < btree_id_nr_alive(c); btree_id++) {
		struct bpos_level last_flushed = { UINT_MAX, POS_MIN };
		int level, depth = btree_type_has_ptrs(btree_id) ? 0 : 1;

		ret = commit_do(trans, NULL, NULL,
@@ -664,6 +668,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
		}
	}

	bch2_bkey_buf_exit(&last_flushed, c);
	return 0;
}
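Note the change of scope as well: last_flushed was previously a per-btree struct bpos_level reset at the top of the loop, and is now a single bkey_buf initialized once per pass (bch2_bkey_buf_init() plus bkey_init()) and released with bch2_bkey_buf_exit() at the end. For readers unfamiliar with the pattern, here is a rough, self-contained sketch of what such a key buffer looks like; the names, sizes, and layout are invented for this example and are not the bcachefs API.

#include <stdlib.h>
#include <string.h>

/* A key buffer with small inline storage and a heap fallback (illustration only). */
#define INLINE_BYTES	64

struct key {
	unsigned	bytes;		/* total size of the key, in bytes */
	unsigned char	data[];
};

struct key_buf {
	struct key	*k;
	unsigned char	inline_buf[INLINE_BYTES];
};

static void key_buf_init(struct key_buf *b)
{
	b->k = (struct key *) b->inline_buf;
	b->k->bytes = sizeof(struct key);	/* start out holding an empty key */
}

/* Copy @src into @b, switching to a heap allocation if it doesn't fit inline. */
static int key_buf_reassemble(struct key_buf *b, const struct key *src)
{
	if (src->bytes > INLINE_BYTES) {
		struct key *n = malloc(src->bytes);
		if (!n)
			return -1;
		if ((void *) b->k != (void *) b->inline_buf)
			free(b->k);
		b->k = n;
	}
	memcpy(b->k, src, src->bytes);
	return 0;
}

static void key_buf_exit(struct key_buf *b)
{
	if ((void *) b->k != (void *) b->inline_buf)
		free(b->k);
	b->k = NULL;
}

int main(void)
{
	unsigned bytes = sizeof(struct key) + 96;	/* too big for the inline buffer */
	struct key *src = calloc(1, bytes);
	struct key_buf buf;

	if (!src)
		return 1;
	src->bytes = bytes;

	key_buf_init(&buf);
	key_buf_reassemble(&buf, src);	/* falls back to a heap-allocated copy */
	key_buf_exit(&buf);
	free(src);
	return 0;
}

The inline buffer means most keys are cached without a heap allocation; only oversized keys pay for malloc()/free(), which is presumably why a reusable buffer rather than a fresh copy per key is used in the pass above.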
@@ -1087,9 +1087,6 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
	unsigned i;
	int ret = 0;

	if (initial)
		trans->is_initial_gc = true;

	for (i = 0; i < BTREE_ID_NR; i++)
		ids[i] = i;
	bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);
@@ -2870,8 +2870,6 @@ struct btree_trans *__bch2_trans_get(struct bch_fs *c, unsigned fn_idx)
	struct btree_trans *trans;
	struct btree_transaction_stats *s;

	bch2_assert_btree_nodes_not_locked();

	trans = bch2_trans_alloc(c);

	memset(trans, 0, sizeof(*trans));
@@ -10,15 +10,16 @@ void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
			  enum six_lock_init_flags flags)
{
	__six_lock_init(&b->lock, "b->c.lock", &bch2_btree_node_lock_key, flags);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	lockdep_set_no_check_recursion(&b->lock.dep_map);
#endif
	lockdep_set_novalidate_class(&b->lock);
}

#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
#if 0
	//Re-enable when lock_class_is_held() is merged:
	BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
#endif
}
#endif
@@ -769,13 +770,6 @@ void bch2_trans_unlock(struct btree_trans *trans)

	trans_for_each_path(trans, path)
		__bch2_btree_path_unlock(trans, path);

	/*
	 * bch2_gc_btree_init_recurse() doesn't use btree iterators for walking
	 * btree nodes, it implements its own walking:
	 */
	if (!trans->is_initial_gc)
		bch2_assert_btree_nodes_not_locked();
}

void bch2_trans_unlock_long(struct btree_trans *trans)
@@ -399,7 +399,6 @@ struct btree_trans {
	bool			memory_allocation_failure:1;
	bool			journal_transaction_names:1;
	bool			journal_replay_not_finished:1;
	bool			is_initial_gc:1;
	bool			notrace_relock_fail:1;
	bool			write_locked:1;
	enum bch_errcode	restarted:16;
@@ -314,7 +314,7 @@ next:
	}
	continue;
nowork:
	if (m->stats && m->stats) {
	if (m->stats) {
		BUG_ON(k.k->p.offset <= iter.pos.offset);
		atomic64_inc(&m->stats->keys_raced);
		atomic64_add(k.k->p.offset - iter.pos.offset,
@@ -40,9 +40,10 @@
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/math64.h>
#include <linux/mean_and_variance.h>
#include <linux/module.h>

#include "mean_and_variance.h"

u128_u u128_div(u128_u n, u64 d)
{
	u128_u r;
@@ -22,9 +22,9 @@
#include <linux/string.h>
#include <linux/types.h>
#include <linux/sched/clock.h>
#include <linux/mean_and_variance.h>

#include "eytzinger.h"
#include "mean_and_variance.h"
#include "util.h"

static const char si_units[] = "?kMGTPEZY";
@@ -17,7 +17,8 @@
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mean_and_variance.h>

#include "mean_and_variance.h"

#include "darray.h"