Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-22 00:00:03 +03:00)
Update bcachefs sources to 99175e5712 bcachefs: Fix bch2_check_discard_freespace_key()
commit 4d185cfa51
parent 0708303f49
@@ -1 +1 @@
-01d7ad6d95c85cf5434a891e6dd7971797e0e1fa
+99175e5712ecc323930db29565b43d5d0e55a276
@@ -788,10 +788,12 @@ static int bch2_bucket_do_index(struct btree_trans *trans,
 	if (ca->mi.freespace_initialized &&
+	    test_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags) &&
 	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
-			"incorrect key when %s %s btree (got %s should be %s)\n"
+			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
 			" for %s",
 			set ? "setting" : "clearing",
 			bch2_btree_ids[btree],
+			iter.pos.inode,
+			iter.pos.offset,
 			bch2_bkey_types[old.k->type],
 			bch2_bkey_types[old_type],
 			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
@@ -1278,8 +1280,8 @@ fsck_err:
 	return ret;
 }
 
-static int bch2_check_discard_freespace_key(struct btree_trans *trans,
-					    struct btree_iter *iter)
+static int __bch2_check_discard_freespace_key(struct btree_trans *trans,
+					      struct btree_iter *iter)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter alloc_iter;
@@ -1321,6 +1323,7 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
 		goto delete;
 out:
 fsck_err:
+	set_btree_iter_dontneed(&alloc_iter);
 	bch2_trans_iter_exit(trans, &alloc_iter);
 	printbuf_exit(&buf);
 	return ret;
@@ -1330,6 +1333,24 @@ delete:
 	goto out;
 }
 
+static int bch2_check_discard_freespace_key(struct btree_trans *trans,
+					    struct btree_iter *iter,
+					    struct bpos end)
+{
+	if (!btree_node_type_is_extents(iter->btree_id)) {
+		return __bch2_check_discard_freespace_key(trans, iter);
+	} else {
+		int ret;
+
+		while (!bkey_eq(iter->pos, end) &&
+		       !(ret = btree_trans_too_many_iters(trans) ?:
+			       __bch2_check_discard_freespace_key(trans, iter)))
+			bch2_btree_iter_set_pos(iter, bpos_nosnap_successor(iter->pos));
+
+		return ret;
+	}
+}
+
 /*
  * We've already checked that generation numbers in the bucket_gens btree are
  * valid for buckets that exist; this just checks for keys for nonexistent
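Note on the new wrapper above: for extent-style btrees it walks the range one position at a time, bounding each step with btree_trans_too_many_iters() and chaining the two calls with the GNU C binary conditional "a ?: b", which evaluates to the first nonzero error code. A minimal standalone sketch of that chaining idiom (builds with GCC/Clang, since "?:" is a GNU extension); check_budget() and check_key() are hypothetical stand-ins, not bcachefs functions:

    #include <stdio.h>

    /* Stand-ins for btree_trans_too_many_iters() and the real check. */
    static int check_budget(int pos) { return pos > 8 ? -1 : 0; }
    static int check_key(int pos)    { return pos == 3 ? -2 : 0; }

    static int check_range(int start, int end)
    {
        int ret = 0;

        /* Same shape as the loop above: stop at end of range or on the
         * first nonzero error from either check. */
        for (int pos = start;
             pos != end && !(ret = check_budget(pos) ?: check_key(pos));
             pos++)
            ;
        return ret;
    }

    int main(void)
    {
        printf("%d\n", check_range(0, 6)); /* -2: check_key() fails at pos 3 */
        printf("%d\n", check_range(0, 2)); /* 0: range is clean */
        return 0;
    }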
@@ -1485,12 +1506,12 @@ bkey_err:
 			BTREE_ID_need_discard, POS_MIN,
 			BTREE_ITER_PREFETCH, k,
 			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
-		bch2_check_discard_freespace_key(&trans, &iter)) ?:
+		bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
 	for_each_btree_key_commit(&trans, iter,
 			BTREE_ID_freespace, POS_MIN,
 			BTREE_ITER_PREFETCH, k,
 			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
-		bch2_check_discard_freespace_key(&trans, &iter)) ?:
+		bch2_check_discard_freespace_key(&trans, &iter, k.k->p)) ?:
 	for_each_btree_key_commit(&trans, iter,
 			BTREE_ID_bucket_gens, POS_MIN,
 			BTREE_ITER_PREFETCH, k,
@@ -793,7 +793,7 @@ static noinline struct btree *bch2_btree_node_fill(struct btree_trans *trans,
 
 	/* Unlock before doing IO: */
 	if (trans && sync)
-		bch2_trans_unlock(trans);
+		bch2_trans_unlock_noassert(trans);
 
 	bch2_btree_node_read(c, b, sync);
 
@@ -2859,6 +2859,7 @@ static noinline void bch2_trans_reset_srcu_lock(struct btree_trans *trans)
 u32 bch2_trans_begin(struct btree_trans *trans)
 {
 	struct btree_path *path;
+	u64 now;
 
 	bch2_trans_reset_updates(trans);
 
@@ -2887,10 +2888,14 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 		path->preserve = false;
 	}
 
+	now = local_clock();
 	if (!trans->restarted &&
 	    (need_resched() ||
-	     local_clock() - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS))
+	     now - trans->last_begin_time > BTREE_TRANS_MAX_LOCK_HOLD_TIME_NS)) {
 		drop_locks_do(trans, (cond_resched(), 0));
+		now = local_clock();
+	}
+	trans->last_begin_time = now;
 
 	if (unlikely(time_after(jiffies, trans->srcu_lock_time + msecs_to_jiffies(10))))
 		bch2_trans_reset_srcu_lock(trans);
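Note: the change above samples local_clock() once into now, refreshes it only after the voluntary reschedule, and stores the refreshed value in last_begin_time, so time spent yielding is not billed to the next transaction's lock-hold budget. A rough userspace sketch of the same pattern, with clock_gettime() standing in for local_clock(), sched_yield() for drop_locks_do(..., cond_resched()), and MAX_HOLD_NS as an illustrative constant:

    #include <sched.h>
    #include <stdint.h>
    #include <time.h>

    #define MAX_HOLD_NS (10 * 1000 * 1000) /* illustrative 10ms budget */

    struct trans { uint64_t last_begin_time; };

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t) ts.tv_sec * 1000000000ULL + ts.tv_nsec;
    }

    static void trans_begin(struct trans *t)
    {
        uint64_t now = now_ns();

        if (now - t->last_begin_time > MAX_HOLD_NS) {
            sched_yield();  /* the real code drops btree locks around this */
            now = now_ns(); /* don't bill the yield to the next transaction */
        }
        t->last_begin_time = now;
    }

    int main(void)
    {
        struct trans t = { .last_begin_time = now_ns() };

        trans_begin(&t);
        return 0;
    }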
@@ -2901,7 +2906,6 @@ u32 bch2_trans_begin(struct btree_trans *trans)
 		trans->notrace_relock_fail = false;
 	}
 
-	trans->last_begin_time = local_clock();
 	return trans->restart_count;
 }
 
@@ -714,6 +714,14 @@ int bch2_trans_relock_notrace(struct btree_trans *trans)
 	return 0;
 }
 
+void bch2_trans_unlock_noassert(struct btree_trans *trans)
+{
+	struct btree_path *path;
+
+	trans_for_each_path(trans, path)
+		__bch2_btree_path_unlock(trans, path);
+}
+
 void bch2_trans_unlock(struct btree_trans *trans)
 {
 	struct btree_path *path;
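Note: read together with the bch2_btree_node_fill() hunk above, the _noassert variant presumably exists because the regular bch2_trans_unlock() performs a debug check that does not hold at the point node fill unlocks for IO. A generic sketch of that split; the invariant shown is made up for illustration, not the actual bcachefs assertion:

    #include <assert.h>

    struct trans { int locks_held; int mid_traversal; };

    static void unlock_all_paths(struct trans *t)
    {
        t->locks_held = 0;
    }

    void trans_unlock(struct trans *t)
    {
        /* Hypothetical invariant enforced for normal callers. */
        assert(!t->mid_traversal);
        unlock_all_paths(t);
    }

    /* Same work, no check, for the rare caller that must unlock from a
     * state where the assertion would (falsely) fire. */
    void trans_unlock_noassert(struct trans *t)
    {
        unlock_all_paths(t);
    }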
@@ -22,6 +22,8 @@ void bch2_assert_btree_nodes_not_locked(void);
 static inline void bch2_assert_btree_nodes_not_locked(void) {}
 #endif
 
+void bch2_trans_unlock_noassert(struct btree_trans *);
+
 static inline bool is_btree_node(struct btree_path *path, unsigned l)
 {
 	return l < BTREE_MAX_DEPTH && !IS_ERR_OR_NULL(path->l[l].b);
@@ -1077,7 +1077,7 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
 				BKEY_BTREE_PTR_U64s_MAX * (1 + split)))
 			break;
 
-		split = true;
+		split = path->l[update_level].b->nr.live_u64s > BTREE_SPLIT_THRESHOLD(c);
 	}
 
 	if (flags & BTREE_INSERT_GC_LOCK_HELD)
@@ -474,7 +474,7 @@ int bch2_data_update_init(struct btree_trans *trans,
 		if (crc_is_compressed(p.crc))
 			reserve_sectors += k.k->size;
 
-		m->op.nr_replicas += bch2_extent_ptr_durability(c, &p);
+		m->op.nr_replicas += bch2_extent_ptr_desired_durability(c, &p);
 	} else if (!p.ptr.cached) {
 		bch2_dev_list_add_dev(&m->op.devs_have, p.ptr.dev);
 	}
@@ -641,9 +641,8 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
 	return replicas;
 }
 
-unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
 {
-	unsigned durability = 0;
 	struct bch_dev *ca;
 
 	if (p->ptr.cached)
@@ -651,13 +650,28 @@ unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded
 
 	ca = bch_dev_bkey_exists(c, p->ptr.dev);
 
-	if (ca->mi.state != BCH_MEMBER_STATE_failed)
-		durability = max_t(unsigned, durability, ca->mi.durability);
+	return ca->mi.durability +
+		(p->has_ec
+		 ? p->ec.redundancy
+		 : 0);
+}
 
-	if (p->has_ec)
-		durability += p->ec.redundancy;
+unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
+{
+	struct bch_dev *ca;
 
-	return durability;
+	if (p->ptr.cached)
+		return 0;
+
+	ca = bch_dev_bkey_exists(c, p->ptr.dev);
+
+	if (ca->mi.state == BCH_MEMBER_STATE_failed)
+		return 0;
+
+	return ca->mi.durability +
+		(p->has_ec
+		 ? p->ec.redundancy
+		 : 0);
 }
 
 unsigned bch2_bkey_durability(struct bch_fs *c, struct bkey_s_c k)
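Note: this hunk splits durability into a "desired" flavor (what a replica is supposed to contribute, still counted when its device is failed) and the existing "actual" flavor (zero for a replica on a failed device); the data_update.c hunk above switches replica sizing for rewrites to the desired value. A toy model of the distinction; the structs here are illustrative, not the bcachefs ones:

    #include <stdbool.h>
    #include <stdio.h>

    struct dev { unsigned durability; bool failed; };
    struct ptr { const struct dev *dev; unsigned ec_redundancy; };

    /* What the replica should provide, even if its device is down. */
    static unsigned desired_durability(const struct ptr *p)
    {
        return p->dev->durability + p->ec_redundancy;
    }

    /* What the replica provides right now. */
    static unsigned actual_durability(const struct ptr *p)
    {
        if (p->dev->failed)
            return 0;
        return p->dev->durability + p->ec_redundancy;
    }

    int main(void)
    {
        struct dev dead = { .durability = 1, .failed = true };
        struct ptr p    = { .dev = &dead, .ec_redundancy = 0 };

        /* desired=1, actual=0: sizing a rewrite by the desired value
         * avoids over-replicating if the device later comes back. */
        printf("desired=%u actual=%u\n",
               desired_durability(&p), actual_durability(&p));
        return 0;
    }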
@@ -610,6 +610,7 @@ bool bch2_bkey_is_incompressible(struct bkey_s_c);
 unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
 
 unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
+unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
 unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
 unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
 
linux/six.c (53 changed lines)
@@ -47,26 +47,26 @@ struct six_lock_vals {
 	enum six_lock_type unlock_wakeup;
 };
 
-#define LOCK_VALS {						\
-	[SIX_LOCK_read] = {					\
-		.lock_val	= 1U << SIX_LOCK_HELD_read_OFFSET,\
-		.lock_fail	= SIX_LOCK_HELD_write,		\
-		.held_mask	= SIX_LOCK_HELD_read,		\
-		.unlock_wakeup	= SIX_LOCK_write,		\
-	},							\
-	[SIX_LOCK_intent] = {					\
-		.lock_val	= SIX_LOCK_HELD_intent,		\
-		.lock_fail	= SIX_LOCK_HELD_intent,		\
-		.held_mask	= SIX_LOCK_HELD_intent,		\
-		.unlock_wakeup	= SIX_LOCK_intent,		\
-	},							\
-	[SIX_LOCK_write] = {					\
-		.lock_val	= SIX_LOCK_HELD_write,		\
-		.lock_fail	= SIX_LOCK_HELD_read,		\
-		.held_mask	= SIX_LOCK_HELD_write,		\
-		.unlock_wakeup	= SIX_LOCK_read,		\
-	},							\
-}
+static const struct six_lock_vals l[] = {
+	[SIX_LOCK_read] = {
+		.lock_val	= 1U << SIX_LOCK_HELD_read_OFFSET,
+		.lock_fail	= SIX_LOCK_HELD_write,
+		.held_mask	= SIX_LOCK_HELD_read,
+		.unlock_wakeup	= SIX_LOCK_write,
+	},
+	[SIX_LOCK_intent] = {
+		.lock_val	= SIX_LOCK_HELD_intent,
+		.lock_fail	= SIX_LOCK_HELD_intent,
+		.held_mask	= SIX_LOCK_HELD_intent,
+		.unlock_wakeup	= SIX_LOCK_intent,
+	},
+	[SIX_LOCK_write] = {
+		.lock_val	= SIX_LOCK_HELD_write,
+		.lock_fail	= SIX_LOCK_HELD_read,
+		.held_mask	= SIX_LOCK_HELD_write,
+		.unlock_wakeup	= SIX_LOCK_read,
+	},
+};
 
 static inline void six_set_bitmask(struct six_lock *lock, u32 mask)
 {
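Note: converting LOCK_VALS from a macro into a file-scope static const array (with the per-function copies removed in the hunks below) means the table is emitted once as read-only data instead of being rebuilt on the stack of every function that expanded the macro. A tiny illustration of the difference; the names are made up:

    #define VALS { [0] = 1, [1] = 2, [2] = 4 }

    static const int table[] = VALS;    /* emitted once, read-only */

    int lookup_macro(int i)
    {
        const int l[] = VALS;           /* re-initialized on every call */

        return l[i];
    }

    int lookup_table(int i)
    {
        return table[i];
    }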
@@ -116,7 +116,6 @@ static inline unsigned pcpu_read_count(struct six_lock *lock)
 static int __do_six_trylock(struct six_lock *lock, enum six_lock_type type,
 			    struct task_struct *task, bool try)
 {
-	const struct six_lock_vals l[] = LOCK_VALS;
 	int ret;
 	u32 old;
 
@@ -301,10 +300,10 @@ EXPORT_SYMBOL_GPL(six_trylock_ip);
 bool six_relock_ip(struct six_lock *lock, enum six_lock_type type,
 		   unsigned seq, unsigned long ip)
 {
-	if (lock->seq != seq || !six_trylock_ip(lock, type, ip))
+	if (six_lock_seq(lock) != seq || !six_trylock_ip(lock, type, ip))
 		return false;
 
-	if (lock->seq != seq) {
+	if (six_lock_seq(lock) != seq) {
 		six_unlock_ip(lock, type, ip);
 		return false;
 	}
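Note: six_lock_seq() appears to be a new accessor for the lock's sequence number, which per the six_unlock_ip() hunks below is bumped on every write unlock; a relock can only succeed if no writer intervened while the lock was dropped. A single-threaded sketch of that check/trylock/re-check shape; struct seq_mutex is illustrative, not the six-lock implementation:

    #include <stdbool.h>
    #include <stdio.h>

    struct seq_mutex {
        unsigned seq;  /* incremented by every write unlock */
        bool held;
    };

    static bool trylock(struct seq_mutex *l)
    {
        if (l->held)
            return false;
        l->held = true;
        return true;
    }

    static void unlock_write(struct seq_mutex *l)
    {
        l->seq++;      /* mirrors lock->seq++ in six_unlock_ip() */
        l->held = false;
    }

    /* Retake the lock only if the state observed at 'seq' is still valid. */
    static bool relock(struct seq_mutex *l, unsigned seq)
    {
        if (l->seq != seq || !trylock(l))
            return false;
        if (l->seq != seq) { /* re-check: a writer may have slipped in */
            l->held = false;
            return false;
        }
        return true;
    }

    int main(void)
    {
        struct seq_mutex l = {0};
        unsigned seq;

        (void) trylock(&l);
        seq = l.seq;
        unlock_write(&l);                        /* a writer ran: seq is stale */
        printf("relock: %d\n", relock(&l, seq)); /* 0: refused */
        return 0;
    }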
@@ -588,7 +587,6 @@ EXPORT_SYMBOL_GPL(six_lock_ip_waiter);
 __always_inline
 static void do_six_unlock_type(struct six_lock *lock, enum six_lock_type type)
 {
-	const struct six_lock_vals l[] = LOCK_VALS;
 	u32 state;
 
 	if (type == SIX_LOCK_intent)
@@ -638,6 +636,8 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long
 
 	if (type != SIX_LOCK_write)
 		six_release(&lock->dep_map, ip);
+	else
+		lock->seq++;
 
 	if (type == SIX_LOCK_intent &&
 	    lock->intent_lock_recurse) {
@@ -645,8 +645,6 @@ void six_unlock_ip(struct six_lock *lock, enum six_lock_type type, unsigned long
 		return;
 	}
 
-	lock->seq += type == SIX_LOCK_write;
-
 	do_six_unlock_type(lock, type);
 }
 EXPORT_SYMBOL_GPL(six_unlock_ip);
@@ -675,7 +673,6 @@ EXPORT_SYMBOL_GPL(six_lock_downgrade);
  */
 bool six_lock_tryupgrade(struct six_lock *lock)
 {
-	const struct six_lock_vals l[] = LOCK_VALS;
 	u32 old = atomic_read(&lock->state), new;
 
 	do {
@@ -743,8 +740,6 @@ EXPORT_SYMBOL_GPL(six_trylock_convert);
  */
 void six_lock_increment(struct six_lock *lock, enum six_lock_type type)
 {
-	const struct six_lock_vals l[] = LOCK_VALS;
-
 	six_acquire(&lock->dep_map, 0, type == SIX_LOCK_read, _RET_IP_);
 
 	/* XXX: assert already locked, and that we don't overflow: */