Update bcachefs sources to 792ca5ba3c9a bcachefs: kill key cache arg to bch2_assert_pos_locked()

Kent Overstreet 2024-06-17 13:39:02 -04:00
parent fc06a0ea5e
commit d915c62f17
12 changed files with 82 additions and 40 deletions


@@ -1 +1 @@
-c56e1ec97dfdbb888691d2fc6ebb06d7df25e8dc
+792ca5ba3c9a07d762d9c1a440e31c0520f37de0


@@ -595,6 +595,8 @@ int bch2_alloc_read(struct bch_fs *c)
 	struct bch_dev *ca = NULL;
 	int ret;
 
+	down_read(&c->gc_lock);
+
 	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
 		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
 					 BTREE_ITER_prefetch, k, ({
@@ -643,6 +645,7 @@ int bch2_alloc_read(struct bch_fs *c)
 	bch2_dev_put(ca);
 	bch2_trans_put(trans);
+	up_read(&c->gc_lock);
 
 	bch_err_fn(c, ret);
 	return ret;
@@ -2066,6 +2069,21 @@ err:
 	goto out;
 }
 
+static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
+				    struct bch_dev *ca, bool *wrapped)
+{
+	struct bkey_s_c k;
+
+again:
+	k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
+	if (!k.k && !*wrapped) {
+		bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
+		*wrapped = true;
+		goto again;
+	}
+	return k;
+}
+
 static void bch2_do_invalidates_work(struct work_struct *work)
 {
 	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
@@ -2079,12 +2097,31 @@ static void bch2_do_invalidates_work(struct work_struct *work)
 	for_each_member_device(c, ca) {
 		s64 nr_to_invalidate =
 			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
+		struct btree_iter iter;
+		bool wrapped = false;
 
-		ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
-				lru_pos(ca->dev_idx, 0, 0),
-				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
-				BTREE_ITER_intent, k,
-			invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));
+		bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
+				     lru_pos(ca->dev_idx, 0,
+					     ((bch2_current_io_time(c, READ) + U32_MAX) &
+					      LRU_TIME_MAX)), 0);
+
+		while (true) {
+			bch2_trans_begin(trans);
+
+			struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
+			ret = bkey_err(k);
+			if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+				continue;
+			if (ret)
+				break;
+			if (!k.k)
+				break;
+
+			ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
+			if (ret)
+				break;
+		}
+		bch2_trans_iter_exit(trans, &iter);
 
 		if (ret < 0) {
 			bch2_dev_put(ca);
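Note: the for_each_btree_key_upto() loop is replaced by a hand-rolled iterator so the LRU scan can start at the device's current read clock (plus U32_MAX) and wrap to the start of the keyspace once, via next_lru_key(). A standalone sketch of the wrap-once pattern, with a plain array standing in for the btree; unlike this sketch, the real loop bounds its second pass by the invalidation budget rather than by remembering the start position:

#include <stdbool.h>
#include <stdio.h>

struct lru_iter {
	const int *v;
	int nr;
	int pos;
	int start;	/* where the first pass began */
};

/* Next element, wrapping to index 0 exactly once, like next_lru_key(). */
static int next_wrapping(struct lru_iter *it, bool *wrapped)
{
	for (;;) {
		/* after wrapping, stop where the first pass started */
		int end = *wrapped ? it->start : it->nr;

		if (it->pos < end)
			return it->v[it->pos++];
		if (*wrapped)
			return -1;	/* stand-in for !k.k */
		it->pos = 0;		/* like bch2_btree_iter_set_pos(..., lru_pos(.., 0, 0)) */
		*wrapped = true;
	}
}

int main(void)
{
	const int lru[] = { 10, 20, 30, 40, 50 };
	struct lru_iter it = { lru, 5, 3, 3 };	/* start mid-keyspace */
	bool wrapped = false;
	int v;

	while ((v = next_wrapping(&it, &wrapped)) >= 0)
		printf("%d ", v);	/* prints: 40 50 10 20 30 */
	printf("\n");
	return 0;
}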


@@ -533,8 +533,8 @@ struct bch_dev {
 	/*
 	 * Buckets:
 	 * Per-bucket arrays are protected by c->mark_lock, bucket_lock and
-	 * gc_gens_lock, for device resize - holding any is sufficient for
-	 * access: Or rcu_read_lock(), but only for dev_ptr_stale():
+	 * gc_lock, for device resize - holding any is sufficient for access:
+	 * Or rcu_read_lock(), but only for dev_ptr_stale():
 	 */
 	struct bucket_array __rcu *buckets_gc;
 	struct bucket_gens __rcu *bucket_gens;
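A hedged illustration of the rule in this comment, using userspace pthreads rwlocks as stand-ins for the kernel primitives (the lock names mirror the bcachefs ones, but nothing else here is from the source): the resize path write-locks every protecting lock, so a reader holding any single one in read mode is safe.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t gc_lock     = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t bucket_lock = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t mark_lock   = PTHREAD_RWLOCK_INITIALIZER;

static int *buckets;
static size_t nbuckets;

/* Resize: write-lock ALL protecting locks, so no reader can race. */
static void resize(size_t new_nr)
{
	pthread_rwlock_wrlock(&gc_lock);
	pthread_rwlock_wrlock(&bucket_lock);
	pthread_rwlock_wrlock(&mark_lock);

	free(buckets);
	buckets = calloc(new_nr, sizeof(*buckets));
	nbuckets = new_nr;

	pthread_rwlock_unlock(&mark_lock);
	pthread_rwlock_unlock(&bucket_lock);
	pthread_rwlock_unlock(&gc_lock);
}

/* Reader: any ONE of the three locks, read-held, is sufficient. */
static int read_bucket(size_t i)
{
	pthread_rwlock_rdlock(&mark_lock);
	int v = i < nbuckets ? buckets[i] : -1;
	pthread_rwlock_unlock(&mark_lock);
	return v;
}

int main(void)
{
	resize(8);
	printf("%d\n", read_bucket(3));
	return 0;
}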


@@ -1229,7 +1229,7 @@ int bch2_gc_gens(struct bch_fs *c)
 	int ret;
 
 	/*
-	 * Ideally we would be using state_lock and not gc_gens_lock here, but that
+	 * Ideally we would be using state_lock and not gc_lock here, but that
 	 * introduces a deadlock in the RO path - we currently take the state
 	 * lock at the start of going RO, thus the gc thread may get stuck:
 	 */
@@ -1237,8 +1237,7 @@ int bch2_gc_gens(struct bch_fs *c)
 		return 0;
 
 	trace_and_count(c, gc_gens_start, c);
+	down_read(&c->gc_lock);
 
-	down_read(&c->state_lock);
-
 	for_each_member_device(c, ca) {
 		struct bucket_gens *gens = bucket_gens(ca);
@@ -1307,7 +1306,7 @@ err:
 		ca->oldest_gen = NULL;
 	}
 
-	up_read(&c->state_lock);
+	up_read(&c->gc_lock);
 	mutex_unlock(&c->gc_gens_lock);
 
 	if (!bch2_err_matches(ret, EROFS))
 		bch_err_fn(c, ret);
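The RO-path deadlock this comment describes is the classic shape: the shutdown path takes state_lock and then waits for the gc thread, so if gc in turn blocked on state_lock, neither side could make progress. A minimal pthreads sketch of that shape (hypothetical, not the bcachefs call graph), with the gc side backing off instead of blocking:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void *gc_thread(void *arg)
{
	(void)arg;
	/* Blocking here would deadlock: main() already holds state_lock
	 * and is waiting in pthread_join() for this thread to finish. */
	if (pthread_mutex_trylock(&state_lock)) {
		puts("gc: state_lock held by RO path, backing off");
		return NULL;
	}
	pthread_mutex_unlock(&state_lock);
	return NULL;
}

int main(void)
{
	pthread_t gc;

	pthread_mutex_lock(&state_lock);	/* "going read-only" */
	pthread_create(&gc, NULL, gc_thread, NULL);
	pthread_join(gc, NULL);			/* RO path waits for gc */
	pthread_mutex_unlock(&state_lock);
	puts("ro: done, no deadlock");
	return 0;
}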


@@ -1217,6 +1217,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 		bucket_gens->nbuckets - bucket_gens->first_bucket;
 
 	if (resize) {
+		down_write(&c->gc_lock);
 		down_write(&ca->bucket_lock);
 		percpu_down_write(&c->mark_lock);
 	}
@@ -1239,6 +1240,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	if (resize) {
 		percpu_up_write(&c->mark_lock);
 		up_write(&ca->bucket_lock);
+		up_write(&c->gc_lock);
 	}
 
 	ret = 0;


@@ -85,7 +85,7 @@ static inline struct bucket_array *gc_bucket_array(struct bch_dev *ca)
 	return rcu_dereference_check(ca->buckets_gc,
 				     !ca->fs ||
 				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
-				     lockdep_is_held(&ca->fs->state_lock) ||
+				     lockdep_is_held(&ca->fs->gc_lock) ||
 				     lockdep_is_held(&ca->bucket_lock));
 }
@@ -103,7 +103,7 @@ static inline struct bucket_gens *bucket_gens(struct bch_dev *ca)
 	return rcu_dereference_check(ca->bucket_gens,
 				     !ca->fs ||
 				     percpu_rwsem_is_held(&ca->fs->mark_lock) ||
-				     lockdep_is_held(&ca->fs->state_lock) ||
+				     lockdep_is_held(&ca->fs->gc_lock) ||
 				     lockdep_is_held(&ca->bucket_lock));
 }


@@ -68,12 +68,12 @@ static const char * const disk_accounting_type_strs[] = {
 	NULL
 };
 
-static inline void accounting_key_init(struct bkey_i *k, struct disk_accounting_pos pos,
+static inline void accounting_key_init(struct bkey_i *k, struct disk_accounting_pos *pos,
 				       s64 *d, unsigned nr)
 {
 	struct bkey_i_accounting *acc = bkey_accounting_init(k);
 
-	acc->k.p = disk_accounting_pos_to_bpos(&pos);
+	acc->k.p = disk_accounting_pos_to_bpos(pos);
 	set_bkey_val_u64s(&acc->k, sizeof(struct bch_accounting) / sizeof(u64) + nr);
 	memcpy_u64s_small(acc->v.d, d, nr);
@@ -94,7 +94,7 @@ int bch2_disk_accounting_mod(struct btree_trans *trans,
 	struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
 
-	accounting_key_init(&k_i.k, *k, d, nr);
+	accounting_key_init(&k_i.k, k, d, nr);
 
 	return likely(!gc)
 		? bch2_trans_update_buffered(trans, BTREE_ID_accounting, &k_i.k)
@@ -330,7 +330,9 @@ int bch2_fs_replicas_usage_read(struct bch_fs *c, darray_char *usage)
 		if (!accounting_to_replicas(&u.r.r, i->pos))
 			continue;
 
-		bch2_accounting_mem_read_counters(acc, i - acc->k.data, &u.r.sectors, 1, false);
+		u64 sectors;
+		bch2_accounting_mem_read_counters(acc, i - acc->k.data, &sectors, 1, false);
+		u.r.sectors = sectors;
 
 		ret = darray_make_room(usage, replicas_usage_bytes(&u.r));
 		if (ret)
@@ -498,7 +500,7 @@ int bch2_gc_accounting_done(struct bch_fs *c)
 			memset(&trans->fs_usage_delta, 0, sizeof(trans->fs_usage_delta));
 
 			struct { __BKEY_PADDED(k, BCH_ACCOUNTING_MAX_COUNTERS); } k_i;
-			accounting_key_init(&k_i.k, acc_k, src_v, nr);
+			accounting_key_init(&k_i.k, &acc_k, src_v, nr);
 
 			bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(&k_i.k), false);
 
 			preempt_disable();
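Two of the changes above avoid forming awkward addresses: disk_accounting_pos is now passed by pointer rather than copied by value, and bch2_fs_replicas_usage_read() reads counters into a local u64 before assigning to u.r.sectors, plausibly because the replicas entry is a packed layout where &u.r.sectors would be a misaligned pointer (GCC warns with -Waddress-of-packed-member). A sketch of that local-variable pattern, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

struct __attribute__((packed)) usage_entry {
	uint8_t  tag;
	uint64_t sectors;	/* misaligned because of the packed u8 */
};

/* Callee expecting an ordinary, naturally aligned uint64_t pointer. */
static void read_counter(uint64_t *dst)
{
	*dst = 42;
}

int main(void)
{
	struct usage_entry u = { 0 };

	/* read_counter(&u.sectors) would hand out a misaligned pointer.
	 * Instead, read into a local and assign through the packed
	 * lvalue, letting the compiler emit a safe unaligned store: */
	uint64_t sectors;
	read_counter(&sectors);
	u.sectors = sectors;

	printf("%llu\n", (unsigned long long)u.sectors);
	return 0;
}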


@@ -2,6 +2,8 @@
 #ifndef _BCACHEFS_FS_COMMON_H
 #define _BCACHEFS_FS_COMMON_H
 
+#include "dirent.h"
+
 struct posix_acl;
 
 #define BCH_CREATE_TMPFILE		(1U << 0)


@@ -678,7 +678,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
 	bch2_pagecache_add_get(inode);
 
 	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
-				FGP_WRITEBEGIN | fgf_set_order(len),
+				FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
 				mapping_gfp_mask(mapping));
 	if (IS_ERR_OR_NULL(folio))
 		goto err_unlock;
@@ -820,8 +820,9 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	darray_init(&fs);
 
 	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
-					       FGP_WRITEBEGIN | fgf_set_order(len),
-					       mapping_gfp_mask(mapping), &fs);
+					       FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
+					       mapping_gfp_mask(mapping),
+					       &fs);
 	if (ret)
 		goto out;
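This substitution looks behavior-preserving apart from dropping the large-folio hint: in the upstream kernel's include/linux/pagemap.h, FGP_WRITEBEGIN is defined as exactly this flag combination, and fgf_set_order(len) only encodes a preferred folio allocation order into otherwise-unused high flag bits. Presumably the userspace shim in bcachefs-tools predates those helpers. For comparison:

/* include/linux/pagemap.h (upstream kernel): */
#define FGP_WRITEBEGIN	(FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)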


@@ -193,8 +193,6 @@ int bch2_run_online_recovery_passes(struct bch_fs *c)
 {
 	int ret = 0;
 
-	down_read(&c->state_lock);
-
 	for (unsigned i = 0; i < ARRAY_SIZE(recovery_pass_fns); i++) {
 		struct recovery_pass_fn *p = recovery_pass_fns + i;
@@ -210,8 +208,6 @@ int bch2_run_online_recovery_passes(struct bch_fs *c)
 			break;
 	}
 
-	up_read(&c->state_lock);
-
 	return ret;
 }


@@ -57,13 +57,17 @@
 	  BCH_FSCK_ERR_btree_bitmap_not_marked)			\
 	x(disk_accounting_v2,					\
 	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
+	  BCH_FSCK_ERR_bkey_version_in_future,			\
+	  BCH_FSCK_ERR_dev_usage_buckets_wrong,			\
+	  BCH_FSCK_ERR_dev_usage_sectors_wrong,			\
+	  BCH_FSCK_ERR_dev_usage_fragmented_wrong,		\
 	  BCH_FSCK_ERR_accounting_mismatch)
 
 #define DOWNGRADE_TABLE()					\
 	x(bucket_stripe_sectors,				\
 	  0)							\
 	x(disk_accounting_v2,					\
-	  BIT_ULL(BCH_RECOVERY_PASS_check_alloc_info),		\
+	  BIT_ULL(BCH_RECOVERY_PASS_check_allocations),		\
 	  BCH_FSCK_ERR_dev_usage_buckets_wrong,			\
 	  BCH_FSCK_ERR_dev_usage_sectors_wrong,			\
 	  BCH_FSCK_ERR_dev_usage_fragmented_wrong,		\
@@ -187,7 +191,11 @@ static int downgrade_table_extra(struct bch_fs *c, darray_char *table)
 			if (ret)
 				return ret;
 
-			__set_bit_le64(BCH_RECOVERY_PASS_STABLE_check_allocations, dst->recovery_passes);
+			/* open coded __set_bit_le64, as dst is packed and
+			 * dst->recovery_passes is misaligned */
+			unsigned b = BCH_RECOVERY_PASS_STABLE_check_allocations;
+			dst->recovery_passes[b / 64] |= cpu_to_le64(BIT_ULL(b % 64));
+
 			dst->errors[nr_errors++] = cpu_to_le16(BCH_FSCK_ERR_alloc_key_dirty_sectors_wrong);
 		}
 		break;
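The open coding works because an assignment through the packed lvalue lets the compiler emit an unaligned read-modify-write, whereas passing dst->recovery_passes as a plain u64 * to the generic __set_bit_le64() would discard the packed attribute. A standalone userspace version of the same bit math, with htole64()/le64toh() (glibc endian.h) standing in for cpu_to_le64()/le64_to_cpu():

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct __attribute__((packed)) entry {
	uint8_t  pad;			/* forces the array to be misaligned */
	uint64_t recovery_passes[2];	/* little-endian words */
};

int main(void)
{
	struct entry dst = { 0 };
	unsigned b = 70;		/* word 1, bit 6 */

	/* |= through the packed lvalue: safe unaligned RMW,
	 * equivalent to BIT_ULL(b % 64) in the hunk above */
	dst.recovery_passes[b / 64] |= htole64(1ULL << (b % 64));

	printf("word1 = 0x%016llx\n",
	       (unsigned long long)le64toh(dst.recovery_passes[1]));
	return 0;
}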


@@ -697,19 +697,14 @@
 	}								\
 } while (0)
 
-#define per_cpu_sum(_p)							\
-({									\
-	typeof(*_p) _ret = 0;						\
-									\
-	int cpu;							\
-	for_each_possible_cpu(cpu)					\
-		_ret += *per_cpu_ptr(_p, cpu);				\
-	_ret;								\
-})
-
 static inline u64 percpu_u64_get(u64 __percpu *src)
 {
-	return per_cpu_sum(src);
+	u64 ret = 0;
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		ret += *per_cpu_ptr(src, cpu);
+	return ret;
 }
 
 static inline void percpu_u64_set(u64 __percpu *dst, u64 src)
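The update drops the per_cpu_sum() macro and open-codes the summation in percpu_u64_get(). A userspace stand-in for the pattern, modeling a per-cpu u64 as one slot per possible CPU:

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

static uint64_t counter[NR_CPUS];	/* one slot per "cpu" */

/* Sum over all possible CPUs, like the open-coded percpu_u64_get(). */
static uint64_t percpu_u64_get(const uint64_t *src)
{
	uint64_t ret = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		ret += src[cpu];	/* stands in for *per_cpu_ptr(src, cpu) */
	return ret;
}

int main(void)
{
	counter[0] = 10;
	counter[3] = 32;
	printf("%llu\n", (unsigned long long)percpu_u64_get(counter)); /* 42 */
	return 0;
}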