Update bcachefs sources to 44be8c1da2 fixup! bcachefs: Btree key cache improvements

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2022-10-15 05:29:10 -04:00
parent e0a51ccce8
commit 494421ee6e
6 changed files with 50 additions and 53 deletions

--- a/.bcachefs_revision
+++ b/.bcachefs_revision

@@ -1 +1 @@
-3e93567c5196ef0c80e2ac3c08295130d858dfd6
+44be8c1da2e1d4edb23d5dcf3b522971c245c3f6

--- a/libbcachefs/btree_iter.c
+++ b/libbcachefs/btree_iter.c

@@ -3041,6 +3041,13 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
 
 void bch2_fs_btree_iter_exit(struct bch_fs *c)
 {
+	struct btree_transaction_stats *s;
+
+	for (s = c->btree_transaction_stats;
+	     s < c->btree_transaction_stats + ARRAY_SIZE(c->btree_transaction_stats);
+	     s++)
+		kfree(s->max_paths_text);
+
 	if (c->btree_trans_barrier_initialized)
 		cleanup_srcu_struct(&c->btree_trans_barrier);
 	mempool_exit(&c->btree_trans_mem_pool);

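Note on the hunk above: the new loop walks the fixed-size btree_transaction_stats array by pointer and frees each recorded max_paths_text string; since kfree(NULL) is a no-op, slots that never recorded a path dump need no special-casing. A minimal userspace sketch of the same walk-and-free pattern (hypothetical names, free() standing in for kfree()):

#include <stdlib.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct stats {
	char *max_paths_text;	/* lazily allocated, may stay NULL */
};

static struct stats stats_table[32];

static void stats_exit(void)
{
	struct stats *s;

	/* Pointer-range walk, as in bch2_fs_btree_iter_exit(): */
	for (s = stats_table;
	     s < stats_table + ARRAY_SIZE(stats_table);
	     s++)
		free(s->max_paths_text);	/* free(NULL) is a no-op */
}

int main(void)
{
	stats_table[3].max_paths_text = strdup("example");	/* only one slot populated */
	stats_exit();
	return 0;
}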
--- a/libbcachefs/btree_key_cache.c
+++ b/libbcachefs/btree_key_cache.c

@@ -891,15 +891,20 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 
 	mutex_lock(&bc->lock);
-	rcu_read_lock();
-	tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
-	if (tbl)
-		for (i = 0; i < tbl->size; i++)
-			rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
-				bkey_cached_evict(bc, ck);
-				list_add(&ck->list, &bc->freed_nonpcpu);
-			}
-	rcu_read_unlock();
+
+	/*
+	 * The loop is needed to guard against racing with rehash:
+	 */
+	while (atomic_long_read(&bc->nr_keys)) {
+		rcu_read_lock();
+		tbl = rht_dereference_rcu(bc->table.tbl, &bc->table);
+		if (tbl)
+			for (i = 0; i < tbl->size; i++)
+				rht_for_each_entry_rcu(ck, pos, tbl, i, hash) {
+					bkey_cached_evict(bc, ck);
+					list_add(&ck->list, &bc->freed_nonpcpu);
+				}
+		rcu_read_unlock();
+	}
 
 #ifdef __KERNEL__
 	for_each_possible_cpu(cpu) {

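Why the loop: a single pass over an rhashtable under RCU can race with a concurrent rehash and miss entries that have already been moved to the new table, so the exit path now repeats the walk until bc->nr_keys (which the eviction path keeps in sync) reads zero. A self-contained toy model of the drain-until-empty idiom (all names invented; the skipped slot stands in for an entry hidden by a rehash in flight):

#include <stdio.h>

#define N 8

static int table[N];	/* nonzero = occupied slot */
static int nr_keys;

/* One walk over the table; to model racing with a rehash, the first
 * pass is allowed to miss one slot (index 'skip'). */
static void evict_visible(int skip)
{
	for (int i = 0; i < N; i++)
		if (table[i] && i != skip) {
			table[i] = 0;
			nr_keys--;
		}
}

int main(void)
{
	for (int i = 0; i < N; i++) {
		table[i] = 1;
		nr_keys++;
	}

	int passes = 0;
	/* The fix from the hunk above: loop until the counter reads
	 * zero instead of trusting a single walk. */
	while (nr_keys) {
		evict_visible(passes == 0 ? 3 : -1);
		passes++;
	}

	printf("drained in %d passes, nr_keys = %d\n", passes, nr_keys);
	return 0;
}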
--- a/libbcachefs/fs-io.c
+++ b/libbcachefs/fs-io.c

@@ -606,7 +606,7 @@ static void bch2_page_reservation_put(struct bch_fs *c,
 static int bch2_page_reservation_get(struct bch_fs *c,
 			struct bch_inode_info *inode, struct page *page,
 			struct bch2_page_reservation *res,
-			unsigned offset, unsigned len, bool check_enospc)
+			unsigned offset, unsigned len)
 {
 	struct bch_page_state *s = bch2_page_state_create(page, 0);
 	unsigned i, disk_sectors = 0, quota_sectors = 0;
@@ -626,19 +626,14 @@ static int bch2_page_reservation_get(struct bch_fs *c,
 	}
 
 	if (disk_sectors) {
-		ret = bch2_disk_reservation_add(c, &res->disk,
-						disk_sectors,
-						!check_enospc
-						? BCH_DISK_RESERVATION_NOFAIL
-						: 0);
+		ret = bch2_disk_reservation_add(c, &res->disk, disk_sectors, 0);
 		if (unlikely(ret))
 			return ret;
 	}
 
 	if (quota_sectors) {
 		ret = bch2_quota_reservation_add(c, inode, &res->quota,
-						 quota_sectors,
-						 check_enospc);
+						 quota_sectors, true);
 		if (unlikely(ret)) {
 			struct disk_reservation tmp = {
 				.sectors = disk_sectors
@@ -822,7 +817,7 @@ vm_fault_t bch2_page_mkwrite(struct vm_fault *vmf)
 		}
 	}
 
-	if (bch2_page_reservation_get(c, inode, page, &res, 0, len, true)) {
+	if (bch2_page_reservation_get(c, inode, page, &res, 0, len)) {
 		unlock_page(page);
 		ret = VM_FAULT_SIGBUS;
 		goto out;
@@ -1530,8 +1525,7 @@ out:
 		goto err;
 	}
 
-	ret = bch2_page_reservation_get(c, inode, page, res,
-					offset, len, true);
+	ret = bch2_page_reservation_get(c, inode, page, res, offset, len);
 	if (ret) {
 		if (!PageUptodate(page)) {
 			/*
@@ -1673,7 +1667,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 		}
 
 		ret = bch2_page_reservation_get(c, inode, page, &res,
-						pg_offset, pg_len, true);
+						pg_offset, pg_len);
 		if (ret)
 			goto out;

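The fs-io.c hunks are a dead-parameter removal: every caller of bch2_page_reservation_get() shown here passed check_enospc == true, so the BCH_DISK_RESERVATION_NOFAIL branch and the forwarded quota flag were unreachable, and the constants are now folded into the callee. A hypothetical before/after of the general pattern (invented names, not bcachefs code):

#include <stdbool.h>
#include <stdio.h>

#define RESERVATION_NOFAIL 1	/* invented stand-in for the real flag */

/* Before: a flag that every call site pinned to the same value. */
static int reserve_old(unsigned sectors, bool check_enospc)
{
	unsigned flags = !check_enospc ? RESERVATION_NOFAIL : 0;

	printf("old: sectors=%u flags=%u\n", sectors, flags);
	return 0;
}

/* After: with check_enospc always true, flags is always 0, so the
 * parameter can be dropped at every call site. */
static int reserve_new(unsigned sectors)
{
	printf("new: sectors=%u flags=0\n", sectors);
	return 0;
}

int main(void)
{
	reserve_old(8, true);	/* the only form that ever occurred */
	reserve_new(8);
	return 0;
}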
--- a/libbcachefs/journal_io.c
+++ b/libbcachefs/journal_io.c

@@ -1162,11 +1162,6 @@ int bch2_journal_read(struct bch_fs *c, u64 *blacklist_seq, u64 *start_seq)
 				     le64_to_cpu(i->j.seq)))
 			i->j.last_seq = i->j.seq;
 
-		pr_info("last flush %llu-%llu csum good %u",
-			le64_to_cpu(i->j.last_seq),
-			le64_to_cpu(i->j.seq),
-			i->csum_good);
-
 		last_seq = le64_to_cpu(i->j.last_seq);
 		*blacklist_seq = le64_to_cpu(i->j.seq) + 1;
 		break;

--- a/libbcachefs/quota.c
+++ b/libbcachefs/quota.c

@@ -332,34 +332,20 @@ static int bch2_quota_check_limit(struct bch_fs *c,
 	if (qc->hardlimit &&
 	    qc->hardlimit < n &&
 	    !ignore_hardlimit(q)) {
-		if (mode == KEY_TYPE_QUOTA_PREALLOC)
-			return -EDQUOT;
-
 		prepare_warning(qc, qtype, counter, msgs, HARDWARN);
+		return -EDQUOT;
 	}
 
 	if (qc->softlimit &&
-	    qc->softlimit < n &&
-	    qc->timer &&
-	    ktime_get_real_seconds() >= qc->timer &&
-	    !ignore_hardlimit(q)) {
-		if (mode == KEY_TYPE_QUOTA_PREALLOC)
-			return -EDQUOT;
-
-		prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
-	}
-
-	if (qc->softlimit &&
-	    qc->softlimit < n &&
-	    qc->timer == 0) {
-		if (mode == KEY_TYPE_QUOTA_PREALLOC)
-			return -EDQUOT;
-
-		prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
-
-		/* XXX is this the right one? */
-		qc->timer = ktime_get_real_seconds() +
-			q->limits[counter].warnlimit;
+	    qc->softlimit < n) {
+		if (qc->timer == 0) {
+			qc->timer = ktime_get_real_seconds() + q->limits[counter].timelimit;
+			prepare_warning(qc, qtype, counter, msgs, SOFTWARN);
+		} else if (ktime_get_real_seconds() >= qc->timer &&
+			   !ignore_hardlimit(q)) {
+			prepare_warning(qc, qtype, counter, msgs, SOFTLONGWARN);
+			return -EDQUOT;
+		}
 	}
 
 	return 0;
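The rework above flattens three overlapping if-blocks into the conventional quota state machine: a hard-limit breach warns and returns -EDQUOT immediately; the first soft-limit breach starts a grace timer (now using timelimit, replacing the warnlimit the old code flagged with "XXX is this the right one?") and issues SOFTWARN; a breach after the timer expires issues SOFTLONGWARN and returns -EDQUOT. The KEY_TYPE_QUOTA_PREALLOC early returns are gone. A runnable toy model of the new flow (names and numbers invented; ignore_hardlimit() omitted):

#include <stdio.h>

struct toy_limit {
	long long hard, soft;
	long long timer;	/* 0: grace period not running */
	long long grace;	/* corresponds to timelimit */
};

/* Returns 0 if new usage n is allowed at time 'now', -1 as the toy
 * equivalent of -EDQUOT. */
static int toy_check(struct toy_limit *l, long long n, long long now)
{
	if (l->hard && n > l->hard)
		return -1;			/* hard limit: refuse */

	if (l->soft && n > l->soft) {
		if (l->timer == 0)
			l->timer = now + l->grace;	/* SOFTWARN */
		else if (now >= l->timer)
			return -1;		/* grace expired: refuse */
	}
	return 0;
}

int main(void)
{
	struct toy_limit l = { .hard = 100, .soft = 50, .grace = 7 };

	printf("%d\n", toy_check(&l, 60, 1000));	/* 0: starts grace timer */
	printf("%d\n", toy_check(&l, 60, 1005));	/* 0: still within grace */
	printf("%d\n", toy_check(&l, 60, 1008));	/* -1: grace expired */
	printf("%d\n", toy_check(&l, 150, 1008));	/* -1: hard limit */
	return 0;
}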
@@ -469,7 +455,8 @@ err:
 	return ret;
 }
 
-static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
+static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k,
+			    struct qc_dqblk *qdq)
 {
 	struct bkey_s_c_quota dq;
 	struct bch_memquota_type *q;
@@ -498,6 +485,15 @@ static int __bch2_quota_set(struct bch_fs *c, struct bkey_s_c k)
 			mq->c[i].softlimit = le64_to_cpu(dq.v->c[i].softlimit);
 		}
 
+		if (qdq && qdq->d_fieldmask & QC_SPC_TIMER)
+			mq->c[Q_SPC].timer = cpu_to_le64(qdq->d_spc_timer);
+		if (qdq && qdq->d_fieldmask & QC_SPC_WARNS)
+			mq->c[Q_SPC].warns = cpu_to_le64(qdq->d_spc_warns);
+		if (qdq && qdq->d_fieldmask & QC_INO_TIMER)
+			mq->c[Q_INO].timer = cpu_to_le64(qdq->d_ino_timer);
+		if (qdq && qdq->d_fieldmask & QC_INO_WARNS)
+			mq->c[Q_INO].warns = cpu_to_le64(qdq->d_ino_warns);
+
 		mutex_unlock(&q->lock);
 	}
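The new qdq parameter threads the VFS quotactl request (struct qc_dqblk) into the in-memory quota state, so user-set grace timers and warning counts survive Q_SETQUOTA instead of being dropped; the d_fieldmask bits select exactly which fields the caller intends to change, and the recovery path below passes NULL since there is no user request to apply. A self-contained sketch of the fieldmask pattern (QC_* bit values and struct layouts invented for the demo; the real constants live in linux/quota.h):

#include <stdint.h>
#include <stdio.h>

#define QC_SPC_TIMER	(1 << 0)	/* invented demo values */
#define QC_SPC_WARNS	(1 << 1)
#define QC_INO_TIMER	(1 << 2)
#define QC_INO_WARNS	(1 << 3)

struct dqblk {			/* stands in for struct qc_dqblk */
	unsigned fieldmask;
	uint64_t spc_timer, spc_warns, ino_timer, ino_warns;
};

struct counter {		/* stands in for mq->c[Q_SPC] / mq->c[Q_INO] */
	uint64_t timer, warns;
};

static void apply(struct counter *spc, struct counter *ino,
		  const struct dqblk *qdq)
{
	if (!qdq)		/* recovery path: nothing user-requested */
		return;

	/* Only fields flagged in fieldmask overwrite existing state: */
	if (qdq->fieldmask & QC_SPC_TIMER)
		spc->timer = qdq->spc_timer;
	if (qdq->fieldmask & QC_SPC_WARNS)
		spc->warns = qdq->spc_warns;
	if (qdq->fieldmask & QC_INO_TIMER)
		ino->timer = qdq->ino_timer;
	if (qdq->fieldmask & QC_INO_WARNS)
		ino->warns = qdq->ino_warns;
}

int main(void)
{
	struct counter spc = {0}, ino = {0};
	struct dqblk q = { .fieldmask = QC_SPC_TIMER, .spc_timer = 604800 };

	apply(&spc, &ino, &q);		/* sets only the space grace timer */
	printf("spc.timer=%llu ino.timer=%llu\n",
	       (unsigned long long)spc.timer,
	       (unsigned long long)ino.timer);
	return 0;
}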
@@ -618,7 +614,7 @@ int bch2_fs_quota_read(struct bch_fs *c)
 
 	ret = for_each_btree_key2(&trans, iter, BTREE_ID_quotas,
 			POS_MIN, BTREE_ITER_PREFETCH, k,
-		__bch2_quota_set(c, k)) ?:
+		__bch2_quota_set(c, k, NULL)) ?:
 	      for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
 			POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
 		bch2_fs_quota_read_inode(&trans, &iter, k));
@@ -961,7 +957,7 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
 
 	ret = bch2_trans_do(c, NULL, NULL, 0,
 			    bch2_set_quota_trans(&trans, &new_quota, qdq)) ?:
-		__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i));
+		__bch2_quota_set(c, bkey_i_to_s_c(&new_quota.k_i), qdq);
 
 	return ret;
 }