Update bcachefs sources to 100a4d92ca71 bcachefs: Split out journal flags for low on space

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2025-09-29 14:53:30 -04:00
parent 0a476a0f49
commit 9dec020282
12 changed files with 104 additions and 93 deletions

View File

@ -1 +1 @@
446f76b78b1e368462bf5b4d772777ce444fe0a5
100a4d92ca71a2bce5d68f2c6916b1c23607a429

View File

@ -461,7 +461,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
commit_flags |= BCH_WATERMARK_reclaim;
if (ck->journal.seq != journal_last_seq(j) ||
!test_bit(JOURNAL_space_low, &c->journal.flags))
!journal_low_on_space(&c->journal))
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
struct bkey_s_c btree_k = bch2_btree_iter_peek_slot(&b_iter);

View File

@ -1191,12 +1191,12 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
flags |= watermark;
if (watermark < BCH_WATERMARK_reclaim &&
test_bit(JOURNAL_space_low, &c->journal.flags)) {
journal_low_on_space(&c->journal)) {
if (flags & BCH_TRANS_COMMIT_journal_reclaim)
return ERR_PTR(-BCH_ERR_journal_reclaim_would_deadlock);
ret = drop_locks_do(trans,
({ wait_event(c->journal.wait, !test_bit(JOURNAL_space_low, &c->journal.flags)); 0; }));
({ wait_event(c->journal.wait, !journal_low_on_space(&c->journal)); 0; }));
if (ret)
return ERR_PTR(ret);
}

View File

@ -837,16 +837,6 @@ unsigned bch2_bkey_replicas(struct bch_fs *c, struct bkey_s_c k)
return replicas;
}
/*
 * Durability contribution of one extent pointer: cached copies contribute
 * nothing; erasure-coded copies contribute the stripe redundancy plus the
 * copy itself; otherwise the device's configured durability applies.
 */
static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
	unsigned durability;

	if (p->ptr.cached)
		return 0;

	if (p->has_ec)
		durability = p->ec.redundancy + 1;
	else
		durability = ca->mi.durability;

	return durability;
}
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
struct bch_dev *ca = bch2_dev_rcu_noerror(c, p->ptr.dev);

View File

@ -603,6 +603,17 @@ void bch2_bkey_propagate_incompressible(struct bkey_i *, struct bkey_s_c);
unsigned bch2_bkey_sectors_compressed(struct bkey_s_c);
unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
/*
 * How many failures this pointer can survive: 0 for cached pointers,
 * ec.redundancy + 1 when the copy lives in an erasure-coded stripe,
 * else the member device's durability setting.
 */
static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent_ptr_decoded *p)
{
	/* Cached pointers never count towards durability */
	if (p->ptr.cached)
		return 0;

	return p->has_ec ? p->ec.redundancy + 1 : ca->mi.durability;
}
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);

View File

@ -650,82 +650,82 @@ static void bch2_rbio_retry(struct work_struct *work)
.subvol = rbio->subvol,
.inum = rbio->read_pos.inode,
};
u64 read_offset = rbio->read_pos.offset;
struct bch_io_failures failed = { .nr = 0 };
CLASS(btree_trans, trans)(c);
struct bkey_buf sk;
bch2_bkey_buf_init(&sk);
bkey_init(&sk.k->k);
trace_io_read_retry(&rbio->bio);
this_cpu_add(c->counters[BCH_COUNTER_io_read_retry],
bvec_iter_sectors(rbio->bvec_iter));
get_rbio_extent(trans, rbio, &sk);
{
CLASS(btree_trans, trans)(c);
if (!bkey_deleted(&sk.k->k) &&
bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid))
bch2_mark_io_failure(&failed, &rbio->pick,
rbio->ret == -BCH_ERR_data_read_retry_csum_err);
struct bkey_buf sk;
bch2_bkey_buf_init(&sk);
bkey_init(&sk.k->k);
get_rbio_extent(trans, rbio, &sk);
if (!rbio->split) {
rbio->bio.bi_status = 0;
rbio->ret = 0;
}
if (!bkey_deleted(&sk.k->k) &&
bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid))
bch2_mark_io_failure(&failed, &rbio->pick,
rbio->ret == -BCH_ERR_data_read_retry_csum_err);
unsigned subvol = rbio->subvol;
struct bpos read_pos = rbio->read_pos;
rbio = bch2_rbio_free(rbio);
flags |= BCH_READ_in_retry;
flags &= ~BCH_READ_may_promote;
flags &= ~BCH_READ_last_fragment;
flags |= BCH_READ_must_clone;
int ret = rbio->data_update
? bch2_read_retry_nodecode(trans, rbio, iter, &failed, flags)
: __bch2_read(trans, rbio, iter, inum, &failed, &sk, flags);
if (ret) {
rbio->ret = ret;
rbio->bio.bi_status = BLK_STS_IOERR;
}
if (failed.nr || ret) {
CLASS(printbuf, buf)();
bch2_log_msg_start(c, &buf);
lockrestart_do(trans,
bch2_inum_offset_err_msg_trans(trans, &buf,
(subvol_inum) { subvol, read_pos.inode },
read_pos.offset << 9));
if (rbio->data_update)
prt_str(&buf, "(internal move) ");
prt_str(&buf, "data read error, ");
if (!ret) {
prt_str(&buf, "successful retry");
if (rbio->self_healing)
prt_str(&buf, ", self healing");
} else
prt_str(&buf, bch2_err_str(ret));
prt_newline(&buf);
if (!bkey_deleted(&sk.k->k)) {
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(sk.k));
prt_newline(&buf);
if (!rbio->split) {
rbio->bio.bi_status = 0;
rbio->ret = 0;
}
bch2_io_failures_to_text(&buf, c, &failed);
rbio = bch2_rbio_free(rbio);
bch2_print_str_ratelimited(c, KERN_ERR, buf.buf);
flags |= BCH_READ_in_retry;
flags &= ~BCH_READ_may_promote;
flags &= ~BCH_READ_last_fragment;
flags |= BCH_READ_must_clone;
int ret = rbio->data_update
? bch2_read_retry_nodecode(trans, rbio, iter, &failed, flags)
: __bch2_read(trans, rbio, iter, inum, &failed, &sk, flags);
if (ret) {
rbio->ret = ret;
rbio->bio.bi_status = BLK_STS_IOERR;
}
if (failed.nr || ret) {
CLASS(printbuf, buf)();
bch2_log_msg_start(c, &buf);
lockrestart_do(trans,
bch2_inum_offset_err_msg_trans(trans, &buf, inum, read_offset << 9));
if (rbio->data_update)
prt_str(&buf, "(internal move) ");
prt_str(&buf, "data read error, ");
if (!ret) {
prt_str(&buf, "successful retry");
if (rbio->self_healing)
prt_str(&buf, ", self healing");
} else
prt_str(&buf, bch2_err_str(ret));
prt_newline(&buf);
if (!bkey_deleted(&sk.k->k)) {
bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(sk.k));
prt_newline(&buf);
}
bch2_io_failures_to_text(&buf, c, &failed);
bch2_print_str_ratelimited(c, KERN_ERR, buf.buf);
}
bch2_bkey_buf_exit(&sk, c);
/* drop trans before calling rbio_done() */
}
bch2_rbio_done(rbio);
bch2_bkey_buf_exit(&sk, c);
}
static void bch2_rbio_error(struct bch_read_bio *rbio,

View File

@ -684,7 +684,8 @@ out:
goto retry;
}
if (mutex_trylock(&j->reclaim_lock)) {
if (journal_low_on_space(j) &&
mutex_trylock(&j->reclaim_lock)) {
bch2_journal_reclaim(j);
mutex_unlock(&j->reclaim_lock);
}

View File

@ -121,6 +121,12 @@ static inline void journal_wake(struct journal *j)
closure_wake_up(&j->async_wait);
}
static inline bool journal_low_on_space(struct journal *j)
{
return test_bit(JOURNAL_low_on_space, &j->flags) ||
test_bit(JOURNAL_low_on_pin, &j->flags);
}
/* Sequence number of oldest dirty journal entry */
static inline u64 journal_last_seq(struct journal *j)

View File

@ -72,7 +72,9 @@ void bch2_journal_set_watermark(struct journal *j)
track_event_change(&c->times[BCH_TIME_blocked_write_buffer_full], low_on_wb))
trace_and_count(c, journal_full, c);
mod_bit(JOURNAL_space_low, &j->flags, low_on_space || low_on_pin);
mod_bit(JOURNAL_low_on_space, &j->flags, low_on_space);
mod_bit(JOURNAL_low_on_pin, &j->flags, low_on_pin);
mod_bit(JOURNAL_low_on_wb, &j->flags, low_on_wb);
swap(watermark, j->watermark);
if (watermark > j->watermark)
@ -716,7 +718,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
msecs_to_jiffies(c->opts.journal_reclaim_delay)))
min_nr = 1;
if (j->watermark != BCH_WATERMARK_stripe)
if (journal_low_on_space(j))
min_nr = 1;
size_t btree_cache_live = bc->live[0].nr + bc->live[1].nr;

View File

@ -144,7 +144,9 @@ enum journal_space_from {
x(running) \
x(may_skip_flush) \
x(need_flush_write) \
x(space_low)
x(low_on_space) \
x(low_on_pin) \
x(low_on_wb)
enum journal_flags {
#define x(n) JOURNAL_##n,

View File

@ -392,8 +392,6 @@ err:
bch2_bkey_buf_exit(&u->k, c);
kfree(u);
if (bch2_err_matches(ret, BCH_ERR_data_update_done))
return 0;
return ret;
}
@ -566,16 +564,17 @@ root_err:
else
ret2 = bch2_btree_node_scrub(trans, btree_id, level, k, data_opts.read_dev);
if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
continue;
if (bch2_err_matches(ret2, BCH_ERR_data_update_done))
ret2 = 0;
if (bch2_err_matches(ret2, ENOMEM)) {
/* memory allocation failure, wait for some IO to finish */
bch2_move_ctxt_wait_for_io(ctxt);
continue;
}
if (ret2) {
if (bch2_err_matches(ret2, BCH_ERR_transaction_restart))
continue;
if (bch2_err_matches(ret2, ENOMEM)) {
/* memory allocation failure, wait for some IO to finish */
bch2_move_ctxt_wait_for_io(ctxt);
continue;
}
/* XXX signal failure */
goto next;
}
@ -786,6 +785,8 @@ static int __bch2_move_data_phys(struct moving_context *ctxt,
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (bch2_err_matches(ret, BCH_ERR_data_update_done))
ret = 0;
if (ret == -ENOMEM) {
/* memory allocation failure, wait for some IO to finish */
bch2_move_ctxt_wait_for_io(ctxt);

View File

@ -461,10 +461,8 @@ int bch2_set_rebalance_needs_scan_trans(struct btree_trans *trans, u64 inum)
int bch2_set_rebalance_needs_scan(struct bch_fs *c, u64 inum)
{
CLASS(btree_trans, trans)(c);
int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
return commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_set_rebalance_needs_scan_trans(trans, inum));
bch2_rebalance_wakeup(c);
return ret;
}
int bch2_set_fs_needs_rebalance(struct bch_fs *c)