Update bcachefs sources to eab5671b5262 bcachefs: bch2_member_to_text_short_sb(): fix missing newlines

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2025-11-24 20:21:45 -05:00
parent e84b0fbfa1
commit 90629093cf
24 changed files with 219 additions and 233 deletions

View File

@ -1 +1 @@
efd3df255ba56d795750510e79d8d79f7812a029
eab5671b52626036abd5a31e7743c74fb3b59635

View File

@ -87,7 +87,7 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_NOSPC ((__force blk_status_t)3)
#define BLK_STS_TRANSPORT ((__force blk_status_t)4)
#define BLK_STS_TARGET ((__force blk_status_t)5)
#define BLK_STS_NEXUS ((__force blk_status_t)6)
#define BLK_STS_RESV_CONFLICT ((__force blk_status_t)6)
#define BLK_STS_MEDIUM ((__force blk_status_t)7)
#define BLK_STS_PROTECTION ((__force blk_status_t)8)
#define BLK_STS_RESOURCE ((__force blk_status_t)9)
@ -97,6 +97,12 @@ typedef u8 __bitwise blk_status_t;
#define BLK_STS_DM_REQUEUE ((__force blk_status_t)11)
#define BLK_STS_AGAIN ((__force blk_status_t)12)
#define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13)
#define BLK_STS_ZONE_OPEN_RESOURCE ((__force blk_status_t)14)
#define BLK_STS_ZONE_ACTIVE_RESOURCE ((__force blk_status_t)15)
#define BLK_STS_OFFLINE ((__force blk_status_t)16)
#define BLK_STS_DURATION_LIMIT ((__force blk_status_t)17)
#define BLK_STS_INVAL ((__force blk_status_t)19)
#define BIO_INLINE_VECS 4

View File

@ -1175,14 +1175,14 @@ struct bch_fs {
struct mutex fsck_error_counts_lock;
};
static inline int __bch2_err_trace(struct bch_fs *c, int err)
static inline int __bch2_err_throw(struct bch_fs *c, int err)
{
this_cpu_inc(c->counters.now[BCH_COUNTER_error_throw]);
trace_error_throw(c, err, _THIS_IP_);
return err;
}
#define bch_err_throw(_c, _err) __bch2_err_trace(_c, -BCH_ERR_##_err)
#define bch_err_throw(_c, _err) __bch2_err_throw(_c, -BCH_ERR_##_err)
static inline bool bch2_ro_ref_tryget(struct bch_fs *c)
{

View File

@ -460,7 +460,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
unsigned long can_free = 0;
unsigned long freed = 0;
unsigned long touched = 0;
unsigned i, flags;
unsigned i;
unsigned long ret = SHRINK_STOP;
bool trigger_writes = atomic_long_read(&bc->nr_dirty) + nr >= list->nr * 3 / 4;
@ -468,7 +468,7 @@ static unsigned long bch2_btree_cache_scan(struct shrinker *shrink,
return SHRINK_STOP;
mutex_lock(&bc->lock);
flags = memalloc_nofs_save();
guard(memalloc_flags)(PF_MEMALLOC_NOFS);
/*
* It's _really_ critical that we don't free too many btree nodes - we
@ -551,7 +551,6 @@ out:
mutex_unlock(&bc->lock);
out_nounlock:
ret = freed;
memalloc_nofs_restore(flags);
trace_and_count(c, btree_cache_scan, sc->nr_to_scan, can_free, ret);
return ret;
}
@ -571,14 +570,13 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
{
struct btree_cache *bc = &c->btree_cache;
struct btree *b, *t;
unsigned long flags;
shrinker_free(bc->live[1].shrink);
shrinker_free(bc->live[0].shrink);
/* vfree() can allocate memory: */
flags = memalloc_nofs_save();
mutex_lock(&bc->lock);
guard(memalloc_flags)(PF_MEMALLOC_NOFS);
guard(mutex)(&bc->lock);
if (c->verify_data)
list_move(&c->verify_data->list, &bc->live[0].list);
@ -616,9 +614,6 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
kfree(b);
}
mutex_unlock(&bc->lock);
memalloc_nofs_restore(flags);
for (unsigned i = 0; i < ARRAY_SIZE(bc->nr_by_btree); i++)
BUG_ON(bc->nr_by_btree[i]);
BUG_ON(bc->live[0].nr);

View File

@ -146,7 +146,8 @@ static int __btree_err(int ret,
int ret2;
if (ca) {
bch2_mark_btree_validate_failure(failed, ca->dev_idx);
bch2_dev_io_failures_mut(failed, ca->dev_idx)->errcode =
bch_err_throw(c, btree_node_validate_err);
struct extent_ptr_decoded pick;
have_retry = bch2_bkey_pick_read_device(c,
@ -984,7 +985,8 @@ start:
rb->have_ioref = false;
if (bio->bi_status) {
bch2_mark_io_failure(&failed, &rb->pick, false);
bch2_mark_io_failure(&failed, &rb->pick,
__bch2_err_throw(c, -blk_status_to_bch_err(bio->bi_status)));
continue;
}
@ -997,7 +999,8 @@ start:
ret = bch2_btree_node_read_done(c, ca, b, &failed, &buf);
if (ret != -BCH_ERR_btree_node_read_err_want_retry &&
ret != -BCH_ERR_btree_node_read_err_must_retry)
ret != -BCH_ERR_btree_node_read_err_must_retry &&
!bch2_err_matches(ret, BCH_ERR_blockdev_io_error))
break;
}

View File

@ -247,18 +247,16 @@ void bch2_btree_bounce_free(struct bch_fs *c, size_t size, bool used_mempool, vo
void *bch2_btree_bounce_alloc(struct bch_fs *c, size_t size, bool *used_mempool)
{
unsigned flags = memalloc_nofs_save();
void *p;
BUG_ON(size > c->opts.btree_node_size);
guard(memalloc_flags)(PF_MEMALLOC_NOFS);
*used_mempool = false;
p = kvmalloc(size, GFP_NOWAIT|__GFP_ACCOUNT|__GFP_RECLAIMABLE);
void *p = kvmalloc(size, GFP_NOWAIT|__GFP_ACCOUNT|__GFP_RECLAIMABLE);
if (!p) {
*used_mempool = true;
p = mempool_alloc(&c->btree_bounce_pool, GFP_NOFS|__GFP_ACCOUNT|__GFP_RECLAIMABLE);
}
memalloc_nofs_restore(flags);
return p;
}

View File

@ -57,19 +57,7 @@ void bch2_io_failures_to_text(struct printbuf *out,
struct bch_fs *c,
struct bch_io_failures *failed)
{
static const char * const error_types[] = {
"btree validate", "io", "checksum", "ec reconstruct", NULL
};
for (struct bch_dev_io_failures *f = failed->devs;
f < failed->devs + failed->nr;
f++) {
unsigned errflags =
((!!f->failed_btree_validate) << 0) |
((!!f->failed_io) << 1) |
((!!f->failed_csum_nr) << 2) |
((!!f->failed_ec) << 3);
darray_for_each(*failed, f) {
bch2_printbuf_make_room(out, 1024);
scoped_guard(rcu) {
guard(printbuf_atomic)(out);
@ -80,17 +68,15 @@ void bch2_io_failures_to_text(struct printbuf *out,
prt_printf(out, "(invalid device %u)", f->dev);
}
prt_char(out, ' ');
if (!f->csum_nr && !f->ec && !f->errcode)
prt_str(out, " no error - confused");
if (!errflags) {
prt_str(out, "no error - confused");
} else if (is_power_of_2(errflags)) {
prt_bitflags(out, error_types, errflags);
prt_str(out, " error");
} else {
prt_str(out, "errors: ");
prt_bitflags(out, error_types, errflags);
}
if (f->csum_nr)
prt_printf(out, " checksum (%u)", f->csum_nr);
if (f->ec)
prt_str(out, " ec reconstruct");
if (f->errcode)
prt_printf(out, " %s", bch2_err_str(f->errcode));
prt_newline(out);
}
}
@ -98,51 +84,35 @@ void bch2_io_failures_to_text(struct printbuf *out,
struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *f,
unsigned dev)
{
struct bch_dev_io_failures *i;
for (i = f->devs; i < f->devs + f->nr; i++)
if (i->dev == dev)
return i;
return NULL;
return darray_find_p(*f, i, i->dev == dev);
}
void bch2_mark_io_failure(struct bch_io_failures *failed,
struct extent_ptr_decoded *p,
bool csum_error)
{
struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, p->ptr.dev);
if (!f) {
BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
f = &failed->devs[failed->nr++];
memset(f, 0, sizeof(*f));
f->dev = p->ptr.dev;
}
if (p->do_ec_reconstruct)
f->failed_ec = true;
else if (!csum_error)
f->failed_io = true;
else
f->failed_csum_nr++;
}
void bch2_mark_btree_validate_failure(struct bch_io_failures *failed,
unsigned dev)
struct bch_dev_io_failures *bch2_dev_io_failures_mut(struct bch_io_failures *failed,
unsigned dev)
{
struct bch_dev_io_failures *f = bch2_dev_io_failures(failed, dev);
if (!f) {
BUG_ON(failed->nr >= ARRAY_SIZE(failed->devs));
BUG_ON(failed->nr >= ARRAY_SIZE(failed->data));
f = &failed->devs[failed->nr++];
f = &failed->data[failed->nr++];
memset(f, 0, sizeof(*f));
f->dev = dev;
}
f->failed_btree_validate = true;
return f;
}
void bch2_mark_io_failure(struct bch_io_failures *failed,
struct extent_ptr_decoded *p, int err)
{
struct bch_dev_io_failures *f = bch2_dev_io_failures_mut(failed, p->ptr.dev);
if (p->do_ec_reconstruct)
f->ec = true;
else if (err == -BCH_ERR_data_read_retry_csum_err)
f->csum_nr++;
else
f->errcode = err;
}
static inline u64 dev_latency(struct bch_dev *ca)
@ -253,21 +223,19 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
struct bch_dev_io_failures *f =
unlikely(failed) ? bch2_dev_io_failures(failed, p.ptr.dev) : NULL;
if (unlikely(f)) {
p.crc_retry_nr = f->failed_csum_nr;
p.has_ec &= ~f->failed_ec;
p.crc_retry_nr = f->csum_nr;
p.has_ec &= !f->ec;
if (ca) {
have_io_errors |= f->failed_io;
have_io_errors |= f->failed_btree_validate;
have_io_errors |= f->failed_ec;
have_io_errors |= f->errcode != 0;
have_io_errors |= f->ec;
}
have_csum_errors |= !!f->failed_csum_nr;
have_csum_errors |= f->csum_nr != 0;
if (p.has_ec && (f->failed_io || f->failed_csum_nr))
if (p.has_ec && (f->errcode || f->csum_nr))
p.do_ec_reconstruct = true;
else if (f->failed_io ||
f->failed_btree_validate ||
f->failed_csum_nr > c->opts.checksum_err_retry_nr)
else if (f->errcode ||
f->csum_nr > c->opts.checksum_err_retry_nr)
continue;
}

View File

@ -382,11 +382,10 @@ out: \
void bch2_io_failures_to_text(struct printbuf *, struct bch_fs *,
struct bch_io_failures *);
struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *,
unsigned);
void bch2_mark_io_failure(struct bch_io_failures *,
struct extent_ptr_decoded *, bool);
void bch2_mark_btree_validate_failure(struct bch_io_failures *, unsigned);
struct bch_dev_io_failures *bch2_dev_io_failures(struct bch_io_failures *, unsigned);
struct bch_dev_io_failures *bch2_dev_io_failures_mut(struct bch_io_failures *, unsigned);
void bch2_mark_io_failure(struct bch_io_failures *, struct extent_ptr_decoded *, int);
void bch2_mark_dev_io_failure(struct bch_io_failures *, unsigned, int);
int bch2_bkey_pick_read_device(struct bch_fs *, struct bkey_s_c,
struct bch_io_failures *,
struct extent_ptr_decoded *, int);

View File

@ -32,11 +32,10 @@ struct bch_io_failures {
u8 nr;
struct bch_dev_io_failures {
u8 dev;
unsigned failed_csum_nr:6,
failed_io:1,
failed_btree_validate:1,
failed_ec:1;
} devs[BCH_REPLICAS_MAX + 1];
unsigned csum_nr:7;
bool ec:1;
s16 errcode;
} data[BCH_REPLICAS_MAX + 1];
};
#endif /* _BCACHEFS_EXTENTS_TYPES_H */
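Note: the consolidated record above replaces four per-type bitfields with a single errcode plus a checksum-retry counter. A hedged usage sketch (kernel context assumed; the device index and errcode value are purely illustrative):

	/* Record a non-checksum failure for device 0; the per-device slot
	 * is allocated on first use (see bch2_dev_io_failures_mut() above): */
	struct bch_io_failures failed = {};
	struct bch_dev_io_failures *f = bch2_dev_io_failures_mut(&failed, 0);

	f->errcode = -BCH_ERR_BLK_STS_IOERR;	/* illustrative errcode */
	f->csum_nr++;	/* checksum errors are counted separately so retries can be bounded */

	/* lookup side: returns NULL if the device has no recorded failures */
	if (bch2_dev_io_failures(&failed, 0))
		;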

View File

@ -110,7 +110,7 @@ static void move_write(struct data_update *u)
struct bch_read_bio *rbio = &u->rbio;
if (ctxt->stats) {
if (rbio->bio.bi_status)
if (rbio->ret)
atomic64_add(u->rbio.bvec_iter.bi_size >> 9,
&ctxt->stats->sectors_error_uncorrected);
else if (rbio->saw_error)

View File

@ -507,7 +507,7 @@ static inline struct bch_read_bio *bch2_rbio_free(struct bch_read_bio *rbio)
struct bch_read_bio *parent = rbio->parent;
if (unlikely(rbio->promote)) {
if (!rbio->bio.bi_status)
if (!rbio->ret)
promote_start(rbio);
else
promote_free(rbio, -EIO);
@ -600,6 +600,13 @@ static noinline int maybe_poison_extent(struct btree_trans *trans, struct bch_re
return 0;
}
static inline bool data_read_err_should_retry(int err)
{
return bch2_err_matches(err, BCH_ERR_transaction_restart) ||
bch2_err_matches(err, BCH_ERR_data_read_retry) ||
bch2_err_matches(err, BCH_ERR_blockdev_io_error);
}
static noinline int bch2_read_retry_nodecode(struct btree_trans *trans,
struct bch_read_bio *rbio,
struct bvec_iter bvec_iter,
@ -628,13 +635,10 @@ static noinline int bch2_read_retry_nodecode(struct btree_trans *trans,
u->btree_id,
bkey_i_to_s_c(u->k.k),
0, failed, flags, -1);
} while (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, BCH_ERR_data_read_retry));
} while (data_read_err_should_retry(ret));
if (ret) {
rbio->bio.bi_status = BLK_STS_IOERR;
rbio->ret = ret;
}
if (ret)
rbio->ret = ret;
BUG_ON(atomic_read(&rbio->bio.__bi_remaining) != 1);
return ret;
@ -683,9 +687,9 @@ static void bch2_rbio_retry(struct work_struct *work)
get_rbio_extent(trans, rbio, &sk);
if (!bkey_deleted(&sk.k->k) &&
bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid)) {
bch2_mark_io_failure(&failed, &rbio->pick,
rbio->ret == -BCH_ERR_data_read_retry_csum_err);
(bch2_err_matches(rbio->ret, BCH_ERR_data_read_retry_avoid) ||
bch2_err_matches(rbio->ret, BCH_ERR_blockdev_io_error))) {
bch2_mark_io_failure(&failed, &rbio->pick, rbio->ret);
propagate_io_error_to_data_update(c, rbio, &rbio->pick);
}
@ -706,10 +710,8 @@ static void bch2_rbio_retry(struct work_struct *work)
? bch2_read_retry_nodecode(trans, rbio, iter, &failed, flags)
: __bch2_read(trans, rbio, iter, inum, &failed, &sk, flags);
if (ret) {
if (ret)
rbio->ret = ret;
rbio->bio.bi_status = BLK_STS_IOERR;
}
if (failed.nr || ret) {
CLASS(printbuf, buf)();
@ -743,28 +745,21 @@ static void bch2_rbio_retry(struct work_struct *work)
bch2_rbio_done(rbio);
}
static void bch2_rbio_error(struct bch_read_bio *rbio,
int ret, blk_status_t blk_error)
static void bch2_rbio_error(struct bch_read_bio *rbio, int ret)
{
BUG_ON(ret >= 0);
rbio->ret = ret;
rbio->bio.bi_status = blk_error;
rbio->ret = ret;
bch2_rbio_parent(rbio)->saw_error = true;
if (rbio->flags & BCH_READ_in_retry)
return;
if (bch2_err_matches(ret, BCH_ERR_data_read_retry)) {
bch2_rbio_punt(rbio, bch2_rbio_retry,
RBIO_CONTEXT_UNBOUND, system_unbound_wq);
if (data_read_err_should_retry(ret)) {
bch2_rbio_punt(rbio, bch2_rbio_retry, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
} else {
rbio = bch2_rbio_free(rbio);
rbio->ret = ret;
rbio->bio.bi_status = blk_error;
rbio->ret = ret;
bch2_rbio_done(rbio);
}
}
@ -830,44 +825,6 @@ static noinline void bch2_rbio_narrow_crcs(struct bch_read_bio *rbio)
count_event(c, io_read_narrow_crcs_fail);
}
static void bch2_read_decompress_err(struct work_struct *work)
{
struct bch_read_bio *rbio =
container_of(work, struct bch_read_bio, work);
struct bch_fs *c = rbio->c;
CLASS(printbuf, buf)();
bch2_read_err_msg(c, &buf, rbio, rbio->read_pos);
prt_str(&buf, "decompression error");
struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
if (ca)
bch_err_dev_ratelimited(ca, "%s", buf.buf);
else
bch_err_ratelimited(c, "%s", buf.buf);
bch2_rbio_error(rbio, -BCH_ERR_data_read_decompress_err, BLK_STS_IOERR);
}
static void bch2_read_decrypt_err(struct work_struct *work)
{
struct bch_read_bio *rbio =
container_of(work, struct bch_read_bio, work);
struct bch_fs *c = rbio->c;
CLASS(printbuf, buf)();
bch2_read_err_msg(c, &buf, rbio, rbio->read_pos);
prt_str(&buf, "decrypt error");
struct bch_dev *ca = rbio->have_ioref ? bch2_dev_have_ref(c, rbio->pick.ptr.dev) : NULL;
if (ca)
bch_err_dev_ratelimited(ca, "%s", buf.buf);
else
bch_err_ratelimited(c, "%s", buf.buf);
bch2_rbio_error(rbio, -BCH_ERR_data_read_decrypt_err, BLK_STS_IOERR);
}
/* Inner part that may run in process context */
static void __bch2_read_endio(struct work_struct *work)
{
@ -881,11 +838,10 @@ static void __bch2_read_endio(struct work_struct *work)
struct bvec_iter dst_iter = rbio->bvec_iter;
struct bch_extent_crc_unpacked crc = rbio->pick.crc;
struct nonce nonce = extent_nonce(rbio->version, crc);
unsigned nofs_flags;
struct bch_csum csum;
int ret;
nofs_flags = memalloc_nofs_save();
guard(memalloc_flags)(PF_MEMALLOC_NOFS);
/* Reset iterator for checksumming and copying bounced data: */
if (rbio->bounce) {
@ -910,15 +866,16 @@ static void __bch2_read_endio(struct work_struct *work)
*/
if (!csum_good && !rbio->bounce && (rbio->flags & BCH_READ_user_mapped)) {
rbio->flags |= BCH_READ_must_bounce;
bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err_maybe_userspace,
BLK_STS_IOERR);
goto out;
bch2_rbio_error(rbio, bch_err_throw(c, data_read_retry_csum_err_maybe_userspace));
return;
}
bch2_account_io_completion(ca, BCH_MEMBER_ERROR_checksum, 0, csum_good);
if (!csum_good)
goto csum_err;
if (!csum_good) {
bch2_rbio_error(rbio, bch_err_throw(c, data_read_retry_csum_err));
return;
}
/*
* XXX
@ -937,12 +894,16 @@ static void __bch2_read_endio(struct work_struct *work)
if (crc_is_compressed(crc)) {
ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
if (ret)
goto decrypt_err;
if (ret) {
bch2_rbio_error(rbio, bch_err_throw(c, data_read_decrypt_err));
return;
}
if (bch2_bio_uncompress(c, src, dst, dst_iter, crc) &&
!c->opts.no_data_io)
goto decompression_err;
!c->opts.no_data_io) {
bch2_rbio_error(rbio, bch_err_throw(c, data_read_decompress_err));
return;
}
} else {
/* don't need to decrypt the entire bio: */
nonce = nonce_add(nonce, crc.offset << 9);
@ -952,8 +913,10 @@ static void __bch2_read_endio(struct work_struct *work)
src->bi_iter.bi_size = dst_iter.bi_size;
ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
if (ret)
goto decrypt_err;
if (ret) {
bch2_rbio_error(rbio, bch_err_throw(c, data_read_decrypt_err));
return;
}
if (rbio->bounce) {
struct bvec_iter src_iter = src->bi_iter;
@ -978,26 +941,16 @@ static void __bch2_read_endio(struct work_struct *work)
* rbio->crc:
*/
ret = bch2_encrypt_bio(c, crc.csum_type, nonce, src);
if (ret)
goto decrypt_err;
if (ret) {
bch2_rbio_error(rbio, bch_err_throw(c, data_read_decrypt_err));
return;
}
}
if (likely(!(rbio->flags & BCH_READ_in_retry))) {
rbio = bch2_rbio_free(rbio);
bch2_rbio_done(rbio);
}
out:
memalloc_nofs_restore(nofs_flags);
return;
csum_err:
bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_csum_err, BLK_STS_IOERR);
goto out;
decompression_err:
bch2_rbio_punt(rbio, bch2_read_decompress_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
goto out;
decrypt_err:
bch2_rbio_punt(rbio, bch2_read_decrypt_err, RBIO_CONTEXT_UNBOUND, system_unbound_wq);
goto out;
}
static void bch2_read_endio(struct bio *bio)
@ -1016,7 +969,7 @@ static void bch2_read_endio(struct bio *bio)
rbio->bio.bi_end_io = rbio->end_io;
if (unlikely(bio->bi_status)) {
bch2_rbio_error(rbio, -BCH_ERR_data_read_retry_io_err, bio->bi_status);
bch2_rbio_error(rbio, __bch2_err_throw(c, -blk_status_to_bch_err(bio->bi_status)));
return;
}
@ -1025,9 +978,9 @@ static void bch2_read_endio(struct bio *bio)
trace_and_count(c, io_read_reuse_race, &rbio->bio);
if (rbio->flags & BCH_READ_retry_if_stale)
bch2_rbio_error(rbio, -BCH_ERR_data_read_ptr_stale_retry, BLK_STS_AGAIN);
bch2_rbio_error(rbio, bch_err_throw(c, data_read_ptr_stale_retry));
else
bch2_rbio_error(rbio, -BCH_ERR_data_read_ptr_stale_race, BLK_STS_AGAIN);
bch2_rbio_error(rbio, bch_err_throw(c, data_read_ptr_stale_race));
return;
}
@ -1179,7 +1132,7 @@ retry_pick:
ca &&
unlikely(dev_ptr_stale(ca, &pick.ptr))) {
read_from_stale_dirty_pointer(trans, ca, k, pick.ptr);
bch2_mark_io_failure(failed, &pick, false);
bch2_mark_io_failure(failed, &pick, bch_err_throw(c, data_read_ptr_stale_dirty));
propagate_io_error_to_data_update(c, rbio, &pick);
enumerated_ref_put(&ca->io_ref[READ], BCH_DEV_READ_REF_io_read);
goto retry_pick;
@ -1347,7 +1300,7 @@ retry_pick:
if (likely(!rbio->pick.do_ec_reconstruct)) {
if (unlikely(!rbio->have_ioref)) {
ret = bch_err_throw(c, data_read_retry_device_offline);
bch2_rbio_error(rbio, ret, BLK_STS_IOERR);
bch2_rbio_error(rbio, ret);
goto out;
}
@ -1374,7 +1327,7 @@ retry_pick:
/* Attempting reconstruct read: */
if (bch2_ec_read_extent(trans, rbio, k)) {
ret = bch_err_throw(c, data_read_retry_ec_reconstruct_err);
bch2_rbio_error(rbio, ret, BLK_STS_IOERR);
bch2_rbio_error(rbio, ret);
goto out;
}
@ -1395,9 +1348,9 @@ out:
ret = rbio->ret;
rbio = bch2_rbio_free(rbio);
if (bch2_err_matches(ret, BCH_ERR_data_read_retry_avoid)) {
bch2_mark_io_failure(failed, &pick,
ret == -BCH_ERR_data_read_retry_csum_err);
if (bch2_err_matches(ret, BCH_ERR_data_read_retry_avoid) ||
bch2_err_matches(ret, BCH_ERR_blockdev_io_error)) {
bch2_mark_io_failure(failed, &pick, ret);
propagate_io_error_to_data_update(c, rbio, &pick);
}
@ -1408,8 +1361,7 @@ err:
if (flags & BCH_READ_in_retry)
return ret;
orig->bio.bi_status = BLK_STS_IOERR;
orig->ret = ret;
orig->ret = ret;
goto out_read_done;
hole:
@ -1518,9 +1470,7 @@ err:
if (ret == -BCH_ERR_data_read_retry_csum_err_maybe_userspace)
flags |= BCH_READ_must_bounce;
if (ret &&
!bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
!bch2_err_matches(ret, BCH_ERR_data_read_retry))
if (ret && !data_read_err_should_retry(ret))
break;
}
@ -1533,8 +1483,7 @@ err:
bch_err_ratelimited(c, "%s", buf.buf);
}
rbio->bio.bi_status = BLK_STS_IOERR;
rbio->ret = ret;
rbio->ret = ret;
if (!(flags & BCH_READ_in_retry))
bch2_rbio_done(rbio);

View File

@ -1503,15 +1503,14 @@ static void __bch2_write(struct bch_write_op *op)
struct bch_fs *c = op->c;
struct write_point *wp = NULL;
struct bio *bio = NULL;
unsigned nofs_flags;
int ret;
nofs_flags = memalloc_nofs_save();
guard(memalloc_flags)(PF_MEMALLOC_NOFS);
if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
bch2_nocow_write(op);
if (op->flags & BCH_WRITE_submitted)
goto out_nofs_restore;
return;
}
again:
memset(&op->failed, 0, sizeof(op->failed));
@ -1608,8 +1607,6 @@ err:
bch2_write_queue(op, wp);
continue_at(&op->cl, bch2_write_index, NULL);
}
out_nofs_restore:
memalloc_nofs_restore(nofs_flags);
}
static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)

View File

@ -71,3 +71,17 @@ const char *bch2_blk_status_to_str(blk_status_t status)
return "device removed";
return blk_status_to_str(status);
}
enum bch_errcode blk_status_to_bch_err(blk_status_t err)
{
if (!err)
return 0;
switch (err) {
#undef BLK_STS
#define BLK_STS(n) case BLK_STS_##n: return BCH_ERR_BLK_STS_##n;
BLK_ERRS()
#undef BLK_STS
default: return BCH_ERR_BLK_STS_UNKNOWN;
}
}
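Note: blk_status_to_bch_err() gets its switch body by re-expanding the BLK_ERRS() list with a case-generating BLK_STS() definition. A minimal, self-contained sketch of that x-macro pattern (all names here are invented for illustration, not the real macros):

	#include <stdio.h>

	#define DEMO_ERRS()	\
		DEMO(TIMEOUT)	\
		DEMO(NOSPC)	\
		DEMO(IOERR)

	/* first expansion: declare one enum value per list entry */
	#define DEMO(n) DEMO_ERR_##n,
	enum demo_err { DEMO_ERRS() };
	#undef DEMO

	/* second expansion: one switch case per list entry */
	static const char *demo_err_str(enum demo_err e)
	{
		switch (e) {
	#define DEMO(n) case DEMO_ERR_##n: return #n;
		DEMO_ERRS()
	#undef DEMO
		}
		return "unknown";
	}

	int main(void)
	{
		printf("%s\n", demo_err_str(DEMO_ERR_NOSPC));	/* prints NOSPC */
		return 0;
	}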

View File

@ -2,7 +2,37 @@
#ifndef _BCACHEFS_ERRCODE_H
#define _BCACHEFS_ERRCODE_H
/* we're getting away from reusing bi_status, this should go away */
#define BLK_STS_REMOVED ((__force blk_status_t)128)
#define BLK_ERRS() \
BLK_STS(NOTSUPP) \
BLK_STS(TIMEOUT) \
BLK_STS(NOSPC) \
BLK_STS(TRANSPORT) \
BLK_STS(TARGET) \
BLK_STS(RESV_CONFLICT) \
BLK_STS(MEDIUM) \
BLK_STS(PROTECTION) \
BLK_STS(RESOURCE) \
BLK_STS(IOERR) \
BLK_STS(DM_REQUEUE) \
BLK_STS(AGAIN) \
BLK_STS(DEV_RESOURCE) \
BLK_STS(ZONE_OPEN_RESOURCE) \
BLK_STS(ZONE_ACTIVE_RESOURCE) \
BLK_STS(OFFLINE) \
BLK_STS(DURATION_LIMIT) \
BLK_STS(INVAL) \
BLK_STS(REMOVED) \
#define BLK_STS(n) \
x(BCH_ERR_blockdev_io_error, BLK_STS_##n)
#define BCH_ERRCODES() \
x(EIO, blockdev_io_error) \
BLK_ERRS() \
x(BCH_ERR_blockdev_io_error, BLK_STS_UNKNOWN) \
x(ERANGE, ERANGE_option_too_small) \
x(ERANGE, ERANGE_option_too_big) \
x(ERANGE, projid_too_big) \
@ -309,6 +339,7 @@
x(EIO, journal_flush_err) \
x(EIO, journal_write_err) \
x(EIO, btree_node_read_err) \
x(EIO, btree_node_validate_err) \
x(BCH_ERR_btree_node_read_err, btree_node_read_err_cached) \
x(EIO, sb_not_downgraded) \
x(EIO, btree_node_write_all_failed) \
@ -355,10 +386,11 @@
x(BCH_ERR_data_read_retry_avoid,data_read_retry_ec_reconstruct_err) \
x(BCH_ERR_data_read_retry_avoid,data_read_retry_csum_err) \
x(BCH_ERR_data_read_retry, data_read_retry_csum_err_maybe_userspace)\
x(BCH_ERR_data_read, data_read_decompress_err) \
x(BCH_ERR_data_read, data_read_decrypt_err) \
x(BCH_ERR_data_read_retry_avoid,data_read_decompress_err) \
x(BCH_ERR_data_read_retry_avoid,data_read_decrypt_err) \
x(BCH_ERR_data_read, data_read_ptr_stale_race) \
x(BCH_ERR_data_read_retry, data_read_ptr_stale_retry) \
x(BCH_ERR_data_read_retry, data_read_ptr_stale_dirty) \
x(BCH_ERR_data_read, data_read_no_encryption_key) \
x(BCH_ERR_data_read, data_read_buffer_too_small) \
x(BCH_ERR_data_read, data_read_key_overwritten) \
@ -417,9 +449,8 @@ static inline long bch2_err_class(long err)
return err < 0 ? __bch2_err_class(err) : err;
}
#define BLK_STS_REMOVED ((__force blk_status_t)128)
#include <linux/blk_types.h>
const char *bch2_blk_status_to_str(blk_status_t);
enum bch_errcode blk_status_to_bch_err(blk_status_t);
#endif /* _BCACHEFS_ERRCODE_H */
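Note: with x(EIO, blockdev_io_error) as the parent entry and every BLK_STS_* errcode declared beneath it, the whole family can be matched at either level. A hedged illustration, assuming bch2_err_matches() and bch2_err_class() from the existing errcode machinery:

	int err = -BCH_ERR_BLK_STS_TIMEOUT;	/* e.g. from blk_status_to_bch_err() */

	/* true: BLK_STS_TIMEOUT is declared under BCH_ERR_blockdev_io_error */
	bool blkdev = bch2_err_matches(err, BCH_ERR_blockdev_io_error);

	/* also true: blockdev_io_error is itself declared under EIO, so the
	 * error still resolves to -EIO at the syscall boundary via
	 * bch2_err_class() */
	bool eio = bch2_err_matches(err, EIO);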

View File

@ -1133,6 +1133,7 @@ static void bch2_fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
struct bch_dev *ca = bdev_to_bch_dev(c, bdev);
if (ca) {
bool print = true;
CLASS(printbuf, buf)();
__bch2_log_msg_start(ca->name, &buf);
prt_printf(&buf, "offline from block layer\n");
@ -1149,10 +1150,11 @@ static void bch2_fs_bdev_mark_dead(struct block_device *bdev, bool surprise)
__bch2_dev_offline(c, ca);
} else {
bch2_journal_flush(&c->journal);
bch2_fs_emergency_read_only2(c, &buf);
print = bch2_fs_emergency_read_only2(c, &buf);
}
bch2_print_str(c, KERN_ERR, buf.buf);
if (print)
bch2_print_str(c, KERN_ERR, buf.buf);
bch2_dev_put(ca);
}

View File

@ -147,6 +147,7 @@ void bch2_io_error_work(struct work_struct *work)
if (ca->mi.state >= BCH_MEMBER_STATE_ro)
return;
bool print = true;
CLASS(printbuf, buf)();
__bch2_log_msg_start(ca->name, &buf);
@ -158,9 +159,10 @@ void bch2_io_error_work(struct work_struct *work)
prt_printf(&buf, "setting %s ro", dev ? "device" : "filesystem");
if (!dev)
bch2_fs_emergency_read_only2(c, &buf);
print = bch2_fs_emergency_read_only2(c, &buf);
bch2_print_str(c, KERN_ERR, buf.buf);
if (print)
bch2_print_str(c, KERN_ERR, buf.buf);
}
}

View File

@ -1532,7 +1532,7 @@ struct bch_fs *bch2_fs_open(darray_const_str *devices,
if (ret) {
prt_printf(&msg, "error starting filesystem: %s", bch2_err_str(ret));
bch2_print_string_as_lines(KERN_ERR, msg.buf);
} else {
} else if (msg.pos) {
CLASS(printbuf, msg_with_prefix)();
bch2_log_msg_start(c, &msg_with_prefix);
prt_str(&msg_with_prefix, msg.buf);

View File

@ -717,7 +717,6 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
bool kthread = (current->flags & PF_KTHREAD) != 0;
u64 seq_to_flush;
size_t min_nr, min_key_cache, nr_flushed;
unsigned flags;
int ret = 0;
/*
@ -727,7 +726,7 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
* we're holding the reclaim lock:
*/
lockdep_assert_held(&j->reclaim_lock);
flags = memalloc_nofs_save();
guard(memalloc_flags)(PF_MEMALLOC_NOFS);
do {
if (kthread && kthread_should_stop())
@ -781,8 +780,6 @@ static int __bch2_journal_reclaim(struct journal *j, bool direct, bool kicked)
wake_up(&j->reclaim_wait);
} while ((min_nr || min_key_cache) && nr_flushed && !direct);
memalloc_flags_restore(flags);
return ret;
}

View File

@ -300,8 +300,8 @@ static void bch2_member_to_text_short_sb(struct printbuf *out,
prt_newline(out);
}
prt_printf(out, "Device:\t%.*s", (int) sizeof(m->device_name), m->device_name);
prt_printf(out, "Model:\t%.*s", (int) sizeof(m->device_model), m->device_model);
prt_printf(out, "Device:\t%.*s\n", (int) sizeof(m->device_name), m->device_name);
prt_printf(out, "Model:\t%.*s\n", (int) sizeof(m->device_model), m->device_model);
prt_printf(out, "State:\t%s\n",
BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR

View File

@ -268,6 +268,9 @@ static inline void printbuf_reset_keep_tabstops(struct printbuf *buf)
buf->last_field = 0;
buf->indent = 0;
buf->cur_tabstop = 0;
if (buf->size)
printbuf_nul_terminate_reserved(buf);
}
/**

View File

@ -815,4 +815,14 @@ do { \
_ret; \
})
#include <linux/sched/mm.h>
struct memalloc_flags { unsigned flags; };
DEFINE_CLASS(memalloc_flags, struct memalloc_flags,
memalloc_flags_restore(_T.flags),
(struct memalloc_flags) { memalloc_flags_save(_flags) },
unsigned _flags)
#endif /* _BCACHEFS_UTIL_H */
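Note: this DEFINE_CLASS() is what enables the guard(memalloc_flags)(PF_MEMALLOC_NOFS) conversions throughout this commit: guard() and DEFINE_CLASS() come from <linux/cleanup.h>, and memalloc_flags_save()/memalloc_flags_restore() from <linux/sched/mm.h>. A sketch of the resulting usage (function name invented for illustration):

	static void example_nofs_section(void)
	{
		guard(memalloc_flags)(PF_MEMALLOC_NOFS);

		/* allocations in this scope implicitly behave as GFP_NOFS;
		 * the saved flags are restored automatically on every exit
		 * path, replacing each explicit memalloc_nofs_save()/
		 * memalloc_nofs_restore() pair */
	}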

View File

@ -32,10 +32,11 @@ static inline bool bio_full(struct bio *bio, unsigned len)
static void bch2_readpages_end_io(struct bio *bio)
{
struct bch_read_bio *rbio = to_rbio(bio);
struct folio_iter fi;
bio_for_each_folio_all(fi, bio)
folio_end_read(fi.folio, bio->bi_status == BLK_STS_OK);
folio_end_read(fi.folio, !rbio->ret);
bio_put(bio);
}
@ -277,7 +278,7 @@ err:
prt_printf(&buf, "data read error: %s", bch2_err_str(ret));
bch_err_ratelimited(c, "%s", buf.buf);
rbio->bio.bi_status = BLK_STS_IOERR;
rbio->ret = ret;
bio_endio(&rbio->bio);
}
}
@ -379,7 +380,7 @@ int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
blk_finish_plug(&plug);
wait_for_completion(&done);
ret = blk_status_to_errno(rbio->bio.bi_status);
ret = bch2_err_class(rbio->ret);
bio_put(&rbio->bio);
if (ret < 0)

View File

@ -52,10 +52,11 @@ static CLOSURE_CALLBACK(bch2_dio_read_complete)
static void bch2_direct_IO_read_endio(struct bio *bio)
{
struct bch_read_bio *rbio = to_rbio(bio);
struct dio_read *dio = bio->bi_private;
if (bio->bi_status)
dio->ret = blk_status_to_errno(bio->bi_status);
if (rbio->ret)
dio->ret = bch2_err_class(rbio->ret);
closure_put(&dio->cl);
}
@ -153,7 +154,7 @@ start:
ret = bch2_bio_iov_iter_get_pages(bio, iter, 0);
if (ret < 0) {
/* XXX: fault inject this path */
bio->bi_status = BLK_STS_RESOURCE;
to_rbio(bio)->ret = ret;
bio_endio(bio);
break;
}

View File

@ -30,15 +30,26 @@ static const struct {
[BLK_STS_NOSPC] = { -ENOSPC, "critical space allocation" },
[BLK_STS_TRANSPORT] = { -ENOLINK, "recoverable transport" },
[BLK_STS_TARGET] = { -EREMOTEIO, "critical target" },
[BLK_STS_NEXUS] = { -EBADE, "critical nexus" },
[BLK_STS_RESV_CONFLICT] = { -EBADE, "reservation conflict" },
[BLK_STS_MEDIUM] = { -ENODATA, "critical medium" },
[BLK_STS_PROTECTION] = { -EILSEQ, "protection" },
[BLK_STS_RESOURCE] = { -ENOMEM, "kernel resource" },
[BLK_STS_DEV_RESOURCE] = { -EBUSY, "device resource" },
[BLK_STS_AGAIN] = { -EAGAIN, "nonblocking retry" },
[BLK_STS_OFFLINE] = { -ENODEV, "device offline" },
/* device mapper special case, should not leak out: */
[BLK_STS_DM_REQUEUE] = { -EREMCHG, "dm internal retry" },
/* zone device specific errors */
[BLK_STS_ZONE_OPEN_RESOURCE] = { -ETOOMANYREFS, "open zones exceeded" },
[BLK_STS_ZONE_ACTIVE_RESOURCE] = { -EOVERFLOW, "active zones exceeded" },
/* Command duration limit device-side timeout */
[BLK_STS_DURATION_LIMIT] = { -ETIME, "duration limit exceeded" },
[BLK_STS_INVAL] = { -EINVAL, "invalid" },
/* everything else not covered above: */
[BLK_STS_IOERR] = { -EIO, "I/O" },
};