Update bcachefs sources to 504729f99c bcachefs: Allow answering y or n to all fsck errors of given type

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2023-04-10 14:39:18 -04:00
parent 7f102ee83d
commit 807b250927
9 changed files with 170 additions and 27 deletions
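In short: fsck prompts now accept four answers. Lowercase y/n applies only to the error currently being reported, while uppercase Y/N additionally records the answer in the per-error-type fsck_err_state (the new fix field), so later errors of the same type are fixed or ignored without prompting. A minimal standalone sketch of that behaviour, with hypothetical names (illustration only, not bcachefs code):

/*
 * Standalone illustration: 'y'/'n' answer only the current error, while
 * 'Y'/'N' are remembered for the error type, so later occurrences are
 * fixed or skipped without asking again.
 */
#include <stdbool.h>
#include <stdio.h>

enum fix_opt { FIX_ASK, FIX_NO, FIX_YES };

struct err_type_state {
	enum fix_opt	fix;	/* sticky per-error-type answer */
};

static bool should_fix(struct err_type_state *s, char answer)
{
	if (s->fix != FIX_ASK)			/* answered Y/N earlier */
		return s->fix == FIX_YES;

	if (answer == 'Y' || answer == 'N')	/* remember for this type */
		s->fix = answer == 'Y' ? FIX_YES : FIX_NO;

	return answer == 'y' || answer == 'Y';
}

int main(void)
{
	struct err_type_state s = { FIX_ASK };

	printf("%d\n", should_fix(&s, 'Y'));	/* prompted, answered Y: fix */
	printf("%d\n", should_fix(&s, '?'));	/* not prompted again: fix */
	return 0;
}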

View File

@@ -1 +1 @@
-8fd009dd764dabd79e2b42e1c85812a08ad1d6c0
+504729f99c4e1655be1da3e8c62d20b790483eba

View File

@@ -682,9 +682,21 @@ DEFINE_EVENT(bkey, move_extent_finish,
 	TP_ARGS(k)
 );
 
-DEFINE_EVENT(bkey, move_extent_fail,
-	TP_PROTO(const struct bkey *k),
-	TP_ARGS(k)
+TRACE_EVENT(move_extent_fail,
+	TP_PROTO(struct bch_fs *c, const char *msg),
+	TP_ARGS(c, msg),
+
+	TP_STRUCT__entry(
+		__field(dev_t,	dev	)
+		__string(msg,	msg	)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= c->dev;
+		__assign_str(msg, msg);
+	),
+
+	TP_printk("%d:%d %s", MAJOR(__entry->dev), MINOR(__entry->dev), __get_str(msg))
 );
 
 DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,

View File

@@ -7,6 +7,7 @@
 #include "buckets.h"
 #include "data_update.h"
 #include "ec.h"
+#include "error.h"
 #include "extents.h"
 #include "io.h"
 #include "keylist.h"
@@ -92,6 +93,70 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
 	return ret;
 }
 
+static void trace_move_extent_fail2(struct data_update *m,
+				    struct bkey_s_c new,
+				    struct bkey_s_c wrote,
+				    struct bkey_i *insert,
+				    const char *msg)
+{
+	struct bch_fs *c = m->op.c;
+	struct bkey_s_c old = bkey_i_to_s_c(m->k.k);
+	const union bch_extent_entry *entry;
+	struct bch_extent_ptr *ptr;
+	struct extent_ptr_decoded p;
+	struct printbuf buf = PRINTBUF;
+	unsigned i, rewrites_found = 0;
+
+	if (!trace_move_extent_fail_enabled())
+		return;
+
+	prt_str(&buf, msg);
+
+	if (insert) {
+		i = 0;
+		bkey_for_each_ptr_decode(old.k, bch2_bkey_ptrs_c(old), p, entry) {
+			struct bkey_s new_s;
+			new_s.k = (void *) new.k;
+			new_s.v = (void *) new.v;
+
+			if (((1U << i) & m->data_opts.rewrite_ptrs) &&
+			    (ptr = bch2_extent_has_ptr(old, p, bkey_i_to_s(insert))) &&
+			    !ptr->cached)
+				rewrites_found |= 1U << i;
+			i++;
+		}
+	}
+
+	prt_printf(&buf, "\nrewrite ptrs: %u%u%u%u",
+		   (m->data_opts.rewrite_ptrs & (1 << 0)) != 0,
+		   (m->data_opts.rewrite_ptrs & (1 << 1)) != 0,
+		   (m->data_opts.rewrite_ptrs & (1 << 2)) != 0,
+		   (m->data_opts.rewrite_ptrs & (1 << 3)) != 0);
+
+	prt_printf(&buf, "\nrewrites found: %u%u%u%u",
+		   (rewrites_found & (1 << 0)) != 0,
+		   (rewrites_found & (1 << 1)) != 0,
+		   (rewrites_found & (1 << 2)) != 0,
+		   (rewrites_found & (1 << 3)) != 0);
+
+	prt_str(&buf, "\nold: ");
+	bch2_bkey_val_to_text(&buf, c, old);
+
+	prt_str(&buf, "\nnew: ");
+	bch2_bkey_val_to_text(&buf, c, new);
+
+	prt_str(&buf, "\nwrote: ");
+	bch2_bkey_val_to_text(&buf, c, wrote);
+
+	if (insert) {
+		prt_str(&buf, "\ninsert: ");
+		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(insert));
+	}
+
+	trace_move_extent_fail(c, buf.buf);
+
+	printbuf_exit(&buf);
+}
+
 static int __bch2_data_update_index_update(struct btree_trans *trans,
 					   struct bch_write_op *op)
 {
@@ -135,8 +200,11 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
 
 		new = bkey_i_to_extent(bch2_keylist_front(keys));
 
-		if (!bch2_extents_match(k, old))
+		if (!bch2_extents_match(k, old)) {
+			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i),
+						NULL, "no match:");
 			goto nowork;
+		}
 
 		bkey_reassemble(_insert.k, k);
 		insert = _insert.k;
@@ -175,8 +243,10 @@ static int __bch2_data_update_index_update(struct btree_trans *trans,
 
 		if (m->data_opts.rewrite_ptrs &&
 		    !rewrites_found &&
-		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas)
+		    bch2_bkey_durability(c, k) >= m->op.opts.data_replicas) {
+			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "no rewrites found:");
 			goto nowork;
+		}
 
 		/*
 		 * A replica that we just wrote might conflict with a replica
/*
* A replica that we just wrote might conflict with a replica
@@ -190,8 +260,10 @@ restart_drop_conflicting_replicas:
 				goto restart_drop_conflicting_replicas;
 			}
 
-		if (!bkey_val_u64s(&new->k))
+		if (!bkey_val_u64s(&new->k)) {
+			trace_move_extent_fail2(m, k, bkey_i_to_s_c(&new->k_i), insert, "new replicas conflicted:");
 			goto nowork;
+		}
 
 		/* Now, drop pointers that conflict with what we just wrote: */
 		extent_for_each_ptr_decode(extent_i_to_s(new), p, entry)
@@ -294,7 +366,6 @@ nowork:
 		}
 
 		this_cpu_add(c->counters[BCH_COUNTER_move_extent_fail], new->k.size);
-		trace_move_extent_fail(&new->k);
 
 		bch2_btree_iter_advance(&iter);
 		goto next;

View File

@@ -65,10 +65,47 @@ void bch2_io_error(struct bch_dev *ca)
 	//queue_work(system_long_wq, &ca->io_error_work);
 }
 
+enum ask_yn {
+	YN_NO,
+	YN_YES,
+	YN_ALLNO,
+	YN_ALLYES,
+};
+
 #ifdef __KERNEL__
-#define ask_yn()	false
+#define bch2_fsck_ask_yn()	YN_NO
 #else
+
 #include "tools-util.h"
+
+enum ask_yn bch2_fsck_ask_yn(void)
+{
+	char *buf = NULL;
+	size_t buflen = 0;
+	bool ret;
+
+	while (true) {
+		fputs(" (y,n,Y,N) ", stdout);
+		fflush(stdout);
+
+		if (getline(&buf, &buflen, stdin) < 0)
+			die("error reading from standard input");
+
+		if (strlen(buf) != 1)
+			continue;
+
+		switch (buf[0]) {
+		case 'n':
+			return YN_NO;
+		case 'y':
+			return YN_YES;
+		case 'N':
+			return YN_ALLNO;
+		case 'Y':
+			return YN_ALLYES;
+		}
+	}
+
+	free(buf);
+	return ret;
+}
+
 #endif
 
 static struct fsck_err_state *fsck_err_get(struct bch_fs *c, const char *fmt)
@@ -161,14 +198,28 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
 		prt_str(out, ", exiting");
 		ret = -BCH_ERR_fsck_errors_not_fixed;
 	} else if (flags & FSCK_CAN_FIX) {
-		if (c->opts.fix_errors == FSCK_OPT_ASK) {
+		int fix = s && s->fix
+			? s->fix
+			: c->opts.fix_errors;
+
+		if (fix == FSCK_OPT_ASK) {
+			int ask;
+
 			prt_str(out, ": fix?");
 			bch2_print_string_as_lines(KERN_ERR, out->buf);
 			print = false;
-			ret = ask_yn()
+
+			ask = bch2_fsck_ask_yn();
+
+			if (ask >= YN_ALLNO && s)
+				s->fix = ask == YN_ALLNO
+					? FSCK_OPT_NO
+					: FSCK_OPT_YES;
+
+			ret = ask & 1
 				? -BCH_ERR_fsck_fix
 				: -BCH_ERR_fsck_ignore;
-		} else if (c->opts.fix_errors == FSCK_OPT_YES ||
+		} else if (fix == FSCK_OPT_YES ||
 			   (c->opts.nochanges &&
 			    !(flags & FSCK_CAN_IGNORE))) {
 			prt_str(out, ", fixing");
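The ret = ask & 1 test above leans on the enum ordering introduced in error.c (YN_NO = 0, YN_YES = 1, YN_ALLNO = 2, YN_ALLYES = 3): bit 0 says whether to fix this error, and any value >= YN_ALLNO means the answer is also stored in s->fix so it sticks for the rest of this error type. A small sketch of that mapping (illustration only, not bcachefs code):

#include <stdbool.h>
#include <stdio.h>

enum ask_yn { YN_NO, YN_YES, YN_ALLNO, YN_ALLYES };

int main(void)
{
	static const char *name[] = { "n", "y", "N", "Y" };

	for (int ask = YN_NO; ask <= YN_ALLYES; ask++) {
		bool fix      = ask & 1;		/* YN_YES, YN_ALLYES */
		bool remember = ask >= YN_ALLNO;	/* YN_ALLNO, YN_ALLYES */

		printf("%s: fix=%d remember=%d\n", name[ask], fix, remember);
	}
	return 0;
}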

View File

@@ -104,6 +104,7 @@ struct fsck_err_state {
 	u64		nr;
 	bool		ratelimited;
 	int		ret;
+	int		fix;
 	char		*last_msg;
 };

View File

@@ -35,7 +35,13 @@
 #include <trace/events/bcachefs.h>
 #include <trace/events/writeback.h>
 
-static inline loff_t folio_end_pos(struct folio *folio)
+/*
+ * Use u64 for the end pos and sector helpers because if the folio covers the
+ * max supported range of the mapping, the start offset of the next folio
+ * overflows loff_t. This breaks much of the range based processing in the
+ * buffered write path.
+ */
+static inline u64 folio_end_pos(struct folio *folio)
 {
 	return folio_pos(folio) + folio_size(folio);
 }
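The comment above is the key point of the fs-io.c changes: with a signed loff_t, a folio that ends at the very top of the mapping's supported range makes folio_pos + folio_size exceed LLONG_MAX, so pos < end style range checks stop working. A standalone sketch of the arithmetic, assuming loff_t is a signed 64-bit type (illustration only, not bcachefs code):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical folio ending exactly at the maximum supported offset. */
	int64_t  folio_pos  = INT64_MAX - 4095;	/* signed, like loff_t */
	uint64_t folio_size = 4096;

	/* Computed in u64, the end position is still representable... */
	uint64_t end = (uint64_t) folio_pos + folio_size;

	/*
	 * ...but it exceeds INT64_MAX, so the same sum in a signed 64-bit
	 * type would overflow (undefined behaviour), and a "pos < end" loop
	 * over the next folio would misbehave.
	 */
	printf("end = %" PRIu64 "\n", end);
	printf("max = %" PRIu64 "\n", (uint64_t) INT64_MAX);
	return 0;
}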
@@ -50,7 +56,7 @@ static inline loff_t folio_sector(struct folio *folio)
 	return folio_pos(folio) >> 9;
 }
 
-static inline loff_t folio_end_sector(struct folio *folio)
+static inline u64 folio_end_sector(struct folio *folio)
 {
 	return folio_end_pos(folio) >> 9;
 }
@@ -58,12 +64,12 @@ static inline loff_t folio_end_sector(struct folio *folio)
 typedef DARRAY(struct folio *) folios;
 
 static int filemap_get_contig_folios_d(struct address_space *mapping,
-				       loff_t start, loff_t end,
+				       loff_t start, u64 end,
 				       int fgp_flags, gfp_t gfp,
 				       folios *folios)
 {
 	struct folio *f;
-	loff_t pos = start;
+	u64 pos = start;
 	int ret = 0;
 
 	while (pos < end) {
@@ -1819,7 +1825,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	folios folios;
 	struct folio **fi, *f;
 	unsigned copied = 0, f_offset;
-	loff_t end = pos + len, f_pos;
+	u64 end = pos + len, f_pos;
 	loff_t last_folio_pos = inode->v.i_size;
 	int ret = 0;
@@ -1861,7 +1867,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	f_offset = pos - folio_pos(darray_first(folios));
 	darray_for_each(folios, fi) {
 		struct folio *f = *fi;
-		unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+		u64 f_len = min(end, folio_end_pos(f)) - f_pos;
 
 		if (!bch2_folio_create(f, __GFP_NOFAIL)->uptodate) {
 			ret = bch2_folio_set(c, inode_inum(inode), fi,
@@ -1900,7 +1906,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	f_offset = pos - folio_pos(darray_first(folios));
 	darray_for_each(folios, fi) {
 		struct folio *f = *fi;
-		unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+		u64 f_len = min(end, folio_end_pos(f)) - f_pos;
 		unsigned f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
 
 		if (!f_copied) {
@@ -1942,7 +1948,7 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
 	f_offset = pos - folio_pos(darray_first(folios));
 	darray_for_each(folios, fi) {
 		struct folio *f = *fi;
-		unsigned f_len = min(end, folio_end_pos(f)) - f_pos;
+		u64 f_len = min(end, folio_end_pos(f)) - f_pos;
 
 		if (!folio_test_uptodate(f))
 			folio_mark_uptodate(f);
@@ -2774,7 +2780,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 	struct folio *folio;
 	s64 i_sectors_delta = 0;
 	int ret = 0;
-	loff_t end_pos;
+	u64 end_pos;
 
 	folio = filemap_lock_folio(mapping, index);
 	if (!folio) {
@@ -2800,7 +2806,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 	BUG_ON(end <= folio_pos(folio));
 
 	start_offset	= max(start, folio_pos(folio)) - folio_pos(folio);
-	end_offset	= min(end, folio_end_pos(folio)) - folio_pos(folio);
+	end_offset	= min_t(u64, end, folio_end_pos(folio)) - folio_pos(folio);
 
 	/* Folio boundary? Nothing to do */
 	if (start_offset == 0 &&
@@ -2851,7 +2857,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 	WARN_ON_ONCE(folio_pos(folio) >= inode->v.i_size);
 	end_pos = folio_end_pos(folio);
 	if (inode->v.i_size > folio_pos(folio))
-		end_pos = min(inode->v.i_size, end_pos);
+		end_pos = min_t(u64, inode->v.i_size, end_pos);
 	ret = s->s[(end_pos - folio_pos(folio) - 1) >> 9].state >= SECTOR_dirty;
 
 	folio_zero_segment(folio, start_offset, end_offset);

View File

@@ -1896,7 +1896,7 @@ out:
 err_put_super:
 	deactivate_locked_super(sb);
-	return ERR_PTR(ret);
+	return ERR_PTR(bch2_err_class(ret));
 }
 
 static void bch2_kill_sb(struct super_block *sb)

View File

@@ -543,6 +543,8 @@ static int journal_keys_sort(struct bch_fs *c)
 		if (!i || i->ignore)
 			continue;
 
+		cond_resched();
+
 		for_each_jset_key(k, entry, &i->j) {
 			if (keys->nr == keys->size) {
 				__journal_keys_sort(keys);

View File

@@ -602,12 +602,12 @@ SHOW(bch2_fs_counters)
 			counter_since_mount = counter - c->counters_on_mount[BCH_COUNTER_##t];\
 			prt_printf(out, "since mount:");		\
 			prt_tab(out);					\
-			prt_human_readable_u64(out, counter_since_mount << 9);	\
+			prt_human_readable_u64(out, counter_since_mount);	\
 			prt_newline(out);				\
 									\
 			prt_printf(out, "since filesystem creation:");	\
 			prt_tab(out);					\
-			prt_human_readable_u64(out, counter << 9);	\
+			prt_human_readable_u64(out, counter);		\
 			prt_newline(out);				\
 		}
 	BCH_PERSISTENT_COUNTERS()