Update bcachefs sources to 2115a2ffde bcachefs: Kill bch2_verify_bucket_evacuated()

This commit is contained in:
Kent Overstreet 2023-04-21 04:02:48 -04:00
parent 0f37f9f05f
commit fbe4e11c26
10 changed files with 122 additions and 128 deletions

View File

@ -1 +1 @@
fd381c355c92ad0e3abfc49c7675893ed355686f
2115a2ffde28a51adbb8a62fd3c1a3e4dd1b6160

View File

@ -33,23 +33,18 @@ DECLARE_EVENT_CLASS(bpos,
);
DECLARE_EVENT_CLASS(bkey,
TP_PROTO(const struct bkey *k),
TP_ARGS(k),
TP_PROTO(struct bch_fs *c, const char *k),
TP_ARGS(c, k),
TP_STRUCT__entry(
__field(u64, inode )
__field(u64, offset )
__field(u32, size )
__string(k, k )
),
TP_fast_assign(
__entry->inode = k->p.inode;
__entry->offset = k->p.offset;
__entry->size = k->size;
__assign_str(k, k);
),
TP_printk("%llu:%llu len %u", __entry->inode,
__entry->offset, __entry->size)
TP_printk("%s", __get_str(k))
);
DECLARE_EVENT_CLASS(btree_node,
@ -667,19 +662,45 @@ TRACE_EVENT(bucket_invalidate,
/* Moving IO */
/*
 * bucket_evacuate - emitted when evacuation of a bucket begins
 * (called from __bch2_evacuate_bucket; see the move.c hunk below).
 * Records the filesystem dev_t plus the member-device index and
 * bucket number, taken from the bpos (inode = device, offset = bucket).
 */
TRACE_EVENT(bucket_evacuate,
TP_PROTO(struct bch_fs *c, struct bpos bucket),
TP_ARGS(c, bucket),
TP_STRUCT__entry(
__field(dev_t, dev )
__field(u32, dev_idx )
__field(u64, bucket )
),
TP_fast_assign(
__entry->dev = c->dev;
__entry->dev_idx = bucket.inode;
__entry->bucket = bucket.offset;
),
/* "major:minor dev_idx:bucket" */
TP_printk("%d:%d %u:%llu",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->dev_idx, __entry->bucket)
);
/*
 * move_extent - instance of the bkey event class; the key is passed as a
 * pre-formatted string (built by trace_move_extent2() in move.c) rather
 * than a struct bkey, so the tracepoint itself does no formatting.
 */
DEFINE_EVENT(bkey, move_extent,
TP_PROTO(struct bch_fs *c, const char *k),
TP_ARGS(c, k)
);
DEFINE_EVENT(bkey, move_extent_read,
TP_PROTO(const struct bkey *k),
TP_ARGS(k)
TP_PROTO(struct bch_fs *c, const char *k),
TP_ARGS(c, k)
);
DEFINE_EVENT(bkey, move_extent_write,
TP_PROTO(const struct bkey *k),
TP_ARGS(k)
TP_PROTO(struct bch_fs *c, const char *k),
TP_ARGS(c, k)
);
DEFINE_EVENT(bkey, move_extent_finish,
TP_PROTO(const struct bkey *k),
TP_ARGS(k)
TP_PROTO(struct bch_fs *c, const char *k),
TP_ARGS(c, k)
);
TRACE_EVENT(move_extent_fail,
@ -700,8 +721,8 @@ TRACE_EVENT(move_extent_fail,
);
DEFINE_EVENT(bkey, move_extent_alloc_mem_fail,
TP_PROTO(const struct bkey *k),
TP_ARGS(k)
TP_PROTO(struct bch_fs *c, const char *k),
TP_ARGS(c, k)
);
TRACE_EVENT(move_data,

View File

@ -1362,17 +1362,21 @@ static int bch2_check_bucket_gens_key(struct btree_trans *trans,
u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
u64 b;
bool need_update = false;
bool need_update = false, dev_exists;
struct printbuf buf = PRINTBUF;
int ret = 0;
BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
bkey_reassemble(&g.k_i, k);
if (fsck_err_on(!bch2_dev_exists2(c, k.k->p.inode), c,
"bucket_gens key for invalid device:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter, 0);
/* if no bch_dev, skip out whether we repair or not */
dev_exists = bch2_dev_exists2(c, k.k->p.inode);
if (!dev_exists) {
if (fsck_err_on(!dev_exists, c,
"bucket_gens key for invalid device:\n %s",
(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
ret = bch2_btree_delete_at(trans, iter, 0);
}
goto out;
}

View File

@ -1778,7 +1778,8 @@ out:
return;
err:
set_btree_node_noevict(b);
bch2_fs_fatal_error(c, "fatal error writing btree node");
if (!bch2_err_matches(ret, EROFS))
bch2_fs_fatal_error(c, "fatal error writing btree node");
goto out;
}

View File

@ -93,6 +93,17 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
return ret;
}
/*
 * Emit the move_extent_finish tracepoint with a human-readable rendering
 * of the key that was written.
 *
 * The printbuf is only allocated and formatted when the tracepoint is
 * actually enabled, so the formatting cost is skipped on the fast path.
 */
static void trace_move_extent_finish2(struct bch_fs *c, struct bkey_s_c k)
{
if (trace_move_extent_finish_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
trace_move_extent_finish(c, buf.buf);
printbuf_exit(&buf);
}
}
static void trace_move_extent_fail2(struct data_update *m,
struct bkey_s_c new,
struct bkey_s_c wrote,
@ -343,7 +354,7 @@ restart_drop_extra_replicas:
bch2_btree_iter_set_pos(&iter, next_pos);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_finish], new->k.size);
trace_move_extent_finish(&new->k);
trace_move_extent_finish2(c, bkey_i_to_s_c(&new->k_i));
}
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))

View File

@ -485,6 +485,14 @@ static inline void folio_sector_set(struct folio *folio,
s->s[i].state = n;
}
/* file offset (to folio offset) to bch_folio_sector index */
static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
{
u64 f_offset = pos - folio_pos(folio);
/* pos must lie inside this folio; the BUG_ON also catches the
 * (well-defined) unsigned wrap of f_offset when pos < folio_pos() */
BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
return f_offset >> SECTOR_SHIFT;
}
static inline struct bch_folio *__bch2_folio(struct folio *folio)
{
return folio_has_private(folio)
@ -2858,7 +2866,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
end_pos = folio_end_pos(folio);
if (inode->v.i_size > folio_pos(folio))
end_pos = min_t(u64, inode->v.i_size, end_pos);
ret = s->s[(end_pos - folio_pos(folio) - 1) >> 9].state >= SECTOR_dirty;
ret = s->s[folio_pos_to_s(folio, end_pos - 1)].state >= SECTOR_dirty;
folio_zero_segment(folio, start_offset, end_offset);
@ -3609,15 +3617,15 @@ err:
/* fseek: */
static int folio_data_offset(struct folio *folio, unsigned offset)
static int folio_data_offset(struct folio *folio, loff_t pos)
{
struct bch_folio *s = bch2_folio(folio);
unsigned i, sectors = folio_sectors(folio);
if (s)
for (i = offset >> 9; i < sectors; i++)
for (i = folio_pos_to_s(folio, pos); i < sectors; i++)
if (s->s[i].state >= SECTOR_dirty)
return i << 9;
return i << SECTOR_SHIFT;
return -1;
}
@ -3643,8 +3651,7 @@ static loff_t bch2_seek_pagecache_data(struct inode *vinode,
folio_lock(folio);
offset = folio_data_offset(folio,
max(folio_pos(folio), start_offset) -
folio_pos(folio));
max(folio_pos(folio), start_offset));
if (offset >= 0) {
ret = clamp(folio_pos(folio) + offset,
start_offset, end_offset);
@ -3718,7 +3725,7 @@ static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
{
struct folio *folio;
struct bch_folio *s;
unsigned i, sectors, f_offset;
unsigned i, sectors;
bool ret = true;
folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
@ -3730,11 +3737,10 @@ static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
goto unlock;
sectors = folio_sectors(folio);
f_offset = *offset - folio_pos(folio);
for (i = f_offset >> 9; i < sectors; i++)
for (i = folio_pos_to_s(folio, *offset); i < sectors; i++)
if (s->s[i].state < SECTOR_dirty) {
*offset = max(*offset, folio_pos(folio) + (i << 9));
*offset = max(*offset,
folio_pos(folio) + (i << SECTOR_SHIFT));
goto unlock;
}

View File

@ -26,6 +26,39 @@
#include <trace/events/bcachefs.h>
/*
 * Emit the move_extent tracepoint (start of an extent move) with a
 * human-readable rendering of the key.
 *
 * Formatting only happens when the tracepoint is enabled, keeping the
 * disabled-tracing path cheap.
 */
static void trace_move_extent2(struct bch_fs *c, struct bkey_s_c k)
{
if (trace_move_extent_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
trace_move_extent(c, buf.buf);
printbuf_exit(&buf);
}
}
/*
 * Emit the move_extent_read tracepoint (read side of an extent move
 * issued) with a human-readable rendering of the key; formats only when
 * the tracepoint is enabled.
 */
static void trace_move_extent_read2(struct bch_fs *c, struct bkey_s_c k)
{
if (trace_move_extent_read_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
trace_move_extent_read(c, buf.buf);
printbuf_exit(&buf);
}
}
/*
 * Emit the move_extent_alloc_mem_fail tracepoint (memory allocation
 * failed while setting up an extent move) with a human-readable rendering
 * of the key; formats only when the tracepoint is enabled.
 */
static void trace_move_extent_alloc_mem_fail2(struct bch_fs *c, struct bkey_s_c k)
{
if (trace_move_extent_alloc_mem_fail_enabled()) {
struct printbuf buf = PRINTBUF;
bch2_bkey_val_to_text(&buf, c, k);
trace_move_extent_alloc_mem_fail(c, buf.buf);
printbuf_exit(&buf);
}
}
static void progress_list_add(struct bch_fs *c, struct bch_move_stats *stats)
{
mutex_lock(&c->data_progress_lock);
@ -270,6 +303,8 @@ static int bch2_move_extent(struct btree_trans *trans,
unsigned sectors = k.k->size, pages;
int ret = -ENOMEM;
trace_move_extent2(c, k);
bch2_data_update_opts_normalize(k, &data_opts);
if (!data_opts.rewrite_ptrs &&
@ -347,8 +382,7 @@ static int bch2_move_extent(struct btree_trans *trans,
this_cpu_add(c->counters[BCH_COUNTER_io_move], k.k->size);
this_cpu_add(c->counters[BCH_COUNTER_move_extent_read], k.k->size);
trace_move_extent_read(k.k);
trace_move_extent_read2(c, k);
mutex_lock(&ctxt->lock);
atomic_add(io->read_sectors, &ctxt->read_sectors);
@ -374,7 +408,8 @@ err_free_pages:
err_free:
kfree(io);
err:
trace_and_count(c, move_extent_alloc_mem_fail, k.k);
this_cpu_inc(c->counters[BCH_COUNTER_move_extent_alloc_mem_fail]);
trace_move_extent_alloc_mem_fail2(c, k);
return ret;
}
@ -620,85 +655,6 @@ int bch2_move_data(struct bch_fs *c,
return ret;
}
/*
 * Debug check, removed by this commit: verify that @bucket (at generation
 * @gen) no longer holds live data after an evacuation.
 *
 * Looks up the bucket's alloc key; if it is still marked as holding dirty
 * btree data at the same generation, flush interior btree updates once and
 * re-check. If data remains, dump the bucket's alloc key plus up to 10
 * backpointers' keys to the log so the un-evacuated data can be identified.
 *
 * NOTE(review): only the BCH_DATA_btree case reaches failed_to_evacuate;
 * dirty non-btree data at the same gen falls through and returns silently —
 * presumably intentional, but confirm against the original file.
 */
void bch2_verify_bucket_evacuated(struct btree_trans *trans, struct bpos bucket, int gen)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
struct printbuf buf = PRINTBUF;
struct bch_backpointer bp;
struct bpos bp_pos = POS_MIN;
unsigned nr_bps = 0;
int ret;
bch2_trans_begin(trans);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
bucket, BTREE_ITER_CACHED);
again:
/* retry the lookup on transaction restart */
ret = lockrestart_do(trans,
bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (!ret && k.k->type == KEY_TYPE_alloc_v4) {
struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
if (a.v->gen == gen &&
a.v->dirty_sectors) {
if (a.v->data_type == BCH_DATA_btree) {
/* pending interior node updates may still
 * reference the bucket; flush and re-check */
bch2_trans_unlock(trans);
if (bch2_btree_interior_updates_flush(c))
goto again;
goto failed_to_evacuate;
}
}
}
set_btree_iter_dontneed(&iter);
bch2_trans_iter_exit(trans, &iter);
return;
failed_to_evacuate:
bch2_trans_iter_exit(trans, &iter);
/* going emergency read-only explains leftover data; don't spam the log */
if (test_bit(BCH_FS_EMERGENCY_RO, &c->flags))
return;
prt_printf(&buf, bch2_log_msg(c, "failed to evacuate bucket "));
bch2_bkey_val_to_text(&buf, c, k);
/* walk the bucket's backpointers, collecting the keys still pointing
 * into it (capped at 10 entries below) */
while (1) {
bch2_trans_begin(trans);
ret = bch2_get_next_backpointer(trans, bucket, gen,
&bp_pos, &bp,
BTREE_ITER_CACHED);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
if (bkey_eq(bp_pos, POS_MAX))
break;
k = bch2_backpointer_get_key(trans, &iter, bp_pos, bp, 0);
ret = bkey_err(k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
if (!k.k)
continue;
prt_newline(&buf);
bch2_bkey_val_to_text(&buf, c, k);
bch2_trans_iter_exit(trans, &iter);
if (++nr_bps > 10)
break;
bp_pos = bpos_nosnap_successor(bp_pos);
}
bch2_print_string_as_lines(KERN_ERR, buf.buf);
printbuf_exit(&buf);
}
int __bch2_evacuate_bucket(struct btree_trans *trans,
struct moving_context *ctxt,
struct move_bucket_in_flight *bucket_in_flight,
@ -720,6 +676,8 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
struct bpos bp_pos = POS_MIN;
int ret = 0;
trace_bucket_evacuate(c, bucket);
bch2_bkey_buf_init(&sk);
/*

View File

@ -36,8 +36,6 @@ struct moving_context {
wait_queue_head_t wait;
};
void bch2_verify_bucket_evacuated(struct btree_trans *, struct bpos, int);
#define move_ctxt_wait_event(_ctxt, _trans, _cond) \
do { \
bool cond_finished = false; \

View File

@ -134,13 +134,6 @@ static void move_buckets_wait(struct btree_trans *trans,
if (atomic_read(&i->count))
break;
/*
* moving_ctxt_exit calls bch2_write as it flushes pending
* reads, which inits another btree_trans; this one must be
* unlocked:
*/
bch2_verify_bucket_evacuated(trans, i->bucket.k.bucket, i->bucket.k.gen);
list->first = i->next;
if (!list->first)
list->last = NULL;

View File

@ -1433,6 +1433,8 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
BTREE_TRIGGER_NORUN, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
BTREE_TRIGGER_NORUN, NULL) ?:
bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
BTREE_TRIGGER_NORUN, NULL);
if (ret)
bch_err(c, "error removing dev alloc info: %s", bch2_err_str(ret));