mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to 15178a6479 bcachefs: Update btree ptrs after every write
parent 86bd5c622c
commit 669fc107c6
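In short, this commit makes every btree node write update the node's pointer: btree_ptr_v2 keys now carry an authoritative sectors_written count, the read path can trust it to know exactly how much of a node is valid, and write errors are handled from the write completion path instead of a global retry list. For reference, the pre-existing on-disk v2 btree pointer whose sectors_written field the commit starts maintaining looks roughly like this (reproduced from bcachefs_format.h of this era; not part of this diff):

	struct bch_btree_ptr_v2 {
		struct bch_val		v;

		__u64			mem_ptr;
		__le64			seq;
		__le16			sectors_written;	/* now kept up to date on every write */
		__le16			flags;
		struct bpos		min_key;
		struct bch_extent_ptr	start[];
	} __attribute__((packed, aligned(8)));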
@@ -1 +1 @@
-787de128a5caf209845e5a8d0f14f24e1a42492c
+15178a6479955d5aeaa124c0de9dabac87ffcca0
@@ -675,7 +675,7 @@ struct bch_fs {
 	struct btree_key_cache	btree_key_cache;

 	struct workqueue_struct	*btree_update_wq;
-	struct workqueue_struct	*btree_error_wq;
+	struct workqueue_struct	*btree_io_complete_wq;
 	/* copygc needs its own workqueue for index updates.. */
 	struct workqueue_struct	*copygc_wq;
@@ -826,8 +826,6 @@ struct bch_fs {

 	atomic64_t		btree_writes_nr;
 	atomic64_t		btree_writes_sectors;
-	struct bio_list		btree_write_error_list;
-	struct work_struct	btree_write_error_work;
 	spinlock_t		btree_write_error_lock;

 	/* ERRORS */
@@ -1210,7 +1210,8 @@ enum bcachefs_metadata_version {
 	bcachefs_metadata_version_inode_btree_change	= 11,
 	bcachefs_metadata_version_snapshot		= 12,
 	bcachefs_metadata_version_inode_backpointers	= 13,
-	bcachefs_metadata_version_max			= 14,
+	bcachefs_metadata_version_btree_ptr_sectors_written = 14,
+	bcachefs_metadata_version_max			= 15,
 };

 #define bcachefs_metadata_version_current	(bcachefs_metadata_version_max - 1)
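Note on the version bump: since bcachefs_metadata_version_current is defined as max - 1, the current version becomes 14, i.e. btree_ptr_sectors_written. A sketch of how the new constant gates the feature (this mirrors the recovery.c hunk near the end of this commit):

	if (c->sb.version < bcachefs_metadata_version_btree_ptr_sectors_written) {
		/* older filesystem: sectors_written in btree ptrs isn't populated yet */
		bch_info(c, "version prior to btree_ptr_sectors_written, upgrade required");
		c->opts.version_upgrade = true;
	}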
@@ -101,6 +101,7 @@ const char *bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k)
 static unsigned bch2_key_types_allowed[] = {
 	[BKEY_TYPE_extents] =
 		(1U << KEY_TYPE_error)|
+		(1U << KEY_TYPE_cookie)|
 		(1U << KEY_TYPE_extent)|
 		(1U << KEY_TYPE_reservation)|
 		(1U << KEY_TYPE_reflink_p)|
@@ -112,6 +113,7 @@ static unsigned bch2_key_types_allowed[] = {
 		(1U << KEY_TYPE_hash_whiteout)|
 		(1U << KEY_TYPE_dirent),
 	[BKEY_TYPE_xattrs] =
+		(1U << KEY_TYPE_cookie)|
 		(1U << KEY_TYPE_hash_whiteout)|
 		(1U << KEY_TYPE_xattr),
 	[BKEY_TYPE_alloc] =
@@ -712,26 +712,24 @@ static int lock_node_check_fn(struct six_lock *lock, void *p)

 static noinline void btree_bad_header(struct bch_fs *c, struct btree *b)
 {
-	char buf1[100], buf2[100], buf3[100], buf4[100];
+	char buf1[200], buf2[100], buf3[100];

 	if (!test_bit(BCH_FS_INITIAL_GC_DONE, &c->flags))
 		return;

-	bch2_bpos_to_text(&PBUF(buf1), b->key.k.type == KEY_TYPE_btree_ptr_v2
-			  ? bkey_i_to_btree_ptr_v2(&b->key)->v.min_key
-			  : POS_MIN);
+	bch2_bkey_val_to_text(&PBUF(buf1), c, bkey_i_to_s_c(&b->key));
 	bch2_bpos_to_text(&PBUF(buf2), b->data->min_key);
-
-	bch2_bpos_to_text(&PBUF(buf3), b->key.k.p);
-	bch2_bpos_to_text(&PBUF(buf4), b->data->max_key);
+	bch2_bpos_to_text(&PBUF(buf3), b->data->max_key);

 	bch2_fs_inconsistent(c, "btree node header doesn't match ptr\n"
-			     "btree: ptr %u header %llu\n"
-			     "level: ptr %u header %llu\n"
-			     "min ptr %s node header %s\n"
-			     "max ptr %s node header %s",
-			     b->c.btree_id, BTREE_NODE_ID(b->data),
-			     b->c.level, BTREE_NODE_LEVEL(b->data),
-			     buf1, buf2, buf3, buf4);
+			     "btree %s level %u\n"
+			     "ptr: %s\n"
+			     "header: btree %s level %llu\n"
+			     "min %s max %s\n",
+			     bch2_btree_ids[b->c.btree_id], b->c.level,
+			     buf1,
+			     bch2_btree_ids[BTREE_NODE_ID(b->data)],
+			     BTREE_NODE_LEVEL(b->data),
+			     buf2, buf3);
 }

 static inline void btree_check_header(struct bch_fs *c, struct btree *b)
@@ -26,6 +26,7 @@ void bch2_btree_node_io_unlock(struct btree *b)
 {
 	EBUG_ON(!btree_node_write_in_flight(b));

+	clear_btree_node_write_in_flight_inner(b);
 	clear_btree_node_write_in_flight(b);
 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
 }
@@ -870,7 +871,8 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 	bool updated_range = b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
 		BTREE_PTR_RANGE_UPDATED(&bkey_i_to_btree_ptr_v2(&b->key)->v);
 	unsigned u64s;
-	unsigned nonblacklisted_written = 0;
+	unsigned blacklisted_written, nonblacklisted_written = 0;
+	unsigned ptr_written = btree_ptr_sectors_written(&b->key);
 	int ret, retry_read = 0, write = READ;

 	b->version_ondisk = U16_MAX;
@@ -901,7 +903,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			  b->data->keys.seq, bp->seq);
 	}

-	while (b->written < c->opts.btree_node_size) {
+	while (b->written < (ptr_written ?: c->opts.btree_node_size)) {
 		unsigned sectors, whiteout_u64s = 0;
 		struct nonce nonce;
 		struct bch_csum csum;
@@ -981,6 +983,10 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 		btree_err_on(blacklisted && first,
 			     BTREE_ERR_FIXABLE, c, ca, b, i,
 			     "first btree node bset has blacklisted journal seq");
+
+		btree_err_on(blacklisted && ptr_written,
+			     BTREE_ERR_FIXABLE, c, ca, b, i,
+			     "found blacklisted bset in btree node with sectors_written");
 		if (blacklisted && !first)
 			continue;
@@ -994,26 +1000,34 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 			nonblacklisted_written = b->written;
 	}

-	for (bne = write_block(b);
-	     bset_byte_offset(b, bne) < btree_bytes(c);
-	     bne = (void *) bne + block_bytes(c))
-		btree_err_on(bne->keys.seq == b->data->keys.seq &&
-			     !bch2_journal_seq_is_blacklisted(c,
-							      le64_to_cpu(bne->keys.journal_seq),
-							      true),
-			     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
-			     "found bset signature after last bset");
+	if (ptr_written) {
+		btree_err_on(b->written < ptr_written,
+			     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+			     "btree node data missing: expected %u sectors, found %u",
+			     ptr_written, b->written);
+	} else {
+		for (bne = write_block(b);
+		     bset_byte_offset(b, bne) < btree_bytes(c);
+		     bne = (void *) bne + block_bytes(c))
+			btree_err_on(bne->keys.seq == b->data->keys.seq &&
+				     !bch2_journal_seq_is_blacklisted(c,
+								      le64_to_cpu(bne->keys.journal_seq),
+								      true),
+				     BTREE_ERR_WANT_RETRY, c, ca, b, NULL,
+				     "found bset signature after last bset");

-	/*
-	 * Blacklisted bsets are those that were written after the most recent
-	 * (flush) journal write. Since there wasn't a flush, they may not have
-	 * made it to all devices - which means we shouldn't write new bsets
-	 * after them, as that could leave a gap and then reads from that device
-	 * wouldn't find all the bsets in that btree node - which means it's
-	 * important that we start writing new bsets after the most recent _non_
-	 * blacklisted bset:
-	 */
-	b->written = nonblacklisted_written;
+		/*
+		 * Blacklisted bsets are those that were written after the most recent
+		 * (flush) journal write. Since there wasn't a flush, they may not have
+		 * made it to all devices - which means we shouldn't write new bsets
+		 * after them, as that could leave a gap and then reads from that device
+		 * wouldn't find all the bsets in that btree node - which means it's
+		 * important that we start writing new bsets after the most recent _non_
+		 * blacklisted bset:
+		 */
+		blacklisted_written = b->written;
+		b->written = nonblacklisted_written;
+	}

 	sorted = btree_bounce_alloc(c, btree_bytes(c), &used_mempool);
 	sorted->keys.u64s = 0;
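Note: the read path now has two modes, condensed below as a sketch (the kernel helpers from the surrounding file are assumed). A zero return from btree_ptr_sectors_written() means the pointer predates this feature:

	unsigned ptr_written = btree_ptr_sectors_written(&b->key);

	/* read up to what the pointer says was written, or the whole node if unknown */
	while (b->written < (ptr_written ?: c->opts.btree_node_size)) {
		/* validate, checksum and sort each bset, as before */
	}

	if (ptr_written) {
		/* the pointer is authoritative: short data is an error, and a
		 * blacklisted bset should never have been written at all */
	} else {
		/* legacy node: scan for stray bset signatures past the end, then
		 * resume appends after the most recent non-blacklisted bset */
	}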
@@ -1081,6 +1095,9 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
 		if (ca->mi.state != BCH_MEMBER_STATE_rw)
 			set_btree_node_need_rewrite(b);
 	}
+
+	if (!ptr_written)
+		set_btree_node_need_rewrite(b);
 out:
 	mempool_free(iter, &c->fill_iter);
 	return retry_read;
@@ -1578,6 +1595,7 @@ static void btree_node_write_done(struct bch_fs *c, struct btree *b)
 			goto do_write;

 		new &= ~(1U << BTREE_NODE_write_in_flight);
+		new &= ~(1U << BTREE_NODE_write_in_flight_inner);
 	} while ((v = cmpxchg(&b->flags, old, new)) != old);

 	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight);
@@ -1596,10 +1614,12 @@ do_write:
 		new &= ~(1U << BTREE_NODE_dirty);
 		new &= ~(1U << BTREE_NODE_need_write);
 		new |=  (1U << BTREE_NODE_write_in_flight);
+		new |=  (1U << BTREE_NODE_write_in_flight_inner);
 		new |=  (1U << BTREE_NODE_just_written);
 		new ^=  (1U << BTREE_NODE_write_idx);
 	} else {
 		new &= ~(1U << BTREE_NODE_write_in_flight);
+		new &= ~(1U << BTREE_NODE_write_in_flight_inner);
 	}
 } while ((v = cmpxchg(&b->flags, old, new)) != old);
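Note on the two in-flight bits used above: BTREE_NODE_write_in_flight now stays set until the node's pointer has been updated as well, while the new BTREE_NODE_write_in_flight_inner covers only the data write itself. A hypothetical helper (not in this diff) shows what waiting on just the data write looks like; btree_update_nodes_written() below does exactly this inline:

	/* assumed sketch: block until the data write alone has completed,
	 * without waiting for the subsequent pointer update */
	static void btree_node_wait_on_data_write(struct btree *b)
	{
		wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight_inner,
			       TASK_UNINTERRUPTIBLE);
	}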
@@ -1609,52 +1629,38 @@ do_write:
 	six_unlock_read(&b->c.lock);
 }

-static void bch2_btree_node_write_error(struct bch_fs *c,
-					struct btree_write_bio *wbio)
+static void btree_node_write_work(struct work_struct *work)
 {
+	struct btree_write_bio *wbio =
+		container_of(work, struct btree_write_bio, work);
+	struct bch_fs *c	= wbio->wbio.c;
 	struct btree *b		= wbio->wbio.bio.bi_private;
-	struct bkey_buf k;
 	struct bch_extent_ptr *ptr;
-	struct btree_trans trans;
-	struct btree_iter *iter;
 	int ret;

-	bch2_bkey_buf_init(&k);
-	bch2_trans_init(&trans, c, 0, 0);
-
-	iter = bch2_trans_get_node_iter(&trans, b->c.btree_id, b->key.k.p,
-					BTREE_MAX_DEPTH, b->c.level, 0);
-retry:
-	ret = bch2_btree_iter_traverse(iter);
-	if (ret)
-		goto err;
-
-	/* has node been freed? */
-	if (iter->l[b->c.level].b != b) {
-		/* node has been freed: */
-		BUG_ON(!btree_node_dying(b));
-		goto out;
-	}
-
-	BUG_ON(!btree_node_hashed(b));
-
-	bch2_bkey_buf_copy(&k, c, &b->key);
+	btree_bounce_free(c,
+		wbio->data_bytes,
+		wbio->wbio.used_mempool,
+		wbio->data);

-	bch2_bkey_drop_ptrs(bkey_i_to_s(k.k), ptr,
+	bch2_bkey_drop_ptrs(bkey_i_to_s(&wbio->key), ptr,
 		bch2_dev_list_has_dev(wbio->wbio.failed, ptr->dev));

-	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(k.k)))
+	if (!bch2_bkey_nr_ptrs(bkey_i_to_s_c(&wbio->key)))
 		goto err;

-	ret = bch2_btree_node_update_key(&trans, iter, b, k.k);
-	if (ret == -EINTR)
-		goto retry;
-	if (ret)
-		goto err;
+	if (wbio->wbio.first_btree_write) {
+		if (wbio->wbio.failed.nr) {
+
+		}
+	} else {
+		ret = bch2_trans_do(c, NULL, NULL, 0,
+			bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
+							!wbio->wbio.failed.nr));
+		if (ret)
+			goto err;
+	}
 out:
-	bch2_trans_iter_put(&trans, iter);
-	bch2_trans_exit(&trans);
-	bch2_bkey_buf_exit(&k, c);
 	bio_put(&wbio->wbio.bio);
 	btree_node_write_done(c, b);
 	return;
@@ -1664,58 +1670,14 @@ err:
 	goto out;
 }

-void bch2_btree_write_error_work(struct work_struct *work)
-{
-	struct bch_fs *c	= container_of(work, struct bch_fs,
-					       btree_write_error_work);
-	struct bio *bio;
-
-	while (1) {
-		spin_lock_irq(&c->btree_write_error_lock);
-		bio = bio_list_pop(&c->btree_write_error_list);
-		spin_unlock_irq(&c->btree_write_error_lock);
-
-		if (!bio)
-			break;
-
-		bch2_btree_node_write_error(c,
-			container_of(bio, struct btree_write_bio, wbio.bio));
-	}
-}
-
-static void btree_node_write_work(struct work_struct *work)
-{
-	struct btree_write_bio *wbio =
-		container_of(work, struct btree_write_bio, work);
-	struct bch_fs *c	= wbio->wbio.c;
-	struct btree *b		= wbio->wbio.bio.bi_private;
-
-	btree_bounce_free(c,
-		wbio->bytes,
-		wbio->wbio.used_mempool,
-		wbio->data);
-
-	if (wbio->wbio.failed.nr) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&c->btree_write_error_lock, flags);
-		bio_list_add(&c->btree_write_error_list, &wbio->wbio.bio);
-		spin_unlock_irqrestore(&c->btree_write_error_lock, flags);
-
-		queue_work(c->btree_error_wq, &c->btree_write_error_work);
-		return;
-	}
-
-	bio_put(&wbio->wbio.bio);
-	btree_node_write_done(c, b);
-}
-
 static void btree_node_write_endio(struct bio *bio)
 {
 	struct bch_write_bio *wbio	= to_wbio(bio);
 	struct bch_write_bio *parent	= wbio->split ? wbio->parent : NULL;
 	struct bch_write_bio *orig	= parent ?: wbio;
+	struct btree_write_bio *wb	= container_of(orig, struct btree_write_bio, wbio);
 	struct bch_fs *c		= wbio->c;
+	struct btree *b			= wbio->bio.bi_private;
 	struct bch_dev *ca		= bch_dev_bkey_exists(c, wbio->dev);
 	unsigned long flags;
@@ -1736,13 +1698,13 @@ static void btree_node_write_endio(struct bio *bio)
 	if (parent) {
 		bio_put(bio);
 		bio_endio(&parent->bio);
-	} else {
-		struct btree_write_bio *wb =
-			container_of(orig, struct btree_write_bio, wbio);
-
-		INIT_WORK(&wb->work, btree_node_write_work);
-		queue_work(c->io_complete_wq, &wb->work);
+		return;
 	}
+
+	clear_btree_node_write_in_flight_inner(b);
+	wake_up_bit(&b->flags, BTREE_NODE_write_in_flight_inner);
+	INIT_WORK(&wb->work, btree_node_write_work);
+	queue_work(c->btree_io_complete_wq, &wb->work);
 }

 static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
@@ -1767,8 +1729,15 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,

 static void btree_write_submit(struct work_struct *work)
 {
 	struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
+	struct bch_extent_ptr *ptr;
+	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
+
+	bkey_copy(&tmp.k, &wbio->key);
+
+	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&tmp.k)), ptr)
+		ptr->offset += wbio->sector_offset;

-	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &wbio->key);
+	bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &tmp.k);
 }

 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
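Note: pointer offsets are now applied to a stack copy at submit time (above), instead of being baked into wbio->key when the write is set up; wbio->sector_offset, added in btree_io.h below, records where within the node this write lands. __BKEY_PADDED is the existing helper for declaring a stack bkey with room for a maximal value; from bcachefs_format.h it expands roughly to:

	#define __BKEY_PADDED(key, pad)					\
		struct { struct bkey_i key; __u64 key ## _pad[pad]; }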
@@ -1778,7 +1747,6 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
 	struct bset *i;
 	struct btree_node *bn = NULL;
 	struct btree_node_entry *bne = NULL;
-	struct bch_extent_ptr *ptr;
 	struct sort_iter sort_iter;
 	struct nonce nonce;
 	unsigned bytes_to_write, sectors_to_write, bytes, u64s;
@@ -1818,6 +1786,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, bool already_started)
 		new &= ~(1 << BTREE_NODE_dirty);
 		new &= ~(1 << BTREE_NODE_need_write);
 		new |=  (1 << BTREE_NODE_write_in_flight);
+		new |=  (1 << BTREE_NODE_write_in_flight_inner);
 		new |=  (1 << BTREE_NODE_just_written);
 		new ^=  (1 << BTREE_NODE_write_idx);
 	} while (cmpxchg_acquire(&b->flags, old, new) != old);
@@ -1969,37 +1938,30 @@ do_write:
 			    struct btree_write_bio, wbio.bio);
 	wbio_init(&wbio->wbio.bio);
 	wbio->data			= data;
-	wbio->bytes			= bytes;
+	wbio->data_bytes		= bytes;
+	wbio->sector_offset		= b->written;
 	wbio->wbio.c			= c;
 	wbio->wbio.used_mempool		= used_mempool;
+	wbio->wbio.first_btree_write	= !b->written;
 	wbio->wbio.bio.bi_opf		= REQ_OP_WRITE|REQ_META;
 	wbio->wbio.bio.bi_end_io	= btree_node_write_endio;
 	wbio->wbio.bio.bi_private	= b;

 	bch2_bio_map(&wbio->wbio.bio, data, sectors_to_write << 9);

-	/*
-	 * If we're appending to a leaf node, we don't technically need FUA -
-	 * this write just needs to be persisted before the next journal write,
-	 * which will be marked FLUSH|FUA.
-	 *
-	 * Similarly if we're writing a new btree root - the pointer is going to
-	 * be in the next journal entry.
-	 *
-	 * But if we're writing a new btree node (that isn't a root) or
-	 * appending to a non leaf btree node, we need either FUA or a flush
-	 * when we write the parent with the new pointer. FUA is cheaper than a
-	 * flush, and writes appending to leaf nodes aren't blocking anything so
-	 * just make all btree node writes FUA to keep things sane.
-	 */
-
 	bkey_copy(&wbio->key, &b->key);
-
-	bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&wbio->key)), ptr)
-		ptr->offset += b->written;

 	b->written += sectors_to_write;

+	if (wbio->wbio.first_btree_write &&
+	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
+		bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
+			cpu_to_le16(b->written);
+
+	if (wbio->key.k.type == KEY_TYPE_btree_ptr_v2)
+		bkey_i_to_btree_ptr_v2(&wbio->key)->v.sectors_written =
+			cpu_to_le16(b->written);
+
 	atomic64_inc(&c->btree_writes_nr);
 	atomic64_add(sectors_to_write, &c->btree_writes_sectors);
@@ -2008,6 +1970,10 @@ do_write:
 	return;
 err:
 	set_btree_node_noevict(b);
+	if (!b->written &&
+	    b->key.k.type == KEY_TYPE_btree_ptr_v2)
+		bkey_i_to_btree_ptr_v2(&b->key)->v.sectors_written =
+			cpu_to_le16(sectors_to_write);
 	b->written += sectors_to_write;
 nowrite:
 	btree_bounce_free(c, bytes, used_mempool, data);
@@ -32,6 +32,13 @@ static inline void clear_btree_node_dirty(struct bch_fs *c, struct btree *b)
 	atomic_dec(&c->btree_cache.dirty);
 }

+static inline unsigned btree_ptr_sectors_written(struct bkey_i *k)
+{
+	return k->k.type == KEY_TYPE_btree_ptr_v2
+		? le16_to_cpu(bkey_i_to_btree_ptr_v2(k)->v.sectors_written)
+		: 0;
+}
+
 struct btree_read_bio {
 	struct bch_fs		*c;
 	struct btree		*b;
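Note: the new helper is the single source of truth for "how much of this node does the pointer claim was written", with 0 doubling as "unknown" for pre-v2 pointers. Both call sites appear elsewhere in this commit, e.g.:

	/* read side: trust the recorded size when present */
	unsigned ptr_written = btree_ptr_sectors_written(&b->key);

	/* interior updates: a v2 pointer must never be inserted without one */
	BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
	       !btree_ptr_sectors_written(insert));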
@@ -48,7 +55,8 @@ struct btree_write_bio {
 	struct work_struct	work;
 	__BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
 	void			*data;
-	unsigned		bytes;
+	unsigned		data_bytes;
+	unsigned		sector_offset;
 	struct bch_write_bio	wbio;
 };
@@ -137,7 +145,6 @@ int bch2_btree_root_read(struct bch_fs *, enum btree_id,

 void bch2_btree_complete_write(struct bch_fs *, struct btree *,
 			       struct btree_write *);
-void bch2_btree_write_error_work(struct work_struct *);

 void __bch2_btree_node_write(struct bch_fs *, struct btree *, bool);
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
@@ -347,7 +347,6 @@ bool __bch2_btree_node_lock(struct btree *b, struct bpos pos,
 #ifdef CONFIG_BCACHEFS_DEBUG
 static void bch2_btree_iter_verify_locks(struct btree_iter *iter)
 {
-	struct bch_fs *c = iter->trans->c;
 	unsigned l;

 	if (!(iter->trans->iters_linked & (1ULL << iter->idx))) {
@@ -623,7 +622,7 @@ static void bch2_btree_iter_verify(struct btree_iter *iter)
 	       (iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
 	       !btree_type_has_snapshots(iter->btree_id));

-	for (i = 0; i < BTREE_MAX_DEPTH; i++) {
+	for (i = 0; i < (type != BTREE_ITER_CACHED ? BTREE_MAX_DEPTH : 1); i++) {
 		if (!iter->l[i].b) {
 			BUG_ON(c->btree_roots[iter->btree_id].b->c.level > i);
 			break;
@@ -1618,7 +1617,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
 inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
 {
 	struct bpos pos = bkey_start_pos(&iter->k);
-	bool ret = bpos_cmp(pos, POS_MIN) != 0;
+	bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
+		    ? bpos_cmp(pos, POS_MIN)
+		    : bkey_cmp(pos, POS_MIN)) != 0;

 	if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
 		pos = bkey_predecessor(iter, pos);
@@ -132,7 +132,7 @@ void __bch2_btree_iter_downgrade(struct btree_iter *, unsigned);

 static inline void bch2_btree_iter_downgrade(struct btree_iter *iter)
 {
-	unsigned new_locks_want = (iter->flags & BTREE_ITER_INTENT ? 1 : 0);
+	unsigned new_locks_want = iter->level + !!(iter->flags & BTREE_ITER_INTENT);

 	if (iter->locks_want > new_locks_want)
 		__bch2_btree_iter_downgrade(iter, new_locks_want);
@@ -435,6 +435,7 @@ enum btree_flags {
 	BTREE_NODE_write_idx,
 	BTREE_NODE_accessed,
 	BTREE_NODE_write_in_flight,
+	BTREE_NODE_write_in_flight_inner,
 	BTREE_NODE_just_written,
 	BTREE_NODE_dying,
 	BTREE_NODE_fake,
@@ -449,6 +450,7 @@ BTREE_FLAG(noevict);
 BTREE_FLAG(write_idx);
 BTREE_FLAG(accessed);
 BTREE_FLAG(write_in_flight);
+BTREE_FLAG(write_in_flight_inner);
 BTREE_FLAG(just_written);
 BTREE_FLAG(dying);
 BTREE_FLAG(fake);
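Note: adding the enum value plus the one BTREE_FLAG() line is all that's needed to get the set/clear/test accessors used throughout this commit (clear_btree_node_write_in_flight_inner() and friends); the macro, from the same header, expands roughly to:

	#define BTREE_FLAG(flag)						\
	static inline bool btree_node_ ## flag(struct btree *b)		\
	{	return test_bit(BTREE_NODE_ ## flag, &b->flags); }		\
										\
	static inline void set_btree_node_ ## flag(struct btree *b)		\
	{	set_bit(BTREE_NODE_ ## flag, &b->flags); }			\
										\
	static inline void clear_btree_node_ ## flag(struct btree *b)		\
	{	clear_bit(BTREE_NODE_ ## flag, &b->flags); }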
@@ -74,7 +74,9 @@ int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
 			    __le64, unsigned);
 void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
 int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
-			       struct btree *, struct bkey_i *);
+			       struct btree *, struct bkey_i *, bool);
+int bch2_btree_node_update_key_get_iter(struct btree_trans *,
+				struct btree *, struct bkey_i *, bool);

 int bch2_trans_update(struct btree_trans *, struct btree_iter *,
 		      struct bkey_i *, enum btree_update_flags);
@@ -246,11 +246,7 @@ retry:
 		goto retry;
 	}

-	if (c->sb.features & (1ULL << BCH_FEATURE_btree_ptr_v2))
-		bkey_btree_ptr_v2_init(&tmp.k);
-	else
-		bkey_btree_ptr_init(&tmp.k);
-
+	bkey_btree_ptr_v2_init(&tmp.k);
 	bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, c->opts.btree_node_size);

 	bch2_open_bucket_get(c, wp, &ob);
@@ -567,7 +563,8 @@ static void btree_update_nodes_written(struct btree_update *as)
 		six_unlock_read(&old->c.lock);

 		if (seq == as->old_nodes_seq[i])
-			bch2_btree_node_wait_on_write(old);
+			wait_on_bit_io(&old->flags, BTREE_NODE_write_in_flight_inner,
+				       TASK_UNINTERRUPTIBLE);
 	}

 	/*
@@ -1153,6 +1150,9 @@ static void bch2_insert_fixup_btree_ptr(struct btree_update *as, struct btree *b,
 	struct bkey_packed *k;
 	const char *invalid;

+	BUG_ON(insert->k.type == KEY_TYPE_btree_ptr_v2 &&
+	       !btree_ptr_sectors_written(insert));
+
 	invalid = bch2_bkey_invalid(c, bkey_i_to_s_c(insert), btree_node_type(b)) ?:
 		bch2_bkey_in_btree_node(b, bkey_i_to_s_c(insert));
 	if (invalid) {
@@ -1395,6 +1395,7 @@ static void btree_split(struct btree_update *as,
 		six_unlock_write(&n2->c.lock);
 		six_unlock_write(&n1->c.lock);

+		bch2_btree_node_write(c, n1, SIX_LOCK_intent);
 		bch2_btree_node_write(c, n2, SIX_LOCK_intent);

 		/*
@@ -1422,12 +1423,12 @@ static void btree_split(struct btree_update *as,
 		bch2_btree_build_aux_trees(n1);
 		six_unlock_write(&n1->c.lock);

+		bch2_btree_node_write(c, n1, SIX_LOCK_intent);
+
 		if (parent)
 			bch2_keylist_add(&as->parent_keys, &n1->key);
 	}

-	bch2_btree_node_write(c, n1, SIX_LOCK_intent);
-
 	/* New nodes all written, now make them visible: */

 	if (parent) {
|
||||
bch2_btree_build_aux_trees(n);
|
||||
six_unlock_write(&n->c.lock);
|
||||
|
||||
bch2_btree_node_write(c, n, SIX_LOCK_intent);
|
||||
|
||||
bkey_init(&delete.k);
|
||||
delete.k.p = prev->key.k.p;
|
||||
bch2_keylist_add(&as->parent_keys, &delete);
|
||||
bch2_keylist_add(&as->parent_keys, &n->key);
|
||||
|
||||
bch2_btree_node_write(c, n, SIX_LOCK_intent);
|
||||
|
||||
bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, flags);
|
||||
|
||||
bch2_btree_update_get_open_buckets(as, n);
|
||||
@@ -1883,74 +1884,109 @@ void bch2_btree_node_rewrite_async(struct bch_fs *c, struct btree *b)
 	queue_work(c->btree_interior_update_worker, &a->work);
 }

-static void __bch2_btree_node_update_key(struct btree_update *as,
-					 struct btree_trans *trans,
-					 struct btree_iter *iter,
-					 struct btree *b, struct btree *new_hash,
-					 struct bkey_i *new_key)
+static int __bch2_btree_node_update_key(struct btree_trans *trans,
+					struct btree_iter *iter,
+					struct btree *b, struct btree *new_hash,
+					struct bkey_i *new_key,
+					bool skip_triggers)
 {
-	struct bch_fs *c = as->c;
+	struct bch_fs *c = trans->c;
+	struct btree_iter *iter2 = NULL;
 	struct btree *parent;
+	u64 journal_entries[BKEY_BTREE_PTR_U64s_MAX];
 	int ret;

-	btree_update_will_delete_key(as, &b->key);
-	btree_update_will_add_key(as, new_key);
+	if (!skip_triggers) {
+		ret = bch2_trans_mark_key(trans,
+					  bkey_s_c_null,
+					  bkey_i_to_s_c(new_key),
+					  BTREE_TRIGGER_INSERT);
+		if (ret)
+			return ret;
+
+		ret = bch2_trans_mark_key(trans,
+					  bkey_i_to_s_c(&b->key),
+					  bkey_s_c_null,
+					  BTREE_TRIGGER_OVERWRITE);
+		if (ret)
+			return ret;
+	}
+
+	if (new_hash) {
+		bkey_copy(&new_hash->key, new_key);
+		ret = bch2_btree_node_hash_insert(&c->btree_cache,
+				new_hash, b->c.level, b->c.btree_id);
+		BUG_ON(ret);
+	}

 	parent = btree_node_parent(iter, b);
 	if (parent) {
-		if (new_hash) {
-			bkey_copy(&new_hash->key, new_key);
-			ret = bch2_btree_node_hash_insert(&c->btree_cache,
-					new_hash, b->c.level, b->c.btree_id);
-			BUG_ON(ret);
-		}
+		iter2 = bch2_trans_copy_iter(trans, iter);

-		bch2_keylist_add(&as->parent_keys, new_key);
-		bch2_btree_insert_node(as, trans, iter, parent, &as->parent_keys, 0);
+		BUG_ON(iter2->level != b->c.level);
+		BUG_ON(bpos_cmp(iter2->pos, new_key->k.p));

-		if (new_hash) {
-			mutex_lock(&c->btree_cache.lock);
-			bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
+		btree_node_unlock(iter2, iter2->level);
+		iter2->l[iter2->level].b = BTREE_ITER_NO_NODE_UP;
+		iter2->level++;

-			bch2_btree_node_hash_remove(&c->btree_cache, b);
-
-			bkey_copy(&b->key, new_key);
-			ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
-			BUG_ON(ret);
-			mutex_unlock(&c->btree_cache.lock);
-		} else {
-			bkey_copy(&b->key, new_key);
-		}
+		ret   = bch2_btree_iter_traverse(iter2) ?:
+			bch2_trans_update(trans, iter2, new_key, BTREE_TRIGGER_NORUN);
+		if (ret)
+			goto err;
 	} else {
 		BUG_ON(btree_node_root(c, b) != b);

-		bch2_btree_node_lock_write(b, iter);
-		bkey_copy(&b->key, new_key);
-
-		if (btree_ptr_hash_val(&b->key) != b->hash_val) {
-			mutex_lock(&c->btree_cache.lock);
-			bch2_btree_node_hash_remove(&c->btree_cache, b);
-
-			ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
-			BUG_ON(ret);
-			mutex_unlock(&c->btree_cache.lock);
-		}
-
-		btree_update_updated_root(as, b);
-		bch2_btree_node_unlock_write(b, iter);
+		trans->extra_journal_entries = (void *) &journal_entries[0];
+		trans->extra_journal_entry_u64s =
+			journal_entry_set((void *) &journal_entries[0],
+					  BCH_JSET_ENTRY_btree_root,
+					  b->c.btree_id, b->c.level,
+					  new_key, new_key->k.u64s);
 	}

-	bch2_btree_update_done(as);
+	ret = bch2_trans_commit(trans, NULL, NULL,
+				BTREE_INSERT_NOFAIL|
+				BTREE_INSERT_NOCHECK_RW|
+				BTREE_INSERT_JOURNAL_RECLAIM|
+				BTREE_INSERT_JOURNAL_RESERVED|
+				BTREE_INSERT_NOUNLOCK);
+	if (ret)
+		goto err;
+
+	bch2_btree_node_lock_write(b, iter);
+
+	if (new_hash) {
+		mutex_lock(&c->btree_cache.lock);
+		bch2_btree_node_hash_remove(&c->btree_cache, new_hash);
+		bch2_btree_node_hash_remove(&c->btree_cache, b);
+
+		bkey_copy(&b->key, new_key);
+		ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
+		BUG_ON(ret);
+		mutex_unlock(&c->btree_cache.lock);
+	} else {
+		bkey_copy(&b->key, new_key);
+	}
+
+	bch2_btree_node_unlock_write(b, iter);
+out:
+	bch2_trans_iter_put(trans, iter2);
+	return ret;
+err:
+	if (new_hash) {
+		mutex_lock(&c->btree_cache.lock);
+		bch2_btree_node_hash_remove(&c->btree_cache, b);
+		mutex_unlock(&c->btree_cache.lock);
+	}
+	goto out;
 }

-int bch2_btree_node_update_key(struct btree_trans *trans,
-			       struct btree_iter *iter,
-			       struct btree *b,
-			       struct bkey_i *new_key)
+int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
+			       struct btree *b, struct bkey_i *new_key,
+			       bool skip_triggers)
 {
 	struct bch_fs *c = trans->c;
-	struct btree *parent = btree_node_parent(iter, b);
-	struct btree_update *as = NULL;
 	struct btree *new_hash = NULL;
 	struct closure cl;
 	int ret = 0;
@@ -1964,27 +2000,18 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *iter,
 	if (btree_ptr_hash_val(new_key) != b->hash_val) {
 		ret = bch2_btree_cache_cannibalize_lock(c, &cl);
 		if (ret) {
-			bch2_trans_unlock(iter->trans);
+			bch2_trans_unlock(trans);
 			closure_sync(&cl);
-			if (!bch2_trans_relock(iter->trans))
+			if (!bch2_trans_relock(trans))
 				return -EINTR;
 		}

 		new_hash = bch2_btree_node_mem_alloc(c);
 	}

-	as = bch2_btree_update_start(iter, b->c.level,
-		parent ? btree_update_reserve_required(c, parent) : 0,
-		BTREE_INSERT_NOFAIL);
-	if (IS_ERR(as)) {
-		ret = PTR_ERR(as);
-		goto err;
-	}
+	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
+					   new_key, skip_triggers);

-	__bch2_btree_node_update_key(as, trans, iter, b, new_hash, new_key);
-
 	bch2_btree_iter_downgrade(iter);
-err:
 	if (new_hash) {
 		mutex_lock(&c->btree_cache.lock);
 		list_move(&new_hash->list, &c->btree_cache.freeable);
@@ -1998,6 +2025,35 @@ err:
 	return ret;
 }

+int bch2_btree_node_update_key_get_iter(struct btree_trans *trans,
+					struct btree *b, struct bkey_i *new_key,
+					bool skip_triggers)
+{
+	struct btree_iter *iter;
+	int ret;
+
+	iter = bch2_trans_get_node_iter(trans, b->c.btree_id, b->key.k.p,
+					BTREE_MAX_DEPTH, b->c.level,
+					BTREE_ITER_INTENT);
+	ret = bch2_btree_iter_traverse(iter);
+	if (ret)
+		goto out;
+
+	/* has node been freed? */
+	if (iter->l[b->c.level].b != b) {
+		/* node has been freed: */
+		BUG_ON(!btree_node_dying(b));
+		goto out;
+	}
+
+	BUG_ON(!btree_node_hashed(b));
+
+	ret = bch2_btree_node_update_key(trans, iter, b, new_key, skip_triggers);
+out:
+	bch2_trans_iter_put(trans, iter);
+	return ret;
+}
+
 /* Init code: */

 /*
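Note: callers that hold only the node and no iterator, like the rewritten btree_node_write_work() earlier in this commit, go through the new _get_iter variant, which builds a node iterator, checks the node hasn't been freed, and forwards to bch2_btree_node_update_key(). For example:

	/* from btree_node_write_work(): after dropping failed ptrs from the
	 * key, point the parent at the surviving replicas */
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_btree_node_update_key_get_iter(&trans, b, &wbio->key,
						    !wbio->wbio.failed.nr));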
@@ -890,7 +890,8 @@ int __bch2_trans_commit(struct btree_trans *trans)
 	unsigned u64s, reset_flags = 0;
 	int ret = 0;

-	if (!trans->nr_updates)
+	if (!trans->nr_updates &&
+	    !trans->extra_journal_entry_u64s)
 		goto out_reset;

 	if (trans->flags & BTREE_INSERT_GC_LOCK_HELD)
@@ -192,9 +192,10 @@ void bch2_btree_ptr_v2_to_text(struct printbuf *out, struct bch_fs *c,
 {
 	struct bkey_s_c_btree_ptr_v2 bp = bkey_s_c_to_btree_ptr_v2(k);

-	pr_buf(out, "seq %llx written %u min_key ",
+	pr_buf(out, "seq %llx written %u min_key %s",
 	       le64_to_cpu(bp.v->seq),
-	       le16_to_cpu(bp.v->sectors_written));
+	       le16_to_cpu(bp.v->sectors_written),
+	       BTREE_PTR_RANGE_UPDATED(bp.v) ? "R " : "");

 	bch2_bpos_to_text(out, bp.v->min_key);
 	pr_buf(out, " ");
@@ -94,7 +94,8 @@ struct bch_write_bio {
 				bounce:1,
 				put_bio:1,
 				have_ioref:1,
-				used_mempool:1;
+				used_mempool:1,
+				first_btree_write:1;

 	struct bio		bio;
 };
@@ -139,7 +139,7 @@ retry:
 			break;
 		}

-		ret = bch2_btree_node_update_key(&trans, iter, b, k.k);
+		ret = bch2_btree_node_update_key(&trans, iter, b, k.k, false);
 		if (ret == -EINTR) {
 			b = bch2_btree_iter_peek_node(iter);
 			ret = 0;
@@ -1005,6 +1005,11 @@ int bch2_fs_recovery(struct bch_fs *c)
 		c->opts.fix_errors = FSCK_OPT_YES;
 	}

+	if (c->sb.version < bcachefs_metadata_version_btree_ptr_sectors_written) {
+		bch_info(c, "version prior to btree_ptr_sectors_written, upgrade required");
+		c->opts.version_upgrade = true;
+	}
+
 	ret = bch2_blacklist_table_initialize(c);
 	if (ret) {
 		bch_err(c, "error initializing blacklist table");
@@ -507,8 +507,8 @@ static void __bch2_fs_free(struct bch_fs *c)
 		destroy_workqueue(c->io_complete_wq);
 	if (c->copygc_wq)
 		destroy_workqueue(c->copygc_wq);
-	if (c->btree_error_wq)
-		destroy_workqueue(c->btree_error_wq);
+	if (c->btree_io_complete_wq)
+		destroy_workqueue(c->btree_io_complete_wq);
 	if (c->btree_update_wq)
 		destroy_workqueue(c->btree_update_wq);
@@ -560,7 +560,6 @@ void __bch2_fs_stop(struct bch_fs *c)
 	for_each_member_device(ca, c, i)
 		cancel_work_sync(&ca->io_error_work);

-	cancel_work_sync(&c->btree_write_error_work);
 	cancel_work_sync(&c->read_only_work);

 	for (i = 0; i < c->sb.nr_devices; i++)
|
||||
|
||||
mutex_init(&c->bio_bounce_pages_lock);
|
||||
|
||||
bio_list_init(&c->btree_write_error_list);
|
||||
spin_lock_init(&c->btree_write_error_lock);
|
||||
INIT_WORK(&c->btree_write_error_work, bch2_btree_write_error_work);
|
||||
|
||||
INIT_WORK(&c->journal_seq_blacklist_gc_work,
|
||||
bch2_blacklist_entries_gc);
|
||||
@ -760,7 +757,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
|
||||
|
||||
if (!(c->btree_update_wq = alloc_workqueue("bcachefs",
|
||||
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
|
||||
!(c->btree_error_wq = alloc_workqueue("bcachefs_error",
|
||||
!(c->btree_io_complete_wq = alloc_workqueue("bcachefs_btree_io",
|
||||
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
|
||||
!(c->copygc_wq = alloc_workqueue("bcachefs_copygc",
|
||||
WQ_FREEZABLE|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE, 1)) ||
|
||||
|
@@ -34,19 +34,15 @@ static int test_delete(struct bch_fs *c, u64 nr)
 	int ret;

 	bkey_cookie_init(&k.k_i);
+	k.k.p.snapshot = U32_MAX;

 	bch2_trans_init(&trans, c, 0, 0);

 	iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
 				   BTREE_ITER_INTENT);

-	ret = bch2_btree_iter_traverse(iter);
-	if (ret) {
-		bch_err(c, "lookup error in test_delete: %i", ret);
-		goto err;
-	}
-
 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+		bch2_btree_iter_traverse(iter) ?:
 		bch2_trans_update(&trans, iter, &k.k_i, 0));
 	if (ret) {
 		bch_err(c, "update error in test_delete: %i", ret);
@@ -55,7 +51,8 @@ static int test_delete(struct bch_fs *c, u64 nr)

 	pr_info("deleting once");
 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
-		bch2_btree_delete_at(&trans, iter, 0));
+		bch2_btree_iter_traverse(iter) ?:
+		bch2_btree_delete_at(&trans, iter, 0));
 	if (ret) {
 		bch_err(c, "delete error (first) in test_delete: %i", ret);
 		goto err;
@@ -63,7 +60,8 @@ static int test_delete(struct bch_fs *c, u64 nr)

 	pr_info("deleting twice");
 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
-		bch2_btree_delete_at(&trans, iter, 0));
+		bch2_btree_iter_traverse(iter) ?:
+		bch2_btree_delete_at(&trans, iter, 0));
 	if (ret) {
 		bch_err(c, "delete error (second) in test_delete: %i", ret);
 		goto err;
@@ -82,29 +80,27 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
 	int ret;

 	bkey_cookie_init(&k.k_i);
+	k.k.p.snapshot = U32_MAX;

 	bch2_trans_init(&trans, c, 0, 0);

 	iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
 				   BTREE_ITER_INTENT);

-	ret = bch2_btree_iter_traverse(iter);
-	if (ret) {
-		bch_err(c, "lookup error in test_delete_written: %i", ret);
-		goto err;
-	}
-
 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+		bch2_btree_iter_traverse(iter) ?:
 		bch2_trans_update(&trans, iter, &k.k_i, 0));
 	if (ret) {
 		bch_err(c, "update error in test_delete_written: %i", ret);
 		goto err;
 	}

 	bch2_trans_unlock(&trans);
 	bch2_journal_flush_all_pins(&c->journal);

 	ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+		bch2_btree_iter_traverse(iter) ?:
 		bch2_btree_delete_at(&trans, iter, 0));
 	if (ret) {
 		bch_err(c, "delete error in test_delete_written: %i", ret);
 		goto err;
@@ -134,6 +130,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)

 		bkey_cookie_init(&k.k_i);
 		k.k.p.offset = i;
+		k.k.p.snapshot = U32_MAX;

 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
 					NULL, NULL, 0);
@@ -188,6 +185,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)

 		bkey_cookie_init(&k.k_i);
 		k.k.p.offset = i + 8;
+		k.k.p.snapshot = U32_MAX;
 		k.k.size = 8;

 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
@@ -243,6 +241,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)

 		bkey_cookie_init(&k.k_i);
 		k.k.p.offset = i * 2;
+		k.k.p.snapshot = U32_MAX;

 		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
 					NULL, NULL, 0);
@@ -306,6 +305,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)

 		bkey_cookie_init(&k.k_i);
 		k.k.p.offset = i + 16;
+		k.k.p.snapshot = U32_MAX;
 		k.k.size = 8;

 		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
@@ -413,6 +413,7 @@ static int insert_test_extent(struct bch_fs *c,

 	bkey_cookie_init(&k.k_i);
 	k.k_i.k.p.offset = end;
+	k.k_i.k.p.snapshot = U32_MAX;
 	k.k_i.k.size = end - start;
 	k.k_i.k.version.lo = test_version++;
@@ -591,6 +592,7 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
 		k.k.p = iter->pos;

 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			bch2_btree_iter_traverse(iter) ?:
 			bch2_trans_update(&trans, iter, &k.k_i, 0));
 		if (ret) {
 			bch_err(c, "update error in rand_mixed: %i", ret);
@@ -671,6 +673,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
 		insert.k.p = iter->pos;

 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			bch2_btree_iter_traverse(iter) ?:
 			bch2_trans_update(&trans, iter, &insert.k_i, 0));
 		if (ret) {
 			bch_err(c, "error in seq_insert: %i", ret);
@@ -719,6 +722,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
 		bkey_reassemble(&u.k_i, k);

 		ret = __bch2_trans_do(&trans, NULL, NULL, 0,
+			bch2_btree_iter_traverse(iter) ?:
 			bch2_trans_update(&trans, iter, &u.k_i, 0));
 		if (ret) {
 			bch_err(c, "error in seq_overwrite: %i", ret);