Update bcachefs sources to b00cf89c56 bcachefs: Allow shorter JSET_ENTRY_dev_usage entries

This commit is contained in:
Kent Overstreet 2021-06-11 00:16:27 -04:00
parent ff578c6718
commit 2be79b428b
10 changed files with 44 additions and 69 deletions

View File

@ -1 +1 @@
69be0dae3162e1651a5d5fcce08562e6e2af971a
b00cf89c56077d5e91c134d066ba2b45bc3136d7

View File

@ -321,6 +321,17 @@ again:
}
ret = btree_repair_node_start(c, b, prev, cur);
if (ret == DROP_THIS_NODE) {
six_unlock_read(&cur->c.lock);
bch2_btree_node_evict(c, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
if (ret)
goto err;
continue;
}
if (prev)
six_unlock_read(&prev->c.lock);
@ -331,13 +342,6 @@ again:
if (ret)
goto err;
goto again;
} else if (ret == DROP_THIS_NODE) {
bch2_btree_node_evict(c, cur_k.k);
ret = bch2_journal_key_delete(c, b->c.btree_id,
b->c.level, cur_k.k->k.p);
if (ret)
goto err;
continue;
} else if (ret)
break;

View File

@ -2312,6 +2312,7 @@ void bch2_trans_reset(struct btree_trans *trans, unsigned flags)
trans->iters_touched &= trans->iters_live;
trans->extra_journal_res = 0;
trans->nr_updates = 0;
trans->mem_top = 0;

View File

@ -383,6 +383,11 @@ struct btree_trans {
unsigned used_mempool:1;
unsigned error:1;
unsigned in_traverse_all:1;
/*
* For when bch2_trans_update notices we'll be splitting a compressed
* extent:
*/
unsigned extra_journal_res;
u64 iters_linked;
u64 iters_live;
@ -680,7 +685,6 @@ enum btree_insert_ret {
BTREE_INSERT_OK,
/* leaf node needs to be split */
BTREE_INSERT_BTREE_NODE_FULL,
BTREE_INSERT_ENOSPC,
BTREE_INSERT_NEED_MARK_REPLICAS,
BTREE_INSERT_NEED_JOURNAL_RES,
BTREE_INSERT_NEED_JOURNAL_RECLAIM,

View File

@ -696,10 +696,6 @@ int bch2_trans_commit_error(struct btree_trans *trans,
ret = -EINTR;
}
break;
case BTREE_INSERT_ENOSPC:
BUG_ON(flags & BTREE_INSERT_NOFAIL);
ret = -ENOSPC;
break;
case BTREE_INSERT_NEED_MARK_REPLICAS:
bch2_trans_unlock(trans);
@ -805,7 +801,7 @@ static int extent_handle_overwrites(struct btree_trans *trans,
struct bpos start = bkey_start_pos(&i->k->k);
struct bkey_i *update;
struct bkey_s_c k;
int ret = 0;
int ret = 0, compressed_sectors;
iter = bch2_trans_get_iter(trans, i->btree_id, start,
BTREE_ITER_INTENT|
@ -839,6 +835,16 @@ static int extent_handle_overwrites(struct btree_trans *trans,
goto next;
while (bkey_cmp(i->k->k.p, bkey_start_pos(k.k)) > 0) {
/*
* If we're going to be splitting a compressed extent, note it
* so that __bch2_trans_commit() can increase our disk
* reservation:
*/
if (bkey_cmp(bkey_start_pos(k.k), start) < 0 &&
bkey_cmp(k.k->p, i->k->k.p) > 0 &&
(compressed_sectors = bch2_bkey_sectors_compressed(k)))
trans->extra_journal_res += compressed_sectors;
if (bkey_cmp(bkey_start_pos(k.k), start) < 0) {
update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
if ((ret = PTR_ERR_OR_ZERO(update)))
@ -976,6 +982,15 @@ int __bch2_trans_commit(struct btree_trans *trans)
trans->journal_preres_u64s += u64s;
trans->journal_u64s += u64s;
}
if (trans->extra_journal_res) {
ret = bch2_disk_reservation_add(trans->c, trans->disk_res,
trans->extra_journal_res,
(trans->flags & BTREE_INSERT_NOFAIL)
? BCH_DISK_RESERVATION_NOFAIL : 0);
if (ret)
goto err;
}
retry:
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
@ -1029,22 +1044,12 @@ int bch2_trans_update(struct btree_trans *trans, struct btree_iter *iter,
BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
#ifdef CONFIG_BCACHEFS_DEBUG
BUG_ON(bkey_cmp(iter->pos,
is_extent ? bkey_start_pos(&k->k) : k->k.p));
trans_for_each_update(trans, i) {
BUG_ON(bkey_cmp(i->iter->pos, i->k->k.p));
trans_for_each_update(trans, i)
BUG_ON(i != trans->updates &&
btree_insert_entry_cmp(i - 1, i) >= 0);
}
#endif
if (is_extent) {
ret = bch2_extent_can_insert(trans, n.iter, n.k);
if (ret)
return ret;
ret = extent_handle_overwrites(trans, &n);
if (ret)
return ret;

View File

@ -173,38 +173,3 @@ int bch2_extent_is_atomic(struct bkey_i *k, struct btree_iter *iter)
return !bkey_cmp(end, k->k.p);
}
/*
 * Decide whether @insert can be inserted at @iter without further work.
 *
 * If the insert would split an existing compressed extent down the middle
 * (insert starts strictly after the existing key's start AND ends strictly
 * before its end, and that key has compressed sectors), a disk reservation
 * for the compressed sectors is taken up front, since splitting duplicates
 * the compressed data's accounting.
 *
 * Returns BTREE_INSERT_OK on success, BTREE_INSERT_ENOSPC if the
 * reservation cannot be satisfied, or a bkey error from peeking the slot.
 *
 * NOTE(review): removed by this commit — replaced by the
 * trans->extra_journal_res accounting in extent_handle_overwrites() /
 * __bch2_trans_commit() shown in the earlier hunks.
 */
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_i *insert)
{
struct bkey_s_c k;
int ret, sectors;
/* Peek the existing key at the iterator position; propagate errors. */
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
return ret;
/* Check if we're splitting a compressed extent: */
if (bkey_cmp(bkey_start_pos(&insert->k), bkey_start_pos(k.k)) > 0 &&
bkey_cmp(insert->k.p, k.k->p) < 0 &&
(sectors = bch2_bkey_sectors_compressed(k))) {
/* NOFAIL transactions must not fail the reservation either. */
int flags = trans->flags & BTREE_INSERT_NOFAIL
? BCH_DISK_RESERVATION_NOFAIL : 0;
switch (bch2_disk_reservation_add(trans->c, trans->disk_res,
sectors, flags)) {
case 0:
break;
case -ENOSPC:
return BTREE_INSERT_ENOSPC;
default:
/* Any other error is unexpected here. */
BUG();
}
}
return BTREE_INSERT_OK;
}

View File

@ -9,8 +9,4 @@ int bch2_extent_atomic_end(struct btree_iter *, struct bkey_i *,
int bch2_extent_trim_atomic(struct bkey_i *, struct btree_iter *);
int bch2_extent_is_atomic(struct bkey_i *, struct btree_iter *);
enum btree_insert_ret
bch2_extent_can_insert(struct btree_trans *, struct btree_iter *,
struct bkey_i *);
#endif /* _BCACHEFS_EXTENT_UPDATE_H */

View File

@ -1322,9 +1322,6 @@ static char **split_devs(const char *_dev_name, unsigned *nr)
char *dev_name = NULL, **devs = NULL, *s;
size_t i, nr_devs = 0;
if (strlen(_dev_name) == 0)
return NULL;
dev_name = kstrdup(_dev_name, GFP_KERNEL);
if (!dev_name)
return NULL;
@ -1500,6 +1497,9 @@ static struct dentry *bch2_mount(struct file_system_type *fs_type,
if (ret)
return ERR_PTR(ret);
if (!dev_name || strlen(dev_name) == 0)
return ERR_PTR(-EINVAL);
devs = split_devs(dev_name, &nr_devs);
if (!devs)
return ERR_PTR(-ENOMEM);

View File

@ -450,7 +450,7 @@ static int journal_entry_validate_dev_usage(struct bch_fs *c,
struct jset_entry_dev_usage *u =
container_of(entry, struct jset_entry_dev_usage, entry);
unsigned bytes = jset_u64s(le16_to_cpu(entry->u64s)) * sizeof(u64);
unsigned expected = sizeof(*u) + sizeof(u->d[0]) * 7; /* Current value of BCH_DATA_NR */
unsigned expected = sizeof(*u);
unsigned dev;
int ret = 0;

View File

@ -725,7 +725,7 @@ static int journal_replay_entry_early(struct bch_fs *c,
ca->usage_base->buckets_ec = le64_to_cpu(u->buckets_ec);
ca->usage_base->buckets_unavailable = le64_to_cpu(u->buckets_unavailable);
for (i = 0; i < nr_types; i++) {
for (i = 0; i < min_t(unsigned, nr_types, BCH_DATA_NR); i++) {
ca->usage_base->d[i].buckets = le64_to_cpu(u->d[i].buckets);
ca->usage_base->d[i].sectors = le64_to_cpu(u->d[i].sectors);
ca->usage_base->d[i].fragmented = le64_to_cpu(u->d[i].fragmented);