Update bcachefs sources to 5d0a6c2b32f1 bcachefs: check_directory_structure() can now be run online

Kent Overstreet 2023-12-21 19:10:40 -05:00
parent 2b97686ffa
commit 93241a1c9a
23 changed files with 281 additions and 220 deletions

View File

@ -1 +1 @@
1a739db0b256dc56d4e9fdc33a11d0728d7672d2
5d0a6c2b32f1542f01e47e767b0174de788dd8cc

View File

@ -1360,8 +1360,17 @@ retry:
goto alloc_done;
/* Don't retry from all devices if we're out of open buckets: */
if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
goto allocate_blocking;
if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty)) {
int ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,
&have_cache, watermark,
flags, cl);
if (!ret ||
bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
goto alloc_done;
}
/*
* Only try to allocate cache (durability = 0 devices) from the
@ -1375,7 +1384,6 @@ retry:
&have_cache, watermark,
flags, cl);
} else {
allocate_blocking:
ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
target, erasure_code,
nr_replicas, &nr_effective,

View File

@ -326,6 +326,12 @@ do { \
bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
} while (0)
#define bch_err_fn_ratelimited(_c, _ret) \
do { \
if (should_print_err(_ret)) \
bch_err_ratelimited(_c, "%s(): error %s", __func__, bch2_err_str(_ret));\
} while (0)
#define bch_err_msg(_c, _ret, _msg, ...) \
do { \
if (should_print_err(_ret)) \
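Note: the new bch_err_fn_ratelimited() is a drop-in for bch_err_fn() on error paths that can fire repeatedly; later hunks in this commit (bch2_btree_update_start(), bch2_update_unwritten_extent()) call it directly. A minimal sketch of the call pattern, with a hypothetical caller:

static void example_flush_error(struct bch_fs *c, int ret)
{
	/* Emits "example_flush_error(): error <errname>" through the rate-limited
	 * printk path, and only when should_print_err(ret) says the error is
	 * worth logging. */
	bch_err_fn_ratelimited(c, ret);
}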

View File

@ -397,7 +397,7 @@ struct bch_ioctl_fsck_offline {
__u64 flags;
__u64 opts; /* string */
__u64 nr_devs;
__u64 devs[0];
__u64 devs[] __counted_by(nr_devs);
};
/*
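Note on the devs[] change: __counted_by() ties the flexible array's runtime length to the nr_devs member, so FORTIFY_SOURCE/UBSAN array-bounds checks can validate accesses, where the old devs[0] zero-length-array idiom gave them nothing to work with. A generic sketch of the pattern (struct and variable names here are illustrative, not bcachefs API):

struct item_list {
	__u64	nr;
	__u64	items[] __counted_by(nr);	/* bounds checks trust 'nr' as the element count */
};

/* The counter must be valid before the array is indexed, and allocations
 * size the trailing array explicitly: */
struct item_list *l = kzalloc(struct_size(l, items, n), GFP_KERNEL);
if (l)
	l->nr = n;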

View File

@ -1987,7 +1987,6 @@ static int bch2_gc_thread(void *arg)
struct io_clock *clock = &c->io_clock[WRITE];
unsigned long last = atomic64_read(&clock->now);
unsigned last_kick = atomic_read(&c->kick_gc);
int ret;
set_freezable();
@ -2027,7 +2026,7 @@ static int bch2_gc_thread(void *arg)
#if 0
ret = bch2_gc(c, false, false);
#else
ret = bch2_gc_gens(c);
bch2_gc_gens(c);
#endif
debug_check_no_locks_held();
}

View File

@ -1875,32 +1875,49 @@ inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
}
static noinline
struct bkey_i *__bch2_btree_trans_peek_updates(struct btree_iter *iter)
void bch2_btree_trans_peek_prev_updates(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c *k)
{
struct btree_trans *trans = iter->trans;
struct bkey_i *ret = NULL;
struct bpos end = path_l(btree_iter_path(trans, iter))->b->data->min_key;
trans_for_each_update(trans, i) {
if (i->btree_id < iter->btree_id)
continue;
if (i->btree_id > iter->btree_id)
break;
if (bpos_lt(i->k->k.p, btree_iter_path(trans, iter)->pos))
continue;
if (i->key_cache_already_flushed)
continue;
if (!ret || bpos_lt(i->k->k.p, ret->k.p))
ret = i->k;
}
return ret;
trans_for_each_update(trans, i)
if (!i->key_cache_already_flushed &&
i->btree_id == iter->btree_id &&
bpos_le(i->k->k.p, iter->pos) &&
bpos_ge(i->k->k.p, k->k ? k->k->p : end)) {
iter->k = i->k->k;
*k = bkey_i_to_s_c(i->k);
}
}
static inline struct bkey_i *btree_trans_peek_updates(struct btree_iter *iter)
static noinline
void bch2_btree_trans_peek_updates(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c *k)
{
return iter->flags & BTREE_ITER_WITH_UPDATES
? __bch2_btree_trans_peek_updates(iter)
: NULL;
struct btree_path *path = btree_iter_path(trans, iter);
struct bpos end = path_l(path)->b->key.k.p;
trans_for_each_update(trans, i)
if (!i->key_cache_already_flushed &&
i->btree_id == iter->btree_id &&
bpos_ge(i->k->k.p, path->pos) &&
bpos_le(i->k->k.p, k->k ? k->k->p : end)) {
iter->k = i->k->k;
*k = bkey_i_to_s_c(i->k);
}
}
static noinline
void bch2_btree_trans_peek_slot_updates(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c *k)
{
trans_for_each_update(trans, i)
if (!i->key_cache_already_flushed &&
i->btree_id == iter->btree_id &&
bpos_eq(i->k->k.p, iter->pos)) {
iter->k = i->k->k;
*k = bkey_i_to_s_c(i->k);
}
}
static struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
@ -1999,7 +2016,6 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bpos search_key)
{
struct btree_trans *trans = iter->trans;
struct bkey_i *next_update;
struct bkey_s_c k, k2;
int ret;
@ -2049,14 +2065,9 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL))
k = btree_trans_peek_journal(trans, iter, k);
next_update = btree_trans_peek_updates(iter);
if (next_update &&
bpos_le(next_update->k.p,
k.k ? k.k->p : l->b->key.k.p)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
}
if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
trans->nr_updates))
bch2_btree_trans_peek_updates(trans, iter, &k);
if (k.k && bkey_deleted(k.k)) {
/*
@ -2265,7 +2276,6 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
EBUG_ON(btree_iter_path(trans, iter)->cached ||
btree_iter_path(trans, iter)->level);
EBUG_ON(iter->flags & BTREE_ITER_WITH_UPDATES);
if (iter->flags & BTREE_ITER_WITH_JOURNAL)
return bkey_s_c_err(-EIO);
@ -2298,6 +2308,10 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
: bpos_gt(k.k->p, search_key)))
k = btree_path_level_prev(trans, path, &path->l[0], &iter->k);
if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
trans->nr_updates))
bch2_btree_trans_peek_prev_updates(trans, iter, &k);
if (likely(k.k)) {
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS) {
if (k.k->p.snapshot == iter->snapshot)
@ -2422,13 +2436,13 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
if ((iter->flags & BTREE_ITER_CACHED) ||
!(iter->flags & (BTREE_ITER_IS_EXTENTS|BTREE_ITER_FILTER_SNAPSHOTS))) {
struct bkey_i *next_update;
k = bkey_s_c_null;
if ((next_update = btree_trans_peek_updates(iter)) &&
bpos_eq(next_update->k.p, iter->pos)) {
iter->k = next_update->k;
k = bkey_i_to_s_c(next_update);
goto out;
if (unlikely((iter->flags & BTREE_ITER_WITH_UPDATES) &&
trans->nr_updates)) {
bch2_btree_trans_peek_slot_updates(trans, iter, &k);
if (k.k)
goto out;
}
if (unlikely(iter->flags & BTREE_ITER_WITH_JOURNAL) &&

View File

@ -655,7 +655,9 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
*/
if (ck->journal.seq == journal_last_seq(j))
commit_flags |= BCH_WATERMARK_reclaim;
else
if (ck->journal.seq != journal_last_seq(j) ||
j->watermark == BCH_WATERMARK_stripe)
commit_flags |= BCH_TRANS_COMMIT_no_journal_res;
ret = bch2_btree_iter_traverse(&b_iter) ?:

View File

@ -146,6 +146,7 @@ static size_t btree_node_u64s_with_format(struct btree_nr_keys nr,
*
* @c: filesystem handle
* @b: btree node to rewrite
* @nr: number of keys for new node (i.e. b->nr)
* @new_f: bkey format to translate keys to
*
* Returns: true if all re-packed keys will be able to fit in a new node.
@ -1190,6 +1191,9 @@ bch2_btree_update_start(struct btree_trans *trans, struct btree_path *path,
return as;
err:
bch2_btree_update_free(as, trans);
if (!bch2_err_matches(ret, ENOSPC) &&
!bch2_err_matches(ret, EROFS))
bch_err_fn_ratelimited(c, ret);
return ERR_PTR(ret);
}
@ -1665,7 +1669,7 @@ bch2_btree_insert_keys_interior(struct btree_update *as,
*
* @as: btree_update object
* @trans: btree_trans object
* @path: path that points to current node
* @path_idx: path that points to current node
* @b: node to insert keys into
* @keys: list of keys to insert
* @flags: transaction commit flags

View File

@ -360,7 +360,8 @@ static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_a
init_waitqueue_head(&thr->output.wait);
darray_init(&thr->output2);
if (copy_from_user(devs, &user_arg->devs[0], sizeof(user_arg->devs[0]) * arg.nr_devs)) {
if (copy_from_user(devs, &user_arg->devs[0],
array_size(sizeof(user_arg->devs[0]), arg.nr_devs))) {
ret = -EINVAL;
goto err;
}
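Note: array_size() (from linux/overflow.h) computes the product but saturates to SIZE_MAX on overflow, so a hostile nr_devs can no longer wrap the multiplication into a short copy; the saturated length simply makes copy_from_user() fail. A generic sketch of the semantics (names are illustrative):

/* bytes == sizeof(__u64) * nr, or SIZE_MAX if that multiply would overflow */
size_t bytes = array_size(sizeof(__u64), nr);

if (copy_from_user(dst, user_ptr, bytes))	/* a saturated size fails the copy */
	return -EINVAL;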

View File

@ -418,6 +418,8 @@ static void bch2_update_unwritten_extent(struct btree_trans *trans,
continue;
}
bch_err_fn_ratelimited(c, ret);
if (ret)
return;

View File

@ -198,6 +198,34 @@ static struct bkey_i_dirent *dirent_create_key(struct btree_trans *trans,
return dirent;
}
int bch2_dirent_create_snapshot(struct btree_trans *trans,
u64 dir, u32 snapshot,
const struct bch_hash_info *hash_info,
u8 type, const struct qstr *name, u64 dst_inum,
u64 *dir_offset,
bch_str_hash_flags_t str_hash_flags)
{
subvol_inum zero_inum = { 0 };
struct bkey_i_dirent *dirent;
int ret;
dirent = dirent_create_key(trans, zero_inum, type, name, dst_inum);
ret = PTR_ERR_OR_ZERO(dirent);
if (ret)
return ret;
dirent->k.p.inode = dir;
dirent->k.p.snapshot = snapshot;
ret = bch2_hash_set_snapshot(trans, bch2_dirent_hash_desc, hash_info,
zero_inum, snapshot,
&dirent->k_i, str_hash_flags,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
*dir_offset = dirent->k.p.offset;
return ret;
}
int bch2_dirent_create(struct btree_trans *trans, subvol_inum dir,
const struct bch_hash_info *hash_info,
u8 type, const struct qstr *name, u64 dst_inum,

View File

@ -35,6 +35,10 @@ static inline unsigned dirent_val_u64s(unsigned len)
int bch2_dirent_read_target(struct btree_trans *, subvol_inum,
struct bkey_s_c_dirent, subvol_inum *);
int bch2_dirent_create_snapshot(struct btree_trans *, u64, u32,
const struct bch_hash_info *, u8,
const struct qstr *, u64, u64 *,
bch_str_hash_flags_t);
int bch2_dirent_create(struct btree_trans *, subvol_inum,
const struct bch_hash_info *, u8,
const struct qstr *, u64, u64 *,

View File

@ -231,6 +231,7 @@
x(BCH_ERR_nopromote, nopromote_unwritten) \
x(BCH_ERR_nopromote, nopromote_congested) \
x(BCH_ERR_nopromote, nopromote_in_flight) \
x(BCH_ERR_nopromote, nopromote_no_writes) \
x(BCH_ERR_nopromote, nopromote_enomem)
enum bch_errcode {

View File

@ -52,23 +52,20 @@ struct readpages_iter {
static int readpages_iter_init(struct readpages_iter *iter,
struct readahead_control *ractl)
{
memset(iter, 0, sizeof(*iter));
struct folio *folio;
iter->mapping = ractl->mapping;
*iter = (struct readpages_iter) { ractl->mapping };
int ret = bch2_filemap_get_contig_folios_d(iter->mapping,
ractl->_index << PAGE_SHIFT,
(ractl->_index + ractl->_nr_pages) << PAGE_SHIFT,
0, mapping_gfp_mask(iter->mapping),
&iter->folios);
if (ret)
return ret;
while ((folio = __readahead_folio(ractl))) {
if (!bch2_folio_create(folio, GFP_KERNEL) ||
darray_push(&iter->folios, folio)) {
bch2_folio_release(folio);
ractl->_nr_pages += folio_nr_pages(folio);
ractl->_index -= folio_nr_pages(folio);
return iter->folios.nr ? 0 : -ENOMEM;
}
darray_for_each(iter->folios, fi) {
ractl->_nr_pages -= 1U << folio_order(*fi);
__bch2_folio_create(*fi, __GFP_NOFAIL|GFP_KERNEL);
folio_put(*fi);
folio_put(*fi);
folio_put(folio);
}
return 0;
@ -270,12 +267,12 @@ void bch2_readahead(struct readahead_control *ractl)
struct btree_trans *trans = bch2_trans_get(c);
struct folio *folio;
struct readpages_iter readpages_iter;
int ret;
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
ret = readpages_iter_init(&readpages_iter, ractl);
BUG_ON(ret);
int ret = readpages_iter_init(&readpages_iter, ractl);
if (ret)
return;
bch2_pagecache_add_get(inode);

View File

@ -77,9 +77,6 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
if ((offset|iter->count) & (block_bytes(c) - 1))
return -EINVAL;
ret = min_t(loff_t, iter->count,
max_t(loff_t, 0, i_size_read(&inode->v) - offset));

View File

@ -59,23 +59,8 @@ static s64 bch2_count_subdirs(struct btree_trans *trans, u64 inum,
return ret ?: subdirs;
}
static int __snapshot_lookup_subvol(struct btree_trans *trans, u32 snapshot,
u32 *subvol)
{
struct bch_snapshot s;
int ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshots,
POS(0, snapshot), 0,
snapshot, &s);
if (!ret)
*subvol = le32_to_cpu(s.subvol);
else if (bch2_err_matches(ret, ENOENT))
bch_err(trans->c, "snapshot %u not found", snapshot);
return ret;
}
static int __subvol_lookup(struct btree_trans *trans, u32 subvol,
u32 *snapshot, u64 *inum)
static int subvol_lookup(struct btree_trans *trans, u32 subvol,
u32 *snapshot, u64 *inum)
{
struct bch_subvolume s;
int ret;
@ -87,12 +72,6 @@ static int __subvol_lookup(struct btree_trans *trans, u32 subvol,
return ret;
}
static int subvol_lookup(struct btree_trans *trans, u32 subvol,
u32 *snapshot, u64 *inum)
{
return lockrestart_do(trans, __subvol_lookup(trans, subvol, snapshot, inum));
}
static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
struct bch_inode_unpacked *inode)
{
@ -120,7 +99,7 @@ err:
return ret;
}
static int __lookup_inode(struct btree_trans *trans, u64 inode_nr,
static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
struct bch_inode_unpacked *inode,
u32 *snapshot)
{
@ -145,13 +124,6 @@ err:
return ret;
}
static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
struct bch_inode_unpacked *inode,
u32 *snapshot)
{
return lockrestart_do(trans, __lookup_inode(trans, inode_nr, inode, snapshot));
}
static int __lookup_dirent(struct btree_trans *trans,
struct bch_hash_info hash_info,
subvol_inum dir, struct qstr *name,
@ -227,35 +199,43 @@ err:
}
/* Get lost+found, create if it doesn't exist: */
static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
struct bch_inode_unpacked *lostfound)
{
struct bch_fs *c = trans->c;
struct bch_inode_unpacked root;
struct bch_hash_info root_hash_info;
struct qstr lostfound_str = QSTR("lost+found");
subvol_inum root_inum = { .subvol = subvol };
u64 inum = 0;
unsigned d_type = 0;
u32 snapshot;
int ret;
ret = __subvol_lookup(trans, subvol, &snapshot, &root_inum.inum);
struct bch_snapshot_tree st;
ret = bch2_snapshot_tree_lookup(trans,
bch2_snapshot_tree(c, snapshot), &st);
if (ret)
return ret;
ret = __lookup_inode(trans, root_inum.inum, &root, &snapshot);
subvol_inum root_inum = { .subvol = le32_to_cpu(st.master_subvol) };
u32 subvol_snapshot;
ret = subvol_lookup(trans, le32_to_cpu(st.master_subvol),
&subvol_snapshot, &root_inum.inum);
bch_err_msg(c, ret, "looking up root subvol");
if (ret)
return ret;
root_hash_info = bch2_hash_info_init(c, &root);
struct bch_inode_unpacked root_inode;
struct bch_hash_info root_hash_info;
ret = lookup_inode(trans, root_inum.inum, &root_inode, &snapshot);
bch_err_msg(c, ret, "looking up root inode");
if (ret)
return ret;
root_hash_info = bch2_hash_info_init(c, &root_inode);
ret = __lookup_dirent(trans, root_hash_info, root_inum,
&lostfound_str, &inum, &d_type);
if (bch2_err_matches(ret, ENOENT)) {
bch_notice(c, "creating lost+found");
&lostfound_str, &inum, &d_type);
if (bch2_err_matches(ret, ENOENT))
goto create_lostfound;
}
bch_err_fn(c, ret);
if (ret)
@ -270,20 +250,50 @@ static int lookup_lostfound(struct btree_trans *trans, u32 subvol,
* The bch2_check_dirents pass has already run, dangling dirents
* shouldn't exist here:
*/
return __lookup_inode(trans, inum, lostfound, &snapshot);
return lookup_inode(trans, inum, lostfound, &snapshot);
create_lostfound:
bch2_inode_init_early(c, lostfound);
/*
* XXX: we could have a nicer log message here if we had a nice way to
* walk backpointers to print a path
*/
bch_notice(c, "creating lost+found in snapshot %u", le32_to_cpu(st.root_snapshot));
ret = bch2_create_trans(trans, root_inum, &root,
lostfound, &lostfound_str,
0, 0, S_IFDIR|0700, 0, NULL, NULL,
(subvol_inum) { }, 0);
u64 now = bch2_current_time(c);
struct btree_iter lostfound_iter = { NULL };
u64 cpu = raw_smp_processor_id();
bch2_inode_init_early(c, lostfound);
bch2_inode_init_late(lostfound, now, 0, 0, S_IFDIR|0700, 0, &root_inode);
lostfound->bi_dir = root_inode.bi_inum;
root_inode.bi_nlink++;
ret = bch2_inode_create(trans, &lostfound_iter, lostfound, snapshot, cpu);
if (ret)
goto err;
bch2_btree_iter_set_snapshot(&lostfound_iter, snapshot);
ret = bch2_btree_iter_traverse(&lostfound_iter);
if (ret)
goto err;
ret = bch2_dirent_create_snapshot(trans,
root_inode.bi_inum, snapshot, &root_hash_info,
mode_to_type(lostfound->bi_mode),
&lostfound_str,
lostfound->bi_inum,
&lostfound->bi_dir_offset,
BCH_HASH_SET_MUST_CREATE) ?:
bch2_inode_write_flags(trans, &lostfound_iter, lostfound,
BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
err:
bch_err_msg(c, ret, "creating lost+found");
bch2_trans_iter_exit(trans, &lostfound_iter);
return ret;
}
static int __reattach_inode(struct btree_trans *trans,
static int reattach_inode(struct btree_trans *trans,
struct bch_inode_unpacked *inode,
u32 inode_snapshot)
{
@ -292,14 +302,9 @@ static int __reattach_inode(struct btree_trans *trans,
char name_buf[20];
struct qstr name;
u64 dir_offset = 0;
u32 subvol;
int ret;
ret = __snapshot_lookup_subvol(trans, inode_snapshot, &subvol);
if (ret)
return ret;
ret = lookup_lostfound(trans, subvol, &lostfound);
ret = lookup_lostfound(trans, inode_snapshot, &lostfound);
if (ret)
return ret;
@ -316,15 +321,12 @@ static int __reattach_inode(struct btree_trans *trans,
snprintf(name_buf, sizeof(name_buf), "%llu", inode->bi_inum);
name = (struct qstr) QSTR(name_buf);
ret = bch2_dirent_create(trans,
(subvol_inum) {
.subvol = subvol,
.inum = lostfound.bi_inum,
},
&dir_hash,
inode_d_type(inode),
&name, inode->bi_inum, &dir_offset,
BCH_HASH_SET_MUST_CREATE);
ret = bch2_dirent_create_snapshot(trans,
lostfound.bi_inum, inode_snapshot,
&dir_hash,
inode_d_type(inode),
&name, inode->bi_inum, &dir_offset,
BCH_HASH_SET_MUST_CREATE);
if (ret)
return ret;
@ -334,16 +336,6 @@ static int __reattach_inode(struct btree_trans *trans,
return __write_inode(trans, inode, inode_snapshot);
}
static int reattach_inode(struct btree_trans *trans,
struct bch_inode_unpacked *inode,
u32 inode_snapshot)
{
int ret = commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
__reattach_inode(trans, inode, inode_snapshot));
bch_err_msg(trans->c, ret, "reattaching inode %llu", inode->bi_inum);
return ret;
}
static int remove_backpointer(struct btree_trans *trans,
struct bch_inode_unpacked *inode)
{
@ -1721,7 +1713,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
u32 target_snapshot;
u64 target_inum;
ret = __subvol_lookup(trans, target_subvol,
ret = subvol_lookup(trans, target_subvol,
&target_snapshot, &target_inum);
if (ret && !bch2_err_matches(ret, ENOENT))
goto err;
@ -1733,7 +1725,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
goto err;
}
ret = __lookup_inode(trans, target_inum,
ret = lookup_inode(trans, target_inum,
&subvol_root, &target_snapshot);
if (ret && !bch2_err_matches(ret, ENOENT))
goto err;
@ -1893,7 +1885,7 @@ static int check_root_trans(struct btree_trans *trans)
u64 inum;
int ret;
ret = __subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
ret = subvol_lookup(trans, BCACHEFS_ROOT_SUBVOL, &snapshot, &inum);
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
@ -1915,7 +1907,7 @@ static int check_root_trans(struct btree_trans *trans)
goto err;
}
ret = __lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
ret = lookup_inode(trans, BCACHEFS_ROOT_INO, &root_inode, &snapshot);
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
@ -2006,10 +1998,10 @@ static int check_path(struct btree_trans *trans,
break;
}
ret = lockrestart_do(trans,
PTR_ERR_OR_ZERO((d = dirent_get_by_pos(trans, &dirent_iter,
SPOS(inode->bi_dir, inode->bi_dir_offset,
parent_snapshot))).k));
d = dirent_get_by_pos(trans, &dirent_iter,
SPOS(inode->bi_dir, inode->bi_dir_offset,
parent_snapshot));
ret = bkey_err(d.s_c);
if (ret && !bch2_err_matches(ret, ENOENT))
break;
@ -2046,7 +2038,8 @@ static int check_path(struct btree_trans *trans,
ret = lookup_inode(trans, inode->bi_dir, inode, &snapshot);
if (ret) {
/* Should have been caught in dirents pass */
bch_err(c, "error looking up parent directory: %i", ret);
if (!bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err(c, "error looking up parent directory: %i", ret);
break;
}
@ -2058,19 +2051,19 @@ static int check_path(struct btree_trans *trans,
pr_err("%llu:%u", i->inum, i->snapshot);
pr_err("%llu:%u", inode->bi_inum, snapshot);
if (!fsck_err(c, dir_loop,
"directory structure loop"))
if (!fsck_err(c, dir_loop, "directory structure loop"))
return 0;
ret = commit_do(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc,
remove_backpointer(trans, inode));
if (ret) {
bch_err(c, "error removing dirent: %i", ret);
ret = remove_backpointer(trans, inode);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err_msg(c, ret, "removing dirent");
if (ret)
break;
}
ret = reattach_inode(trans, inode, snapshot);
if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
bch_err_msg(c, ret, "reattaching inode %llu", inode->bi_inum);
break;
}
}
fsck_err:
@ -2085,31 +2078,26 @@ fsck_err:
*/
int bch2_check_directory_structure(struct bch_fs *c)
{
struct btree_trans *trans = bch2_trans_get(c);
struct btree_iter iter;
struct bkey_s_c k;
struct bch_inode_unpacked u;
pathbuf path = { 0, };
int ret;
for_each_btree_key_old(trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
if (!bkey_is_inode(k.k))
continue;
ret = bch2_trans_run(c,
for_each_btree_key_commit(trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_INTENT|
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
if (!bkey_is_inode(k.k))
continue;
BUG_ON(bch2_inode_unpack(k, &u));
BUG_ON(bch2_inode_unpack(k, &u));
if (u.bi_flags & BCH_INODE_unlinked)
continue;
if (u.bi_flags & BCH_INODE_unlinked)
continue;
ret = check_path(trans, &path, &u, iter.pos.snapshot);
if (ret)
break;
}
bch2_trans_iter_exit(trans, &iter);
bch2_trans_put(trans);
check_path(trans, &path, &u, iter.pos.snapshot);
})));
darray_exit(&path);
bch_err_fn(c, ret);

View File

@ -34,8 +34,7 @@ int bch2_extent_fallocate(struct btree_trans *trans,
struct open_buckets open_buckets = { 0 };
struct bkey_s_c k;
struct bkey_buf old, new;
unsigned sectors_allocated = 0;
bool have_reservation = false;
unsigned sectors_allocated = 0, new_replicas;
bool unwritten = opts.nocow &&
c->sb.version >= bcachefs_metadata_version_unwritten_extents;
int ret;
@ -50,28 +49,20 @@ int bch2_extent_fallocate(struct btree_trans *trans,
return ret;
sectors = min_t(u64, sectors, k.k->p.offset - iter->pos.offset);
new_replicas = max(0, (int) opts.data_replicas -
(int) bch2_bkey_nr_ptrs_fully_allocated(k));
if (!have_reservation) {
unsigned new_replicas =
max(0, (int) opts.data_replicas -
(int) bch2_bkey_nr_ptrs_fully_allocated(k));
/*
* Get a disk reservation before (in the nocow case) calling
* into the allocator:
*/
ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
if (unlikely(ret))
goto err;
/*
* Get a disk reservation before (in the nocow case) calling
* into the allocator:
*/
ret = bch2_disk_reservation_get(c, &disk_res, sectors, new_replicas, 0);
if (unlikely(ret))
goto err;
bch2_bkey_buf_reassemble(&old, c, k);
}
bch2_bkey_buf_reassemble(&old, c, k);
if (have_reservation) {
if (!bch2_extents_match(k, bkey_i_to_s_c(old.k)))
goto err;
bch2_key_resize(&new.k->k, sectors);
} else if (!unwritten) {
if (!unwritten) {
struct bkey_i_reservation *reservation;
bch2_bkey_buf_realloc(&new, c, sizeof(*reservation) / sizeof(u64));
@ -118,13 +109,16 @@ int bch2_extent_fallocate(struct btree_trans *trans,
ptr->unwritten = true;
}
have_reservation = true;
ret = bch2_extent_update(trans, inum, iter, new.k, &disk_res,
0, i_sectors_delta, true);
err:
if (!ret && sectors_allocated)
bch2_increment_clock(c, sectors_allocated, WRITE);
if (should_print_err(ret))
bch_err_inum_offset_ratelimited(c,
inum.inum,
iter->pos.offset << 9,
"%s(): error: %s", __func__, bch2_err_str(ret));
bch2_open_buckets_put(c, &open_buckets);
bch2_disk_reservation_put(c, &disk_res);

View File

@ -172,11 +172,13 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
int ret;
if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_promote))
return NULL;
return ERR_PTR(-BCH_ERR_nopromote_no_writes);
op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_NOFS);
if (!op)
op = kzalloc(sizeof(*op) + sizeof(struct bio_vec) * pages, GFP_KERNEL);
if (!op) {
ret = -BCH_ERR_nopromote_enomem;
goto err;
}
op->start_time = local_clock();
op->pos = pos;
@ -187,24 +189,29 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
*/
*rbio = kzalloc(sizeof(struct bch_read_bio) +
sizeof(struct bio_vec) * pages,
GFP_NOFS);
if (!*rbio)
GFP_KERNEL);
if (!*rbio) {
ret = -BCH_ERR_nopromote_enomem;
goto err;
}
rbio_init(&(*rbio)->bio, opts);
bio_init(&(*rbio)->bio, NULL, (*rbio)->bio.bi_inline_vecs, pages, 0);
if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9,
GFP_NOFS))
if (bch2_bio_alloc_pages(&(*rbio)->bio, sectors << 9, GFP_KERNEL)) {
ret = -BCH_ERR_nopromote_enomem;
goto err;
}
(*rbio)->bounce = true;
(*rbio)->split = true;
(*rbio)->kmalloc = true;
if (rhashtable_lookup_insert_fast(&c->promote_table, &op->hash,
bch_promote_params))
bch_promote_params)) {
ret = -BCH_ERR_nopromote_in_flight;
goto err;
}
bio = &op->write.op.wbio.bio;
bio_init(bio, NULL, bio->bi_inline_vecs, pages, 0);
@ -223,9 +230,8 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
* -BCH_ERR_ENOSPC_disk_reservation:
*/
if (ret) {
ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
bch_promote_params);
BUG_ON(ret);
BUG_ON(rhashtable_remove_fast(&c->promote_table, &op->hash,
bch_promote_params));
goto err;
}
@ -239,7 +245,7 @@ err:
*rbio = NULL;
kfree(op);
bch2_write_ref_put(c, BCH_WRITE_REF_promote);
return NULL;
return ERR_PTR(ret);
}
noinline
@ -274,10 +280,9 @@ static struct promote_op *promote_alloc(struct btree_trans *trans,
? BTREE_ID_reflink
: BTREE_ID_extents,
k, pos, pick, opts, sectors, rbio);
if (!promote) {
ret = -BCH_ERR_nopromote_enomem;
ret = PTR_ERR_OR_ZERO(promote);
if (ret)
goto nopromote;
}
*bounce = true;
*read_full = promote_full;

View File

@ -1461,6 +1461,10 @@ err:
op->flags |= BCH_WRITE_DONE;
if (ret < 0) {
bch_err_inum_offset_ratelimited(c,
op->pos.inode,
op->pos.offset << 9,
"%s(): error: %s", __func__, bch2_err_str(ret));
op->error = ret;
break;
}

View File

@ -62,6 +62,7 @@ EXPORT_SYMBOL_GPL(u128_div);
/**
* mean_and_variance_get_mean() - get mean from @s
* @s: mean and variance number of samples and their sums
*/
s64 mean_and_variance_get_mean(struct mean_and_variance s)
{
@ -71,6 +72,7 @@ EXPORT_SYMBOL_GPL(mean_and_variance_get_mean);
/**
* mean_and_variance_get_variance() - get variance from @s1
* @s1: mean and variance number of samples and sums
*
* see linked pdf equation 12.
*/
@ -89,6 +91,7 @@ EXPORT_SYMBOL_GPL(mean_and_variance_get_variance);
/**
* mean_and_variance_get_stddev() - get standard deviation from @s
* @s: mean and variance number of samples and their sums
*/
u32 mean_and_variance_get_stddev(struct mean_and_variance s)
{
@ -98,8 +101,8 @@ EXPORT_SYMBOL_GPL(mean_and_variance_get_stddev);
/**
* mean_and_variance_weighted_update() - exponentially weighted variant of mean_and_variance_update()
* @s1: ..
* @s2: ..
* @s: mean and variance number of samples and their sums
* @x: new value to include in the &mean_and_variance_weighted
*
* see linked pdf: function derived from equations 140-143 where alpha = 2^w.
* values are stored bitshifted for performance and added precision.
@ -129,6 +132,7 @@ EXPORT_SYMBOL_GPL(mean_and_variance_weighted_update);
/**
* mean_and_variance_weighted_get_mean() - get mean from @s
* @s: mean and variance number of samples and their sums
*/
s64 mean_and_variance_weighted_get_mean(struct mean_and_variance_weighted s)
{
@ -138,6 +142,7 @@ EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_mean);
/**
* mean_and_variance_weighted_get_variance() -- get variance from @s
* @s: mean and variance number of samples and their sums
*/
u64 mean_and_variance_weighted_get_variance(struct mean_and_variance_weighted s)
{
@ -148,6 +153,7 @@ EXPORT_SYMBOL_GPL(mean_and_variance_weighted_get_variance);
/**
* mean_and_variance_weighted_get_stddev() - get standard deviation from @s
* @s: mean and variance number of samples and their sums
*/
u32 mean_and_variance_weighted_get_stddev(struct mean_and_variance_weighted s)
{

View File

@ -177,7 +177,8 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
prt_str(&buf, "target=");
bch2_target_to_text(&buf, c, r->target);
prt_str(&buf, " compression=");
prt_str(&buf, bch2_compression_opts[r->compression]);
struct bch_compression_opt opt = __bch2_compression_decode(r->compression);
prt_str(&buf, bch2_compression_opts[opt.type]);
prt_str(&buf, " ");
bch2_bkey_val_to_text(&buf, c, k);

View File

@ -39,7 +39,7 @@
x(check_dirents, PASS_FSCK) \
x(check_xattrs, PASS_FSCK) \
x(check_root, PASS_ONLINE|PASS_FSCK) \
x(check_directory_structure, PASS_FSCK) \
x(check_directory_structure, PASS_ONLINE|PASS_FSCK) \
x(check_nlinks, PASS_FSCK) \
x(delete_dead_inodes, PASS_FSCK|PASS_UNCLEAN) \
x(fix_reflink_p, 0) \

View File

@ -310,7 +310,7 @@ static inline void check_indirect_extent_deleting(struct bkey_i *new, unsigned *
if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
new->k.type = KEY_TYPE_deleted;
new->k.size = 0;
set_bkey_val_u64s(&new->k, 0);;
set_bkey_val_u64s(&new->k, 0);
*flags &= ~BTREE_TRIGGER_INSERT;
}
}