Mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to a8b3ce7599 fixup! bcachefs: Eliminate more PAGE_SIZE uses
parent 816ec60516
commit b422ff58ba
@@ -1 +1 @@
-6a3927a96b2f362deccc7ee36e20e03f193a9e00
+a8b3ce75990057fc2e3a1c64310668e1ac9ed0f5
@@ -11,5 +11,6 @@ extern void memzero_explicit(void *, size_t);
 int match_string(const char * const *, size_t, const char *);
 
 #define kstrndup(s, n, gfp) strndup(s, n)
+#define kstrdup(s, gfp) strdup(s)
 
 #endif /* _LINUX_STRING_H_ */
@@ -716,6 +716,11 @@ DEFINE_EVENT(transaction_restart, trans_restart_iter_upgrade,
     TP_ARGS(ip)
 );
 
+DEFINE_EVENT(transaction_restart, trans_restart_relock,
+    TP_PROTO(unsigned long ip),
+    TP_ARGS(ip)
+);
+
 DEFINE_EVENT(transaction_restart, trans_restart_traverse,
     TP_PROTO(unsigned long ip),
     TP_ARGS(ip)
@@ -1104,7 +1104,8 @@ static int bch2_allocator_thread(void *arg)
 
         pr_debug("free_inc now empty");
 
-        do {
+        while (1) {
             cond_resched();
             /*
              * Find some buckets that we can invalidate, either
              * they're completely unused, or only contain clean data
@@ -1127,22 +1128,21 @@ static int bch2_allocator_thread(void *arg)
                 wake_up_process(c->gc_thread);
             }
 
+            if (nr)
+                break;
+
             /*
              * If we found any buckets, we have to invalidate them
              * before we scan for more - but if we didn't find very
              * many we may want to wait on more buckets being
              * available so we don't spin:
              */
-            if (!nr ||
-                (nr < ALLOC_SCAN_BATCH(ca) &&
-                 !fifo_empty(&ca->free[RESERVE_NONE]))) {
             ret = wait_buckets_available(c, ca);
             if (ret) {
                 up_read(&c->gc_lock);
                 goto stop;
             }
-        } while (!nr);
+        }
 
         up_read(&c->gc_lock);
 
@@ -188,7 +188,7 @@ static inline enum bset_aux_tree_type bset_aux_tree_type(const struct bset_tree
  * gets to the second cacheline.
  */
 
-#define BSET_CACHELINE 128
+#define BSET_CACHELINE 256
 
 static inline size_t btree_keys_cachelines(const struct btree *b)
 {
@@ -214,7 +214,7 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
         if (bch2_verify_btree_ondisk)
             bch2_btree_node_write(c, b, SIX_LOCK_intent);
         else
-            __bch2_btree_node_write(c, b, SIX_LOCK_read);
+            __bch2_btree_node_write(c, b);
 
         /* wait for any in flight btree write */
         btree_node_wait_on_io(b);
@@ -666,13 +666,9 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
             return NULL;
     }
 
-    /*
-     * Unlock before doing IO:
-     *
-     * XXX: ideally should be dropping all btree node locks here
-     */
-    if (iter && btree_node_read_locked(iter, level + 1))
-        btree_node_unlock(iter, level + 1);
+    /* Unlock before doing IO: */
+    if (iter && sync)
+        bch2_trans_unlock(iter->trans);
 
     bch2_btree_node_read(c, b, sync);
 
@@ -683,6 +679,16 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
         return NULL;
     }
 
+    /*
+     * XXX: this will probably always fail because btree_iter_relock()
+     * currently fails for iterators that aren't pointed at a valid btree
+     * node
+     */
+    if (iter && !bch2_trans_relock(iter->trans)) {
+        six_unlock_intent(&b->c.lock);
+        return ERR_PTR(-EINTR);
+    }
+
     if (lock_type == SIX_LOCK_read)
         six_lock_downgrade(&b->c.lock);
 
@@ -789,10 +795,23 @@ lock_node:
         }
     }
 
-    /* XXX: waiting on IO with btree locks held: */
+    if (unlikely(btree_node_read_in_flight(b))) {
+        six_unlock_type(&b->c.lock, lock_type);
+        bch2_trans_unlock(iter->trans);
+
         wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
                    TASK_UNINTERRUPTIBLE);
 
+        /*
+         * XXX: check if this always fails - btree_iter_relock()
+         * currently fails for iterators that aren't pointed at a valid
+         * btree node
+         */
+        if (iter && !bch2_trans_relock(iter->trans))
+            return ERR_PTR(-EINTR);
+        goto retry;
+    }
+
     prefetch(b->aux_data);
 
     for_each_bset(b, t) {
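The hunk above changes the read-in-flight case from sleeping while still holding btree locks to unlocking, waiting for the read, relocking and retrying. The following is a self-contained sketch of that unlock/wait/relock/retry shape, using a plain pthread mutex and stub helpers rather than bcachefs' six locks and btree transactions (all names here are stand-ins, not the bcachefs API):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t node_lock = PTHREAD_MUTEX_INITIALIZER;
static bool read_in_flight = true;

/* stand-ins for wait_on_bit_io() and bch2_trans_relock() */
static void wait_for_read_to_complete(void) { read_in_flight = false; }
static bool relock_transaction(void)        { return true; }

static int lock_node(void)
{
retry:
    pthread_mutex_lock(&node_lock);

    if (read_in_flight) {
        /* don't sleep on IO with the lock held: drop it first */
        pthread_mutex_unlock(&node_lock);
        wait_for_read_to_complete();

        /* relocking can fail; the real code returns -EINTR so the
         * whole transaction restarts */
        if (!relock_transaction())
            return -1;
        goto retry;
    }

    printf("node locked, read complete\n");
    pthread_mutex_unlock(&node_lock);
    return 0;
}

int main(void)
{
    return lock_node();
}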
@@ -1158,8 +1158,6 @@ static int bch2_gc_btree_gens(struct bch_fs *c, enum btree_id btree_id)
             bch2_bkey_buf_reassemble(&sk, c, k);
             bch2_extent_normalize(c, bkey_i_to_s(sk.k));
 
-            bch2_btree_iter_set_pos(iter, bkey_start_pos(&sk.k->k));
-
             bch2_trans_update(&trans, iter, sk.k, 0);
 
             ret = bch2_trans_commit(&trans, NULL, NULL,
@@ -1206,7 +1204,7 @@ int bch2_gc_gens(struct bch_fs *c)
     }
 
     for (i = 0; i < BTREE_ID_NR; i++)
-        if (btree_node_type_needs_gc(i)) {
+        if ((1 << i) & BTREE_ID_HAS_PTRS) {
             ret = bch2_gc_btree_gens(c, i);
             if (ret) {
                 bch_err(c, "error recalculating oldest_gen: %i", ret);
@@ -241,7 +241,6 @@ bool bch2_compact_whiteouts(struct bch_fs *c, struct btree *b,
 }
 
 static void btree_node_sort(struct bch_fs *c, struct btree *b,
-                struct btree_iter *iter,
                 unsigned start_idx,
                 unsigned end_idx,
                 bool filter_whiteouts)
@@ -377,8 +376,7 @@ void bch2_btree_sort_into(struct bch_fs *c,
  * We're about to add another bset to the btree node, so if there's currently
  * too many bsets - sort some of them together:
  */
-static bool btree_node_compact(struct bch_fs *c, struct btree *b,
-                   struct btree_iter *iter)
+static bool btree_node_compact(struct bch_fs *c, struct btree *b)
 {
     unsigned unwritten_idx;
     bool ret = false;
@@ -390,13 +388,13 @@ static bool btree_node_compact(struct bch_fs *c, struct btree *b,
             break;
 
     if (b->nsets - unwritten_idx > 1) {
-        btree_node_sort(c, b, iter, unwritten_idx,
+        btree_node_sort(c, b, unwritten_idx,
                 b->nsets, false);
         ret = true;
     }
 
     if (unwritten_idx > 1) {
-        btree_node_sort(c, b, iter, 0, unwritten_idx, false);
+        btree_node_sort(c, b, 0, unwritten_idx, false);
         ret = true;
     }
 
@@ -426,12 +424,30 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
               struct btree_iter *iter)
 {
     struct btree_node_entry *bne;
-    bool did_sort;
+    bool reinit_iter = false;
 
     EBUG_ON(!(b->c.lock.state.seq & 1));
     EBUG_ON(iter && iter->l[b->c.level].b != b);
+    BUG_ON(bset_written(b, bset(b, &b->set[1])));
 
-    did_sort = btree_node_compact(c, b, iter);
+    if (b->nsets == MAX_BSETS) {
+        unsigned log_u64s[] = {
+            ilog2(bset_u64s(&b->set[0])),
+            ilog2(bset_u64s(&b->set[1])),
+            ilog2(bset_u64s(&b->set[2])),
+        };
+
+        if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2) {
+            bch2_btree_node_write(c, b, SIX_LOCK_write);
+            reinit_iter = true;
+        }
+    }
+
+    if (b->nsets == MAX_BSETS &&
+        btree_node_compact(c, b))
+        reinit_iter = true;
+
+    BUG_ON(b->nsets >= MAX_BSETS);
 
     bne = want_new_bset(c, b);
     if (bne)
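The new bch2_btree_init_next() path appears to write the node out, when all MAX_BSETS bsets are in use, if the middle bset is large relative to the other two (comparing log2 sizes), and otherwise falls through to btree_node_compact(). A small stand-alone sketch of that heuristic with made-up bset sizes, using a userspace ilog2 stand-in:

#include <stdio.h>

/* userspace stand-in for the kernel's ilog2() */
static unsigned ilog2u(unsigned v)
{
    unsigned r = 0;
    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    /* hypothetical bset sizes in u64s: oldest, middle, newest */
    unsigned bset_u64s[3] = { 4096, 512, 16 };
    unsigned log_u64s[3] = {
        ilog2u(bset_u64s[0]),   /* 12 */
        ilog2u(bset_u64s[1]),   /* 9 */
        ilog2u(bset_u64s[2]),   /* 4 */
    };

    /* mirror of the added condition: 9 >= (12 + 4) / 2, so write out */
    if (log_u64s[1] >= (log_u64s[0] + log_u64s[2]) / 2)
        printf("middle bset is large: write the node\n");
    else
        printf("middle bset is small: just compact in memory\n");
    return 0;
}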
@@ -439,7 +455,7 @@ void bch2_btree_init_next(struct bch_fs *c, struct btree *b,
 
     bch2_btree_build_aux_trees(b);
 
-    if (iter && did_sort)
+    if (iter && reinit_iter)
         bch2_btree_iter_reinit_node(iter, b);
 }
 
@@ -1321,15 +1337,20 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
     return ret;
 }
 
-void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
-                 enum six_lock_type lock_type_held)
+static void btree_write_submit(struct work_struct *work)
 {
+    struct btree_write_bio *wbio = container_of(work, struct btree_write_bio, work);
+
+    bch2_submit_wbio_replicas(&wbio->wbio, wbio->wbio.c, BCH_DATA_btree, &wbio->key);
+}
+
+void __bch2_btree_node_write(struct bch_fs *c, struct btree *b)
+{
     struct btree_write_bio *wbio;
     struct bset_tree *t;
     struct bset *i;
     struct btree_node *bn = NULL;
     struct btree_node_entry *bne = NULL;
-    struct bkey_buf k;
     struct bch_extent_ptr *ptr;
     struct sort_iter sort_iter;
     struct nonce nonce;
@@ -1340,8 +1361,6 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
     bool validate_before_checksum = false;
     void *data;
 
-    bch2_bkey_buf_init(&k);
-
     if (test_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags))
         return;
 
@@ -1518,6 +1537,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
     wbio_init(&wbio->wbio.bio);
     wbio->data = data;
     wbio->bytes = bytes;
+    wbio->wbio.c = c;
     wbio->wbio.used_mempool = used_mempool;
     wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META;
     wbio->wbio.bio.bi_end_io = btree_node_write_endio;
@@ -1540,9 +1560,9 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
      * just make all btree node writes FUA to keep things sane.
      */
 
-    bch2_bkey_buf_copy(&k, c, &b->key);
+    bkey_copy(&wbio->key, &b->key);
 
-    bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(k.k)), ptr)
+    bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&wbio->key)), ptr)
         ptr->offset += b->written;
 
     b->written += sectors_to_write;
@@ -1550,9 +1570,8 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
     atomic64_inc(&c->btree_writes_nr);
     atomic64_add(sectors_to_write, &c->btree_writes_sectors);
 
-    /* XXX: submitting IO with btree locks held: */
-    bch2_submit_wbio_replicas(&wbio->wbio, c, BCH_DATA_btree, k.k);
-    bch2_bkey_buf_exit(&k, c);
+    INIT_WORK(&wbio->work, btree_write_submit);
+    schedule_work(&wbio->work);
     return;
 err:
     set_btree_node_noevict(b);
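The direct call to bch2_submit_wbio_replicas() is replaced by INIT_WORK() plus schedule_work(), so the btree write is now issued later from a workqueue rather than while the caller still holds btree locks; the key is copied into the write bio (wbio->key) so the deferred submit has everything it needs. Below is a minimal, self-contained sketch of that hand-off pattern with stand-in types and synchronous stubs in place of the kernel workqueue and the bcachefs structures:

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for struct work_struct / schedule_work(): a real workqueue
 * would run the callback later from its own worker thread. */
struct work_struct {
    void (*func)(struct work_struct *);
};

static void INIT_WORK(struct work_struct *w, void (*fn)(struct work_struct *))
{
    w->func = fn;
}

static void schedule_work(struct work_struct *w)
{
    w->func(w);     /* stub: runs synchronously for illustration only */
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

/* Stand-in for struct btree_write_bio: the work item is embedded in the
 * object that carries everything the deferred submit needs. */
struct write_bio {
    struct work_struct work;
    const char *key;    /* stand-in for the copied btree pointer key */
};

static void write_submit(struct work_struct *w)
{
    struct write_bio *wbio = container_of(w, struct write_bio, work);

    /* the real worker calls bch2_submit_wbio_replicas(&wbio->wbio, ...) */
    printf("submitting write for key %s\n", wbio->key);
}

int main(void)
{
    static struct write_bio wbio = { .key = "example" };

    /* caller: queue the submit instead of doing IO with locks held */
    INIT_WORK(&wbio.work, write_submit);
    schedule_work(&wbio.work);
    return 0;
}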
@@ -1592,7 +1611,7 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
      * single bset:
      */
     if (b->nsets > 1) {
-        btree_node_sort(c, b, NULL, 0, b->nsets, true);
+        btree_node_sort(c, b, 0, b->nsets, true);
         invalidated_iter = true;
     } else {
         invalidated_iter = bch2_drop_whiteouts(b, COMPACT_ALL);
@@ -1624,11 +1643,10 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
                enum six_lock_type lock_type_held)
 {
-    BUG_ON(lock_type_held == SIX_LOCK_write);
-
     if (lock_type_held == SIX_LOCK_intent ||
-        six_lock_tryupgrade(&b->c.lock)) {
-        __bch2_btree_node_write(c, b, SIX_LOCK_intent);
+        (lock_type_held == SIX_LOCK_read &&
+         six_lock_tryupgrade(&b->c.lock))) {
+        __bch2_btree_node_write(c, b);
 
         /* don't cycle lock unnecessarily: */
         if (btree_node_just_written(b) &&
@@ -1640,7 +1658,10 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
         if (lock_type_held == SIX_LOCK_read)
             six_lock_downgrade(&b->c.lock);
     } else {
-        __bch2_btree_node_write(c, b, SIX_LOCK_read);
+        __bch2_btree_node_write(c, b);
+        if (lock_type_held == SIX_LOCK_write &&
+            btree_node_just_written(b))
+            bch2_btree_post_write_cleanup(c, b);
     }
 }
 
@@ -42,6 +42,7 @@ struct btree_read_bio {
 
 struct btree_write_bio {
     struct work_struct work;
+    __BKEY_PADDED(key, BKEY_BTREE_PTR_VAL_U64s_MAX);
     void *data;
     unsigned bytes;
     struct bch_write_bio wbio;
@@ -144,8 +145,7 @@ void bch2_btree_complete_write(struct bch_fs *, struct btree *,
                    struct btree_write *);
 void bch2_btree_write_error_work(struct work_struct *);
 
-void __bch2_btree_node_write(struct bch_fs *, struct btree *,
-                 enum six_lock_type);
+void __bch2_btree_node_write(struct bch_fs *, struct btree *);
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);
 
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
@@ -465,8 +465,10 @@ bool bch2_trans_relock(struct btree_trans *trans)
 
     trans_for_each_iter(trans, iter)
         if (btree_iter_keep(trans, iter) &&
-            !bch2_btree_iter_relock(iter, true))
+            !bch2_btree_iter_relock(iter, true)) {
+            trace_trans_restart_relock(trans->ip);
             return false;
+        }
     return true;
 }
 
@@ -1631,7 +1633,9 @@ static inline struct bkey_s_c __btree_iter_peek(struct btree_iter *iter, bool wi
      * iter->pos should be mononotically increasing, and always be equal to
      * the key we just returned - except extents can straddle iter->pos:
      */
-    if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
+    if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
+        iter->pos = k.k->p;
+    else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
         iter->pos = bkey_start_pos(k.k);
 
     bch2_btree_iter_verify_entry_exit(iter);
@@ -689,20 +689,16 @@ int bch2_fs_btree_key_cache_init(struct btree_key_cache *c)
 {
     int ret;
 
-    c->shrink.seeks = 1;
-    c->shrink.count_objects = bch2_btree_key_cache_count;
-    c->shrink.scan_objects = bch2_btree_key_cache_scan;
-
-    ret = register_shrinker(&c->shrink);
-    if (ret)
-        return ret;
-
     ret = rhashtable_init(&c->table, &bch2_btree_key_cache_params);
     if (ret)
         return ret;
 
     c->table_init_done = true;
-    return 0;
+
+    c->shrink.seeks = 1;
+    c->shrink.count_objects = bch2_btree_key_cache_count;
+    c->shrink.scan_objects = bch2_btree_key_cache_scan;
+    return register_shrinker(&c->shrink);
 }
 
 void bch2_btree_key_cache_to_text(struct printbuf *out, struct btree_key_cache *c)
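The reordering above makes bch2_fs_btree_key_cache_init() set up the rhashtable before the shrinker is registered, presumably so the shrinker callbacks can never see an uninitialized table, and it now returns register_shrinker()'s result directly. A trivial stand-alone sketch of that "initialize first, publish last" ordering (stub functions, not the bcachefs or kernel API):

#include <stdio.h>

static int table_init(void)          { printf("hash table ready\n"); return 0; }
static int register_reclaimer(void)  { printf("reclaimer registered\n"); return 0; }

static int cache_init(void)
{
    int ret;

    /* build internal state first ... */
    ret = table_init();
    if (ret)
        return ret;

    /* ... then publish to the subsystem that may call back into us */
    return register_reclaimer();
}

int main(void)
{
    return cache_init();
}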
@@ -614,6 +614,10 @@ static inline bool btree_iter_is_extents(struct btree_iter *iter)
      (1U << BTREE_ID_dirents)|          \
      (1U << BTREE_ID_xattrs))
 
+#define BTREE_ID_HAS_PTRS              \
+    ((1U << BTREE_ID_extents)|          \
+     (1U << BTREE_ID_reflink))
+
 static inline bool btree_type_has_snapshots(enum btree_id id)
 {
     return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
@@ -974,20 +974,25 @@ retry:
          * closure argument
          */
         if (flags & BTREE_INSERT_NOUNLOCK) {
+            trace_trans_restart_journal_preres_get(trans->ip);
             ret = -EINTR;
             goto err;
         }
 
         bch2_trans_unlock(trans);
 
-        if (flags & BTREE_INSERT_JOURNAL_RECLAIM)
-            goto err;
+        if (flags & BTREE_INSERT_JOURNAL_RECLAIM) {
+            bch2_btree_update_free(as);
+            return ERR_PTR(ret);
+        }
 
         ret = bch2_journal_preres_get(&c->journal, &as->journal_preres,
                           BTREE_UPDATE_JOURNAL_RES,
                           journal_flags);
-        if (ret)
+        if (ret) {
+            trace_trans_restart_journal_preres_get(trans->ip);
             goto err;
+        }
 
         if (!bch2_trans_relock(trans)) {
             ret = -EINTR;
@@ -256,13 +256,15 @@ static inline size_t bch_btree_keys_u64s_remaining(struct bch_fs *c,
     return remaining;
 }
 
+#define BTREE_WRITE_SET_U64s_BITS 9
+
 static inline unsigned btree_write_set_buffer(struct btree *b)
 {
     /*
      * Could buffer up larger amounts of keys for btrees with larger keys,
      * pending benchmarking:
      */
-    return 4 << 10;
+    return 8 << BTREE_WRITE_SET_U64s_BITS;
 }
 
 static inline struct btree_node_entry *want_new_bset(struct bch_fs *c,
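The old and new return values are the same number of bytes: 8 << 9 is 4096, i.e. the old 4 << 10, with the new constant presumably expressing the buffer as 1 << 9 = 512 u64s of 8 bytes each. A one-line compile-time check of that equivalence:

#include <assert.h>

#define BTREE_WRITE_SET_U64s_BITS 9

/* 512 u64s * 8 bytes = 4096 bytes = the old 4 << 10 */
static_assert(8 << BTREE_WRITE_SET_U64s_BITS == 4 << 10,
              "same buffer size, now expressed in u64s");

int main(void) { return 0; }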
@@ -639,6 +639,8 @@ static int journal_reclaim_wait_done(struct bch_fs *c)
     if (ret)
         return ret;
 
+    journal_reclaim_kick(&c->journal);
+
     if (mutex_trylock(&c->journal.reclaim_lock)) {
         ret = bch2_journal_reclaim(&c->journal);
         mutex_unlock(&c->journal.reclaim_lock);
@@ -832,7 +832,7 @@ static int mark_stripe_bucket(struct bch_fs *c, struct bkey_s_c k,
     if (g->stripe && g->stripe != k.k->p.offset) {
         bch2_fs_inconsistent(c,
                   "bucket %u:%zu gen %u: multiple stripes using same bucket\n%s",
-                  ptr->dev, PTR_BUCKET_NR(ca, ptr), new.gen,
+                  ptr->dev, PTR_BUCKET_NR(ca, ptr), g->mark.gen,
                   (bch2_bkey_val_to_text(&PBUF(buf), c, k), buf));
         return -EINVAL;
     }
@@ -150,7 +150,7 @@ struct dump_iter {
     struct bch_fs *c;
     enum btree_id id;
 
-    char buf[PAGE_SIZE];
+    char buf[1 << 12];
     size_t bytes; /* what's currently in buf */
 
     char __user *ubuf; /* destination user buffer */
@@ -230,7 +230,7 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
     while (k.k && !(err = bkey_err(k))) {
         bch2_bkey_val_to_text(&PBUF(i->buf), i->c, k);
         i->bytes = strlen(i->buf);
-        BUG_ON(i->bytes >= PAGE_SIZE);
+        BUG_ON(i->bytes >= sizeof(i->buf));
         i->buf[i->bytes] = '\n';
         i->bytes++;
 
@@ -84,16 +84,24 @@ const char *bch2_dirent_invalid(const struct bch_fs *c, struct bkey_s_c k)
     if (!len)
         return "empty name";
 
-    /*
-     * older versions of bcachefs were buggy and creating dirent
-     * keys that were bigger than necessary:
-     */
-    if (bkey_val_u64s(k.k) > dirent_val_u64s(len + 7))
+    if (bkey_val_u64s(k.k) > dirent_val_u64s(len))
         return "value too big";
 
     if (len > BCH_NAME_MAX)
         return "dirent name too big";
 
+    if (len == 1 && !memcmp(d.v->d_name, ".", 1))
+        return "invalid name";
+
+    if (len == 2 && !memcmp(d.v->d_name, "..", 2))
+        return "invalid name";
+
+    if (memchr(d.v->d_name, '/', len))
+        return "invalid name";
+
+    if (le64_to_cpu(d.v->d_inum) == d.k->p.inode)
+        return "dirent points to own directory";
+
     return NULL;
 }
 
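The added checks reject ".", "..", names containing '/', and dirents that point back at their own directory. A minimal userspace mirror of just the name checks (not the bkey validation around them):

#include <stdio.h>
#include <string.h>

static const char *dirent_name_invalid(const char *name, size_t len)
{
    if (!len)
        return "empty name";
    if (len == 1 && !memcmp(name, ".", 1))
        return "invalid name";
    if (len == 2 && !memcmp(name, "..", 2))
        return "invalid name";
    if (memchr(name, '/', len))
        return "invalid name";
    return NULL;
}

int main(void)
{
    const char *err = dirent_name_invalid("..", 2);

    printf("\"..\" -> %s\n", err ? err : "ok");
    return 0;
}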
File diff suppressed because it is too large
@@ -3,7 +3,6 @@
 #define _BCACHEFS_FSCK_H
 
 int bch2_fsck_full(struct bch_fs *);
-int bch2_fsck_inode_nlink(struct bch_fs *);
 int bch2_fsck_walk_inodes_only(struct bch_fs *);
 
 #endif /* _BCACHEFS_FSCK_H */
@@ -1005,6 +1005,13 @@ int bch2_fs_recovery(struct bch_fs *c)
 
     }
 
+    if (!c->sb.clean &&
+        !(c->sb.features & (1 << BCH_FEATURE_atomic_nlink))) {
+        bch_info(c, "BCH_FEATURE_atomic_nlink not set and filesystem dirty, fsck required");
+        c->opts.fsck = true;
+        c->opts.fix_errors = FSCK_OPT_YES;
+    }
+
     if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
         bch_info(c, "alloc_v2 feature bit not set, fsck required");
         c->opts.fsck = true;
@@ -1017,6 +1024,13 @@ int bch2_fs_recovery(struct bch_fs *c)
         set_bit(BCH_FS_REBUILD_REPLICAS, &c->flags);
     }
 
+    if (c->sb.version < bcachefs_metadata_version_inode_backpointers) {
+        bch_info(c, "version prior to inode backpointers, upgrade and fsck required");
+        c->opts.version_upgrade = true;
+        c->opts.fsck = true;
+        c->opts.fix_errors = FSCK_OPT_YES;
+    }
+
     ret = bch2_blacklist_table_initialize(c);
     if (ret) {
         bch_err(c, "error initializing blacklist table");
@@ -1179,25 +1193,6 @@ use_clean:
         bch_verbose(c, "alloc write done");
     }
 
-    if (!c->sb.clean) {
-        if (!(c->sb.features & (1 << BCH_FEATURE_atomic_nlink))) {
-            bch_info(c, "checking inode link counts");
-            err = "error in recovery";
-            ret = bch2_fsck_inode_nlink(c);
-            if (ret)
-                goto err;
-            bch_verbose(c, "check inodes done");
-
-        } else {
-            bch_verbose(c, "checking for deleted inodes");
-            err = "error in recovery";
-            ret = bch2_fsck_walk_inodes_only(c);
-            if (ret)
-                goto err;
-            bch_verbose(c, "check inodes done");
-        }
-    }
-
     if (c->opts.fsck) {
         bch_info(c, "starting fsck");
         err = "error in fsck";
@@ -1205,6 +1200,13 @@ use_clean:
         if (ret)
             goto err;
         bch_verbose(c, "fsck done");
+    } else if (!c->sb.clean) {
+        bch_verbose(c, "checking for deleted inodes");
+        err = "error in recovery";
+        ret = bch2_fsck_walk_inodes_only(c);
+        if (ret)
+            goto err;
+        bch_verbose(c, "check inodes done");
     }
 
     if (enabled_qtypes(c)) {
@@ -50,8 +50,7 @@ static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb_handle *sb,
     unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
     unsigned sb_u64s = le32_to_cpu(sb->sb->u64s) + u64s - old_u64s;
 
-    BUG_ON(get_order(__vstruct_bytes(struct bch_sb, sb_u64s)) >
-           sb->page_order);
+    BUG_ON(__vstruct_bytes(struct bch_sb, sb_u64s) > sb->buffer_size);
 
     if (!f && !u64s) {
         /* nothing to do: */
@@ -101,18 +100,23 @@ void bch2_free_super(struct bch_sb_handle *sb)
     if (!IS_ERR_OR_NULL(sb->bdev))
         blkdev_put(sb->bdev, sb->mode);
 
-    free_pages((unsigned long) sb->sb, sb->page_order);
+    kfree(sb->sb);
     memset(sb, 0, sizeof(*sb));
 }
 
 int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
 {
     size_t new_bytes = __vstruct_bytes(struct bch_sb, u64s);
-    unsigned order = get_order(new_bytes);
+    size_t new_buffer_size;
     struct bch_sb *new_sb;
     struct bio *bio;
 
-    if (sb->sb && sb->page_order >= order)
+    if (sb->bdev)
+        new_bytes = max_t(size_t, new_bytes, bdev_logical_block_size(sb->bdev));
+
+    new_buffer_size = roundup_pow_of_two(new_bytes);
+
+    if (sb->sb && sb->buffer_size >= new_buffer_size)
         return 0;
 
     if (sb->have_layout) {
@@ -127,14 +131,15 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
         }
     }
 
-    if (sb->page_order >= order && sb->sb)
+    if (sb->buffer_size >= new_buffer_size && sb->sb)
         return 0;
 
     if (dynamic_fault("bcachefs:add:super_realloc"))
         return -ENOMEM;
 
     if (sb->have_bio) {
-        bio = bio_kmalloc(GFP_KERNEL, 1 << order);
+        bio = bio_kmalloc(GFP_KERNEL,
+                  DIV_ROUND_UP(new_buffer_size, PAGE_SIZE));
         if (!bio)
             return -ENOMEM;
 
@@ -143,17 +148,12 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
         sb->bio = bio;
     }
 
-    new_sb = (void *) __get_free_pages(GFP_NOFS|__GFP_ZERO, order);
+    new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
     if (!new_sb)
         return -ENOMEM;
 
-    if (sb->sb)
-        memcpy(new_sb, sb->sb, PAGE_SIZE << sb->page_order);
-
-    free_pages((unsigned long) sb->sb, sb->page_order);
     sb->sb = new_sb;
-
-    sb->page_order = order;
+    sb->buffer_size = new_buffer_size;
 
     return 0;
 }
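With page_order gone, the superblock buffer is just a kmalloc'd allocation: its size is the vstruct size rounded up to a power of two (and at least one logical block when a device is open), and krealloc preserves the existing contents, which is why the old memcpy/free_pages sequence disappears. A small userspace sketch of the size calculation with made-up numbers (roundup_pow_of_two_ and max_t are stand-ins for the kernel helpers):

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for roundup_pow_of_two() */
static size_t roundup_pow_of_two_(size_t n)
{
    size_t r = 1;
    while (r < n)
        r <<= 1;
    return r;
}

#define max_t(t, a, b) ((t)(a) > (t)(b) ? (t)(a) : (t)(b))

int main(void)
{
    size_t vstruct_bytes = 5000;   /* hypothetical superblock size */
    size_t logical_block = 4096;   /* hypothetical bdev_logical_block_size() */
    size_t new_bytes = max_t(size_t, vstruct_bytes, logical_block);
    size_t buffer_size = roundup_pow_of_two_(new_bytes);

    /* prints: allocate 8192 bytes for a 5000 byte superblock */
    printf("allocate %zu bytes for a %zu byte superblock\n",
           buffer_size, vstruct_bytes);
    return 0;
}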
@@ -475,7 +475,7 @@ reread:
     bio_set_dev(sb->bio, sb->bdev);
     sb->bio->bi_iter.bi_sector = offset;
     bio_set_op_attrs(sb->bio, REQ_OP_READ, REQ_SYNC|REQ_META);
-    bch2_bio_map(sb->bio, sb->sb, PAGE_SIZE << sb->page_order);
+    bch2_bio_map(sb->bio, sb->sb, sb->buffer_size);
 
     if (submit_bio_wait(sb->bio))
         return "IO error";
@@ -492,7 +492,7 @@ reread:
     if (bytes > 512 << sb->sb->layout.sb_max_size_bits)
         return "Bad superblock: too big";
 
-    if (get_order(bytes) > sb->page_order) {
+    if (bytes > sb->buffer_size) {
         if (bch2_sb_realloc(sb, le32_to_cpu(sb->sb->u64s)))
             return "cannot allocate memory";
         goto reread;
@@ -698,8 +698,12 @@ int bch2_write_super(struct bch_fs *c)
     const char *err;
     struct bch_devs_mask sb_written;
     bool wrote, can_mount_without_written, can_mount_with_written;
+    unsigned degraded_flags = BCH_FORCE_IF_DEGRADED;
     int ret = 0;
 
+    if (c->opts.very_degraded)
+        degraded_flags |= BCH_FORCE_IF_LOST;
+
     lockdep_assert_held(&c->sb_lock);
 
     closure_init_stack(cl);
@@ -770,13 +774,13 @@ int bch2_write_super(struct bch_fs *c)
     nr_wrote = dev_mask_nr(&sb_written);
 
     can_mount_with_written =
-        bch2_have_enough_devs(c, sb_written, BCH_FORCE_IF_DEGRADED, false);
+        bch2_have_enough_devs(c, sb_written, degraded_flags, false);
 
     for (i = 0; i < ARRAY_SIZE(sb_written.d); i++)
         sb_written.d[i] = ~sb_written.d[i];
 
     can_mount_without_written =
-        bch2_have_enough_devs(c, sb_written, BCH_FORCE_IF_DEGRADED, false);
+        bch2_have_enough_devs(c, sb_written, degraded_flags, false);
 
     /*
      * If we would be able to mount _without_ the devices we successfully
@@ -516,8 +516,7 @@ static void __bch2_fs_free(struct bch_fs *c)
     if (c->wq)
         destroy_workqueue(c->wq);
 
-    free_pages((unsigned long) c->disk_sb.sb,
-           c->disk_sb.page_order);
+    bch2_free_super(&c->disk_sb);
     kvpfree(c, sizeof(*c));
     module_put(THIS_MODULE);
 }
@@ -6,7 +6,7 @@ struct bch_sb_handle {
     struct bch_sb *sb;
     struct block_device *bdev;
     struct bio *bio;
-    unsigned page_order;
+    size_t buffer_size;
     fmode_t mode;
     unsigned have_layout:1;
     unsigned have_bio:1;
@@ -154,7 +154,7 @@ void bch2_flags_to_text(struct printbuf *out,
 u64 bch2_read_flag_list(char *opt, const char * const list[])
 {
     u64 ret = 0;
-    char *p, *s, *d = kstrndup(opt, PAGE_SIZE - 1, GFP_KERNEL);
+    char *p, *s, *d = kstrdup(opt, GFP_KERNEL);
 
     if (!d)
         return -ENOMEM;