Update bcachefs sources to 61ebcb532a bcachefs: Fix for allocating before backpointers have been checked

Kent Overstreet 2022-10-24 11:34:07 -04:00
parent ae43a58d97
commit 980f7437e2
6 changed files with 59 additions and 13 deletions

View File

@@ -1 +1 @@
70fa0c1ff48feba041a8243b1a52ee57cffe1e0e
61ebcb532a1266e5e36f354858b552e2a4fb9925

View File

@@ -14,6 +14,7 @@
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_iter.h"
#include "btree_update.h"
#include "btree_gc.h"
@@ -333,6 +334,28 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
goto err;
}
if (!test_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags)) {
struct bch_backpointer bp;
u64 bp_offset = 0;
ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, b), -1,
&bp_offset, &bp, 0);
if (ret) {
ob = ERR_PTR(ret);
goto err;
}
if (bp_offset != U64_MAX) {
/*
* Bucket may have data in it - we don't call
* bch2_trans_inconsistent() because fsck hasn't
* finished yet
*/
ob = NULL;
goto err;
}
}
ob = __try_alloc_bucket(c, ca, b, reserve, &a,
skipped_open,
skipped_need_journal_commit,
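
The hunk above is the core of the fix: until BCH_FS_CHECK_BACKPOINTERS_DONE is set, i.e. before fsck has verified backpointers, a bucket is only handed out if no backpointer still points into it. As a reading aid, here is that guard pulled out into a standalone helper; this is a sketch assuming the bch2_get_next_backpointer() signature and the bp_offset == U64_MAX "nothing found" convention visible in the diff, and the helper name is hypothetical:

/*
 * Returns > 0 if the bucket still has a backpointer (may contain data),
 * 0 if it looks empty, or a negative error from the btree walk.
 */
static int bucket_may_have_data(struct btree_trans *trans,
				struct bch_dev *ca, u64 bucket)
{
	struct bch_backpointer bp;
	u64 bp_offset = 0;
	int ret;

	ret = bch2_get_next_backpointer(trans, POS(ca->dev_idx, bucket), -1,
					&bp_offset, &bp, 0);
	if (ret)
		return ret;

	/* bp_offset == U64_MAX means no backpointer was found */
	return bp_offset != U64_MAX;
}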

View File

@@ -1222,19 +1222,15 @@ struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
}
struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
__bch2_btree_path_set_pos(struct btree_trans *trans,
struct btree_path *path, struct bpos new_pos,
bool intent, unsigned long ip)
bool intent, unsigned long ip, int cmp)
{
int cmp = bpos_cmp(new_pos, path->pos);
unsigned l = path->level;
EBUG_ON(trans->restarted);
EBUG_ON(!path->ref);
if (!cmp)
return path;
path = bch2_btree_path_make_mut(trans, path, intent, ip);
path->pos = new_pos;

View File

@@ -146,8 +146,21 @@ bch2_btree_path_make_mut(struct btree_trans *trans,
}
struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
struct bpos, bool, unsigned long);
__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
struct bpos, bool, unsigned long, int);
static inline struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
struct btree_path *path, struct bpos new_pos,
bool intent, unsigned long ip)
{
int cmp = bpos_cmp(new_pos, path->pos);
return cmp
? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
: path;
}
int __must_check bch2_btree_path_traverse(struct btree_trans *,
struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
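
Paired with the btree_iter.c hunk above, this header change splits bch2_btree_path_set_pos() into an inline fast path and an out-of-line slow path: the bpos_cmp() now happens in the header, so a caller passing an unchanged position never makes the function call, and the already-computed cmp is handed to __bch2_btree_path_set_pos() rather than recomputed. A self-contained toy showing the same fast-path/slow-path split (the toy_* names are illustrative, not bcachefs API):

struct toy_pos  { long long inode, offset; };
struct toy_path { struct toy_pos pos; };

static inline int toy_pos_cmp(struct toy_pos a, struct toy_pos b)
{
	if (a.inode != b.inode)
		return a.inode < b.inode ? -1 : 1;
	if (a.offset != b.offset)
		return a.offset < b.offset ? -1 : 1;
	return 0;
}

/* Slow path: only reached when the position actually moves */
static void __toy_set_pos(struct toy_path *path, struct toy_pos new_pos, int cmp)
{
	(void) cmp;	/* a real implementation would use cmp to limit invalidation */
	path->pos = new_pos;
}

/* Fast path, inline: an unchanged position costs only the comparison */
static inline void toy_set_pos(struct toy_path *path, struct toy_pos new_pos)
{
	int cmp = toy_pos_cmp(new_pos, path->pos);

	if (cmp)
		__toy_set_pos(path, new_pos, cmp);
}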

View File

@@ -1666,10 +1666,21 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
goto out;
}
/*
* XXX: per POSIX and fstests generic/275, on -ENOSPC we're
* supposed to write as much as we have disk space for.
*
* On failure here we should still write out a partial page if
* we aren't completely out of disk space - we don't do that
* yet:
*/
ret = bch2_page_reservation_get(c, inode, page, &res,
pg_offset, pg_len);
if (ret)
goto out;
if (unlikely(ret)) {
if (!reserved)
goto out;
break;
}
reserved += pg_len;
}
@@ -1678,10 +1689,10 @@ static int __bch2_buffered_write(struct bch_inode_info *inode,
for (i = 0; i < nr_pages; i++)
flush_dcache_page(pages[i]);
while (copied < len) {
while (copied < reserved) {
struct page *page = pages[(offset + copied) >> PAGE_SHIFT];
unsigned pg_offset = (offset + copied) & (PAGE_SIZE - 1);
unsigned pg_len = min_t(unsigned, len - copied,
unsigned pg_len = min_t(unsigned, reserved - copied,
PAGE_SIZE - pg_offset);
unsigned pg_copied = copy_page_from_iter_atomic(page,
pg_offset, pg_len, iter);
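
Taken together, the two __bch2_buffered_write() hunks make the write path degrade to a short write instead of failing outright: a reservation failure after the first page breaks out of the reservation loop, and the copy loop is then bounded by reserved rather than len. A self-contained sketch of that control flow, with reserve_page() and copy_chunk() as hypothetical stand-ins for bch2_page_reservation_get() and the per-page copy:

#include <stddef.h>

static int    reserve_page(size_t len) { (void) len; return 0; }
static size_t copy_chunk(size_t len)   { return len; }

#define TOY_PAGE_SIZE 4096u

/* Reserve as much as we can, then copy only what was actually reserved */
static long buffered_write_sketch(size_t len)
{
	size_t reserved = 0, copied = 0;

	while (reserved < len) {
		size_t pg_len = len - reserved < TOY_PAGE_SIZE
			? len - reserved : TOY_PAGE_SIZE;
		int ret = reserve_page(pg_len);

		if (ret) {
			if (!reserved)
				return ret;	/* nothing reserved: report the error */
			break;			/* partial reservation: do a short write */
		}
		reserved += pg_len;
	}

	while (copied < reserved)
		copied += copy_chunk(reserved - copied);

	return (long) copied;	/* may be less than len, like a short write */
}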

View File

@@ -325,6 +325,9 @@ int bch2_extent_update(struct btree_trans *trans,
new_inode->v.bi_size = cpu_to_le64(new_i_size);
le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta);
new_inode->k.p.snapshot = iter->snapshot;
ret = bch2_trans_update(trans, &inode_iter, &new_inode->k_i, 0);
if (unlikely(ret))
goto err;