Update bcachefs sources to 400f275d46 bcachefs: Fix check_overlapping_extents()

Kent Overstreet 2023-05-13 00:13:57 -04:00
parent 6e89763956
commit 5ef62f56ab
7 changed files with 27 additions and 43 deletions


@@ -1 +1 @@
5074caad6a72d6e38c21a7e02f5e62048f2046d7
400f275d46228e0ca08d8c931a674a534db0f4fb


@@ -49,7 +49,7 @@
* six_trylock_type(lock, type)
* six_trylock_convert(lock, from, to)
*
* A lock may be held multiple types by the same thread (for read or intent,
* A lock may be held multiple times by the same thread (for read or intent,
* not write). However, the six locks code does _not_ implement the actual
* recursive checks itself though - rather, if your code (e.g. btree iterator
* code) knows that the current thread already has a lock held, and for the

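A minimal illustrative sketch (not part of this commit) of the usage pattern the six.h comment above describes: an intent lock excludes other intent and write holders while still admitting readers, and a write lock is only taken on top of an already-held intent lock. It uses the six_trylock_type() call named in the comment and assumes the six_unlock_type() helper and the SIX_LOCK_* enum values declared in the same header.

#include <linux/six.h>	/* header path may differ between trees */

/* Illustrative only: modify a six-lock-protected structure, backing off
 * instead of sleeping if the lock is contended. */
static bool try_modify(struct six_lock *lock)
{
	/* intent blocks other intent/write holders; readers may still enter */
	if (!six_trylock_type(lock, SIX_LOCK_intent))
		return false;

	/* write requires the intent lock to be held and also excludes readers */
	if (!six_trylock_type(lock, SIX_LOCK_write)) {
		six_unlock_type(lock, SIX_LOCK_intent);
		return false;
	}

	/* ... modify the protected structure ... */

	six_unlock_type(lock, SIX_LOCK_write);
	six_unlock_type(lock, SIX_LOCK_intent);
	return true;
}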

@@ -1176,7 +1176,7 @@ static int check_overlapping_extents(struct btree_trans *trans,
goto err;
bkey_reassemble(update, k);
ret = bch2_trans_update_extent(trans, iter, update, 0);
if (!ret)
if (ret)
goto err;
}
}


@@ -1743,7 +1743,16 @@ void bch2_journal_write(struct closure *cl)
BUG_ON(u64s > j->entry_u64s_reserved);
le32_add_cpu(&jset->u64s, u64s);
BUG_ON(vstruct_sectors(jset, c->block_bits) > w->sectors);
sectors = vstruct_sectors(jset, c->block_bits);
bytes = vstruct_bytes(jset);
if (sectors > w->sectors) {
bch2_fs_fatal_error(c, "aieeee! journal write overran available space, %zu > %u (extra %u reserved %u/%u)",
vstruct_bytes(jset), w->sectors << 9,
u64s, w->u64s_reserved, j->entry_u64s_reserved);
goto err;
}
jset->magic = cpu_to_le64(jset_magic(c));
jset->version = c->sb.version < bcachefs_metadata_version_bkey_renumber
@@ -1780,10 +1789,6 @@ void bch2_journal_write(struct closure *cl)
jset_validate(c, NULL, jset, 0, WRITE))
goto err;
sectors = vstruct_sectors(jset, c->block_bits);
BUG_ON(sectors > w->sectors);
bytes = vstruct_bytes(jset);
memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
retry_alloc:


@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "btree_update.h"
#include "errcode.h"
#include "error.h"
#include "inode.h"
#include "quota.h"
#include "subvolume.h"
@@ -561,6 +562,9 @@ static int bch2_fs_quota_read_inode(struct btree_trans *trans,
ret = bch2_snapshot_tree_lookup(trans,
snapshot_t(c, k.k->p.snapshot)->tree, &s_t);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
"%s: snapshot tree %u not found", __func__,
snapshot_t(c, k.k->p.snapshot)->tree);
if (ret)
return ret;


@@ -460,36 +460,11 @@ int bch2_replicas_delta_list_mark(struct bch_fs *c,
int bch2_replicas_gc_end(struct bch_fs *c, int ret)
{
unsigned i;
lockdep_assert_held(&c->replicas_gc_lock);
mutex_lock(&c->sb_lock);
percpu_down_write(&c->mark_lock);
/*
* this is kind of crappy; the replicas gc mechanism needs to be ripped
* out
*/
for (i = 0; i < c->replicas.nr; i++) {
struct bch_replicas_entry *e =
cpu_replicas_entry(&c->replicas, i);
struct bch_replicas_cpu n;
if (!__replicas_has_entry(&c->replicas_gc, e) &&
bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
n = cpu_replicas_add_entry(&c->replicas_gc, e);
if (!n.entries) {
ret = -BCH_ERR_ENOMEM_cpu_replicas;
goto err;
}
swap(n, c->replicas_gc);
kfree(n.entries);
}
}
ret = bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
if (ret)
goto err;


@@ -37,14 +37,8 @@ int bch2_snapshot_tree_invalid(const struct bch_fs *c, struct bkey_s_c k,
int bch2_snapshot_tree_lookup(struct btree_trans *trans, u32 id,
struct bch_snapshot_tree *s)
{
int ret;
ret = bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
if (bch2_err_matches(ret, ENOENT))
bch_err(trans->c, "snapshot tree %u not found", id);
return ret;
return bch2_bkey_get_val_typed(trans, BTREE_ID_snapshot_trees, POS(0, id),
BTREE_ITER_WITH_UPDATES, snapshot_tree, s);
}
static struct bkey_i_snapshot_tree *
@@ -434,6 +428,8 @@ static int snapshot_tree_ptr_good(struct btree_trans *trans,
struct bch_snapshot_tree s_t;
int ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
if (bch2_err_matches(ret, ENOENT))
return 0;
if (ret)
return ret;
@@ -467,10 +463,10 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,
tree_id = le32_to_cpu(root.v->tree);
ret = bch2_snapshot_tree_lookup(trans, tree_id, &s_t);
if (ret)
if (ret && !bch2_err_matches(ret, ENOENT))
return ret;
if (le32_to_cpu(s_t.root_snapshot) != root_id) {
if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
u = bch2_bkey_make_mut_typed(trans, &root_iter, root.s_c, 0, snapshot);
ret = PTR_ERR_OR_ZERO(u) ?:
snapshot_tree_create(trans, root_id,
@@ -664,6 +660,10 @@ static int check_subvol(struct btree_trans *trans,
struct bch_snapshot_tree st;
ret = bch2_snapshot_tree_lookup(trans, snapshot_tree, &st);
bch2_fs_inconsistent_on(bch2_err_matches(ret, ENOENT), c,
"%s: snapshot tree %u not found", __func__, snapshot_tree);
if (ret)
return ret;