mirror of
https://github.com/koverstreet/bcachefs-tools.git
synced 2025-12-08 00:00:12 +03:00
Update bcachefs sources to 156233ad4b90 bcachefs: kill replicas gc
Some checks failed
build / bcachefs-tools-msrv (push) Has been cancelled
.deb build orchestrator / source-only (push) Has been cancelled
.deb build orchestrator / obs (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:forky], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:forky], map[build-arch:amd64 host-arch:ppc64el machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:forky], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:trixie], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:trixie], map[build-arch:amd64 host-arch:ppc64el machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:trixie], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:unstable], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:unstable], map[build-arch:amd64 host-arch:ppc64el machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:unstable], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:plucky], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:plucky], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:questing], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:questing], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / reprotest (push) Has been cancelled
.deb build orchestrator / publish (push) Has been cancelled
Nix Flake actions / nix-matrix (push) Has been cancelled
Nix Flake actions / ${{ matrix.name }} (${{ matrix.system }}) (push) Has been cancelled
This commit is contained in:
parent 223ecf11f0
commit b101c5201a
@@ -1 +1 @@
-7604cb70e5909aed4647acf0199c60ac5776dc7c
+156233ad4b9043c04d1b6b0c1b244ae2af38c52e
@@ -74,7 +74,8 @@ static int create_or_update_link(struct bch_fs *c,
 				 struct bch_inode_unpacked *dir,
 				 const char *name, subvol_inum inum, mode_t mode)
 {
-	struct bch_hash_info dir_hash = bch2_hash_info_init(c, dir);
+	struct bch_hash_info dir_hash;
+	try(bch2_hash_info_init(c, dir, &dir_hash));
 
 	struct qstr qstr = QSTR(name);
 	struct bch_inode_unpacked dir_u;
@@ -109,14 +110,16 @@ static struct bch_inode_unpacked create_or_update_file(struct bch_fs *c,
 						       uid_t uid, gid_t gid,
 						       mode_t mode, dev_t rdev)
 {
-	struct bch_hash_info dir_hash = bch2_hash_info_init(c, dir);
+	struct bch_hash_info dir_hash;
+	int ret = bch2_hash_info_init(c, dir, &dir_hash);
+	if (ret)
+		die("hash_info_init() error: %s", bch2_err_str(ret));
 
 	struct qstr qname = QSTR(name);
 	struct bch_inode_unpacked child_inode;
 	subvol_inum child_inum;
 
-	int ret = bch2_dirent_lookup(c, dir_inum, &dir_hash,
-				     &qname, &child_inum);
+	ret = bch2_dirent_lookup(c, dir_inum, &dir_hash, &qname, &child_inum);
 	if (!ret) {
 		/* Already exists, update */
 
@@ -191,8 +194,6 @@ static void copy_times(struct bch_fs *c, struct bch_inode_unpacked *dst,
 static void copy_xattrs(struct bch_fs *c, struct bch_inode_unpacked *dst,
 			char *src)
 {
-	struct bch_hash_info hash_info = bch2_hash_info_init(c, dst);
-
 	char attrs[XATTR_LIST_MAX];
 	ssize_t attrs_size = llistxattr(src, attrs, sizeof(attrs));
 	if (attrs_size < 0)
@@ -217,7 +218,7 @@ static void copy_xattrs(struct bch_fs *c, struct bch_inode_unpacked *dst,
 		int ret = bch2_trans_commit_do(c, NULL, NULL, 0,
 				bch2_xattr_set(trans,
 					       (subvol_inum) { 1, dst->bi_inum },
-					       dst, &hash_info, attr,
+					       dst, attr,
 					       val, val_size, h->flags, 0));
 		if (ret < 0)
 			die("error creating xattr: %s", bch2_err_str(ret));
@@ -630,7 +631,8 @@ static int simple_readdir(struct bch_fs *c,
 {
 	darray_init(dirents);
 
-	struct bch_hash_info hash_info = bch2_hash_info_init(c, dir);
+	struct bch_hash_info hash_info;
+	try(bch2_hash_info_init(c, dir, &hash_info));
 	struct readdir_out dst_dirents = { .ctx.actor = readdir_actor, .dirents = dirents };
 
 	int ret = bch2_readdir(c, dir_inum, &hash_info, &dst_dirents.ctx);
@@ -16,25 +16,40 @@ DEFINE_CLASS(bch_replicas_cpu, struct bch_replicas_cpu,
 	     kfree(_T.entries),
 	     (struct bch_replicas_cpu) {}, void)
 
-static inline struct bch_replicas_entry_v1 *
+static inline struct bch_replicas_entry_cpu *
 cpu_replicas_entry(struct bch_replicas_cpu *r, unsigned i)
 {
 	return (void *) r->entries + r->entry_size * i;
 }
 
+static inline unsigned __cpu_replicas_entry_bytes(unsigned v1_bytes)
+{
+	return offsetof(struct bch_replicas_entry_cpu, e) + v1_bytes;
+}
+
+static inline unsigned cpu_replicas_entry_bytes(struct bch_replicas_entry_cpu *e)
+{
+	return __cpu_replicas_entry_bytes(replicas_entry_bytes(&e->e));
+}
+
 #define for_each_cpu_replicas_entry(_r, _i)				\
-	for (struct bch_replicas_entry_v1 *_i = (_r)->entries;		\
+	for (struct bch_replicas_entry_cpu *_i = (_r)->entries;		\
 	     (void *) (_i) < (void *) (_r)->entries + (_r)->nr * (_r)->entry_size; \
 	     _i = (void *) (_i) + (_r)->entry_size)
 
 static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
 					    struct bch_replicas_cpu *);
 
-/* Some (buggy!) compilers don't allow memcmp to be passed as a pointer */
-static int bch2_memcmp(const void *l, const void *r, const void *priv)
+static int cpu_replicas_entry_cmp(const struct bch_replicas_entry_cpu *l,
+				  const struct bch_replicas_entry_cpu *r,
+				  size_t size)
 {
-	size_t size = (size_t) priv;
-	return memcmp(l, r, size);
+	return memcmp(&l->e, &r->e, size - offsetof(struct bch_replicas_entry_cpu, e));
+}
+
+static int cpu_replicas_entry_cmp_r(const void *l, const void *r, const void *priv)
+{
+	return cpu_replicas_entry_cmp(l, r, (size_t) priv);
 }
 
 /* Replicas tracking - in memory: */
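Worth noting: the new comparator ignores the refcount that now sits in front of each entry — the offsetof() arithmetic compares only the payload that follows it. A minimal standalone sketch of the same trick (toy types, not the real bcachefs structs):

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* toy stand-ins for bch_replicas_entry_cpu / _v1 */
struct payload { unsigned char data_type, nr_devs, devs[2]; };
struct entry   { int ref; struct payload e; };

/* compare only the payload: skip the leading refcount */
static int entry_cmp(const struct entry *l, const struct entry *r, size_t slot_size)
{
	return memcmp(&l->e, &r->e, slot_size - offsetof(struct entry, e));
}

int main(void)
{
	struct entry a = { .ref = 1, .e = { 1, 2, { 3, 4 } } };
	struct entry b = { .ref = 9, .e = { 1, 2, { 3, 4 } } };

	/* equal payloads compare equal even though the refcounts differ */
	assert(!entry_cmp(&a, &b, sizeof(struct entry)));
	return 0;
}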
@@ -60,7 +75,8 @@ void bch2_replicas_entry_sort(struct bch_replicas_entry_v1 *e)
 static void bch2_cpu_replicas_sort(struct bch_replicas_cpu *r)
 {
 	eytzinger0_sort_r(r->entries, r->nr, r->entry_size,
-			  bch2_memcmp, NULL, (void *)(size_t)r->entry_size);
+			  cpu_replicas_entry_cmp_r, NULL,
+			  (void *)(size_t)r->entry_size);
 }
 
 static void bch2_replicas_entry_v0_to_text(struct printbuf *out,
@@ -85,6 +101,13 @@ void bch2_replicas_entry_to_text(struct printbuf *out,
 	prt_printf(out, "]");
 }
 
+static void bch2_replicas_entry_cpu_to_text(struct printbuf *out,
+					    struct bch_replicas_entry_cpu *e)
+{
+	prt_printf(out, "ref=%u ", atomic_read(&e->ref));
+	bch2_replicas_entry_to_text(out, &e->e);
+}
+
 __printf(3, 4)
 static int replicas_entry_invalid(struct bch_replicas_entry_v1 *r,
 				  struct printbuf *err,
@@ -148,7 +171,7 @@ void bch2_cpu_replicas_to_text(struct printbuf *out,
 			prt_printf(out, " ");
 		first = false;
 
-		bch2_replicas_entry_to_text(out, i);
+		bch2_replicas_entry_cpu_to_text(out, i);
 	}
 }
 
@@ -229,6 +252,44 @@ void bch2_devlist_to_replicas(struct bch_replicas_entry_v1 *e,
 	bch2_replicas_entry_sort(e);
 }
 
+/* @l is bch_replicas_entry_v1, @r is bch_replicas_entry_cpu */
+static int replicas_entry_search_cmp(const void *_l, const void *_r, const void *priv)
+{
+	const struct bch_replicas_entry_v1 *l = _l;
+	const struct bch_replicas_entry_cpu *r = _r;
+	size_t size = (size_t) priv;
+
+	return memcmp(l, &r->e, size);
+}
+
+static inline struct bch_replicas_entry_cpu *
+replicas_entry_search(struct bch_replicas_cpu *r,
+		      struct bch_replicas_entry_v1 *search)
+{
+	verify_replicas_entry(search);
+
+	size_t entry_size = replicas_entry_bytes(search);
+	int idx = likely(__cpu_replicas_entry_bytes(entry_size) <= r->entry_size)
+		? eytzinger0_find_r(r->entries, r->nr, r->entry_size,
+				    replicas_entry_search_cmp,
+				    (void *) entry_size, search)
+		: -1;
+	return idx >= 0 ? cpu_replicas_entry(r, idx) : NULL;
+}
+
+bool bch2_replicas_marked_locked(struct bch_fs *c,
+				 struct bch_replicas_entry_v1 *search)
+{
+	return !search->nr_devs || replicas_entry_search(&c->replicas, search);
+}
+
+bool bch2_replicas_marked(struct bch_fs *c,
+			  struct bch_replicas_entry_v1 *search)
+{
+	guard(percpu_read)(&c->mark_lock);
+	return bch2_replicas_marked_locked(c, search);
+}
+
 static struct bch_replicas_cpu
 cpu_replicas_add_entry(struct bch_fs *c,
 		       struct bch_replicas_cpu *old,
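The size test in replicas_entry_search() is doing real work: a search key that, once wrapped in the in-memory header, would not fit in the table's fixed slot size cannot possibly be stored there, so the lookup short-circuits to "not found" without searching. A hedged sketch of that pruning check in isolation (a plain header struct stands in for bch_replicas_entry_cpu):

#include <assert.h>
#include <stddef.h>

/* toy header: a refcount in front of a variable-length payload */
struct entry_hdr { int ref; };

static size_t wrapped_bytes(size_t payload_bytes)
{
	/* payload is stored immediately after the header */
	return sizeof(struct entry_hdr) + payload_bytes;
}

int main(void)
{
	size_t slot_size = 16;

	/* fits in a slot: worth searching the table */
	assert(wrapped_bytes(8) <= slot_size);
	/* cannot fit in any slot: provably absent, skip the search */
	assert(wrapped_bytes(24) > slot_size);
	return 0;
}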
@@ -237,9 +298,12 @@ cpu_replicas_add_entry(struct bch_fs *c,
 	struct bch_replicas_cpu new = {
 		.nr		= old->nr + 1,
 		.entry_size	= max_t(unsigned, old->entry_size,
-					replicas_entry_bytes(new_entry)),
+					__cpu_replicas_entry_bytes(replicas_entry_bytes(new_entry))),
 	};
 
+	/* alignment */
+	new.entry_size = round_up(new.entry_size, sizeof(atomic_t));
+
 	new.entries = kcalloc(new.nr, new.entry_size, GFP_KERNEL);
 	if (!new.entries)
 		return new;
@@ -249,7 +313,7 @@ cpu_replicas_add_entry(struct bch_fs *c,
 		       cpu_replicas_entry(old, i),
 		       old->entry_size);
 
-	memcpy(cpu_replicas_entry(&new, old->nr),
+	memcpy(&cpu_replicas_entry(&new, old->nr)->e,
 	       new_entry,
 	       replicas_entry_bytes(new_entry));
 
@@ -257,152 +321,56 @@ cpu_replicas_add_entry(struct bch_fs *c,
 	return new;
 }
 
-static inline struct bch_replicas_entry_v1 *
-replicas_entry_search(struct bch_replicas_cpu *r,
-		      struct bch_replicas_entry_v1 *search)
-{
-	verify_replicas_entry(search);
-
-	size_t entry_size = replicas_entry_bytes(search);
-	int idx = likely(entry_size <= r->entry_size)
-		? eytzinger0_find_r(r->entries, r->nr, r->entry_size,
-				    bch2_memcmp, (void *) entry_size, search)
-		: -1;
-	return idx >= 0 ? cpu_replicas_entry(r, idx) : NULL;
-}
-
-bool bch2_replicas_marked_locked(struct bch_fs *c,
-				 struct bch_replicas_entry_v1 *search)
-{
-	return !search->nr_devs ||
-		(replicas_entry_search(&c->replicas, search) &&
-		 (likely((!c->replicas_gc.entries)) ||
-		  replicas_entry_search(&c->replicas_gc, search)));
-}
-
-bool bch2_replicas_marked(struct bch_fs *c,
-			  struct bch_replicas_entry_v1 *search)
-{
-	guard(percpu_read)(&c->mark_lock);
-	return bch2_replicas_marked_locked(c, search);
-}
-
 noinline
 static int bch2_mark_replicas_slowpath(struct bch_fs *c,
-				       struct bch_replicas_entry_v1 *new_entry)
+				       struct bch_replicas_entry_v1 *new_entry,
+				       unsigned ref)
 {
 	verify_replicas_entry(new_entry);
 
-	CLASS(bch_replicas_cpu, new_r)();
-	CLASS(bch_replicas_cpu, new_gc)();
-
 	guard(mutex)(&c->sb_lock);
+	bool write_sb = false;
 
-	if (c->replicas_gc.entries &&
-	    !replicas_entry_search(&c->replicas_gc, new_entry)) {
-		new_gc = cpu_replicas_add_entry(c, &c->replicas_gc, new_entry);
-		if (!new_gc.entries)
-			return bch_err_throw(c, ENOMEM_cpu_replicas);
-	}
-
-	if (!replicas_entry_search(&c->replicas, new_entry)) {
-		new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
-		if (!new_r.entries)
-			return bch_err_throw(c, ENOMEM_cpu_replicas);
-
-		try(bch2_cpu_replicas_to_sb_replicas(c, &new_r));
-	}
-
-	if (!new_r.entries &&
-	    !new_gc.entries)
-		return 0;
-
-	/* allocations done, now commit: */
-
-	if (new_r.entries)
-		bch2_write_super(c);
-
-	/* don't update in memory replicas until changes are persistent */
 	scoped_guard(percpu_write, &c->mark_lock) {
-		if (new_r.entries)
+		if (!replicas_entry_search(&c->replicas, new_entry)) {
+			CLASS(bch_replicas_cpu, new_r)();
+
+			new_r = cpu_replicas_add_entry(c, &c->replicas, new_entry);
+			if (!new_r.entries)
+				return bch_err_throw(c, ENOMEM_cpu_replicas);
+
+			try(bch2_cpu_replicas_to_sb_replicas(c, &new_r));
+
 			swap(c->replicas, new_r);
-		if (new_gc.entries)
-			swap(new_gc, c->replicas_gc);
+			write_sb = true;
+		}
+
+		atomic_add(ref, &replicas_entry_search(&c->replicas, new_entry)->ref);
 	}
 
+	/* After dropping mark_lock */
+	if (write_sb)
+		bch2_write_super(c);
+
 	return 0;
 }
 
 int bch2_mark_replicas(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
 {
 	return likely(bch2_replicas_marked(c, r))
-		? 0 : bch2_mark_replicas_slowpath(c, r);
+		? 0 : bch2_mark_replicas_slowpath(c, r, 0);
 }
 
-/*
- * Old replicas_gc mechanism: only used for journal replicas entries now, should
- * die at some point:
- */
-
-int bch2_replicas_gc_end(struct bch_fs *c, int ret)
+static void __replicas_entry_kill(struct bch_fs *c, struct bch_replicas_entry_cpu *e)
 {
-	lockdep_assert_held(&c->replicas_gc_lock);
+	struct bch_replicas_cpu *r = &c->replicas;
 
-	guard(mutex)(&c->sb_lock);
-	scoped_guard(percpu_write, &c->mark_lock) {
-		ret = ret ?:
-			bch2_cpu_replicas_to_sb_replicas(c, &c->replicas_gc);
-		if (!ret)
-			swap(c->replicas, c->replicas_gc);
+	memcpy(e, cpu_replicas_entry(r, --r->nr), r->entry_size);
+	bch2_cpu_replicas_sort(r);
 
-		kfree(c->replicas_gc.entries);
-		c->replicas_gc.entries = NULL;
-	}
-
-	if (!ret)
-		bch2_write_super(c);
-
-	return ret;
-}
-
-int bch2_replicas_gc_start(struct bch_fs *c, unsigned typemask)
-{
-	lockdep_assert_held(&c->replicas_gc_lock);
-
-	guard(mutex)(&c->sb_lock);
-	BUG_ON(c->replicas_gc.entries);
-
-	c->replicas_gc.nr		= 0;
-	c->replicas_gc.entry_size	= 0;
-
-	for_each_cpu_replicas_entry(&c->replicas, e) {
-		/* Preserve unknown data types */
-		if (e->data_type >= BCH_DATA_NR ||
-		    !(BIT(e->data_type) & typemask)) {
-			c->replicas_gc.nr++;
-			c->replicas_gc.entry_size =
-				max_t(unsigned, c->replicas_gc.entry_size,
-				      replicas_entry_bytes(e));
-		}
-	}
-
-	c->replicas_gc.entries = kcalloc(c->replicas_gc.nr,
-					 c->replicas_gc.entry_size,
-					 GFP_KERNEL);
-	if (!c->replicas_gc.entries) {
-		bch_err(c, "error allocating c->replicas_gc");
-		return bch_err_throw(c, ENOMEM_replicas_gc);
-	}
-
-	unsigned i = 0;
-	for_each_cpu_replicas_entry(&c->replicas, e)
-		if (e->data_type >= BCH_DATA_NR ||
-		    !(BIT(e->data_type) & typemask))
-			memcpy(cpu_replicas_entry(&c->replicas_gc, i++),
-			       e, c->replicas_gc.entry_size);
-
-	bch2_cpu_replicas_sort(&c->replicas_gc);
-	return 0;
+	int ret = bch2_cpu_replicas_to_sb_replicas(c, r);
+	if (WARN(ret, "bch2_cpu_replicas_to_sb_replicas() error: %s", bch2_err_str(ret)))
+		return;
 }
 
 void bch2_replicas_entry_kill(struct bch_fs *c, struct bch_replicas_entry_v1 *kill)
@@ -410,18 +378,110 @@ void bch2_replicas_entry_kill(struct bch_fs *c, struct bch_replicas_entry_v1 *kill)
 	lockdep_assert_held(&c->mark_lock);
 	lockdep_assert_held(&c->sb_lock);
 
-	struct bch_replicas_cpu *r = &c->replicas;
-
-	struct bch_replicas_entry_v1 *e = replicas_entry_search(&c->replicas, kill);
+	struct bch_replicas_entry_cpu *e = replicas_entry_search(&c->replicas, kill);
 	if (WARN(!e, "replicas entry not found in sb"))
 		return;
 
-	memcpy(e, cpu_replicas_entry(r, --r->nr), r->entry_size);
+	__replicas_entry_kill(c, e);
 
-	bch2_cpu_replicas_sort(r);
+	/* caller does write_super() after dropping mark_lock */
+}
 
-	int ret = bch2_cpu_replicas_to_sb_replicas(c, r);
-	WARN(ret, "bch2_cpu_replicas_to_sb_replicas() error: %s", bch2_err_str(ret));
+static inline int __replicas_entry_put(struct bch_fs *c, struct bch_replicas_entry_v1 *r, unsigned nr)
+{
+	struct bch_replicas_entry_cpu *e = replicas_entry_search(&c->replicas, r);
+	if (!e)
+		return -1;
+
+	int v = atomic_sub_return(nr, &e->ref);
+	if (v < 0)
+		return -1;
+	return !v;
+}
+
+void bch2_replicas_entry_put_many(struct bch_fs *c, struct bch_replicas_entry_v1 *r, unsigned nr)
+{
+	if (!r->nr_devs)
+		return;
+
+	BUG_ON(r->data_type != BCH_DATA_journal);
+	verify_replicas_entry(r);
+
+	scoped_guard(percpu_read, &c->mark_lock) {
+		int ret = __replicas_entry_put(c, r, nr);
+		if (!ret)
+			return;
+		if (unlikely(ret < 0)) {
+			CLASS(printbuf, buf)();
+			bch2_replicas_entry_to_text(&buf, r);
+			WARN(1, "refcount error putting %s", buf.buf);
+			return;
+		}
+	}
+
+	guard(mutex)(&c->sb_lock);
+	scoped_guard(percpu_write, &c->mark_lock) {
+		struct bch_replicas_entry_cpu *e = replicas_entry_search(&c->replicas, r);
+		if (e && !atomic_read(&e->ref))
+			__replicas_entry_kill(c, e);
+	}
+
+	bch2_write_super(c);
+}
+
+static inline bool bch2_replicas_entry_get_inmem(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
+{
+	guard(percpu_read)(&c->mark_lock);
+	struct bch_replicas_entry_cpu *e = replicas_entry_search(&c->replicas, r);
+	if (e)
+		atomic_inc(&e->ref);
+	return e != NULL;
+}
+
+int bch2_replicas_entry_get(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
+{
+	if (!r->nr_devs)
+		return 0;
+
+	BUG_ON(r->data_type != BCH_DATA_journal);
+	verify_replicas_entry(r);
+
+	return bch2_replicas_entry_get_inmem(c, r)
+		? 0
+		: bch2_mark_replicas_slowpath(c, r, 1);
+}
+
+int bch2_replicas_gc_reffed(struct bch_fs *c)
+{
+	bool write_sb = false;
+
+	guard(mutex)(&c->sb_lock);
+
+	scoped_guard(percpu_write, &c->mark_lock) {
+		unsigned dst = 0;
+		for (unsigned i = 0; i < c->replicas.nr; i++) {
+			struct bch_replicas_entry_cpu *e =
+				cpu_replicas_entry(&c->replicas, i);
+
+			if (e->e.data_type != BCH_DATA_journal ||
+			    atomic_read(&e->ref))
+				memcpy(cpu_replicas_entry(&c->replicas, dst++),
+				       e,
+				       c->replicas.entry_size);
+		}
+
+		if (c->replicas.nr != dst) {
+			c->replicas.nr = dst;
+			bch2_cpu_replicas_sort(&c->replicas);
+
+			try(bch2_cpu_replicas_to_sb_replicas(c, &c->replicas));
+		}
+	}
+
+	if (write_sb)
+		bch2_write_super(c);
+	return 0;
 }
 
 /* Replicas tracking - superblock: */
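Taken together, the get/put pair above gives journal replicas entries a reference-counted lifecycle: a get pins the entry (inserting it via the slowpath with an initial ref if needed), and the final put makes it eligible for removal under the superblock lock. A minimal single-threaded model of that lifecycle — hypothetical names, with the locking and superblock writes elided:

#include <assert.h>
#include <stdbool.h>

/* toy model: one entry with a refcount and a "present in table" flag */
struct toy_entry { int ref; bool present; };

static void entry_get(struct toy_entry *e)
{
	if (!e->present) {	/* slowpath: insert with initial ref */
		e->present = true;
		e->ref = 0;
	}
	e->ref++;
}

static void entry_put(struct toy_entry *e)
{
	assert(e->present && e->ref > 0);
	if (!--e->ref)
		e->present = false;	/* kill when the last ref drops */
}

int main(void)
{
	struct toy_entry e = { 0, false };

	entry_get(&e);		/* journal entry pins its devices */
	entry_get(&e);		/* second journal entry, same devices */
	entry_put(&e);
	assert(e.present);	/* still referenced */
	entry_put(&e);
	assert(!e.present);	/* last put removes the entry */
	return 0;
}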
@@ -438,6 +498,9 @@ __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
 		nr++;
 	}
 
+	entry_size = __cpu_replicas_entry_bytes(entry_size);
+	entry_size = round_up(entry_size, sizeof(atomic_t));
+
 	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
 	if (!cpu_r->entries)
 		return -BCH_ERR_ENOMEM_cpu_replicas;
@@ -445,10 +508,10 @@ __bch2_sb_replicas_to_cpu_replicas(struct bch_sb_field_replicas *sb_r,
 	cpu_r->nr		= nr;
 	cpu_r->entry_size	= entry_size;
 
-	for_each_replicas_entry(sb_r, e) {
-		struct bch_replicas_entry_v1 *dst = cpu_replicas_entry(cpu_r, idx++);
-		memcpy(dst, e, replicas_entry_bytes(e));
-		bch2_replicas_entry_sort(dst);
+	for_each_replicas_entry(sb_r, src) {
+		struct bch_replicas_entry_cpu *dst = cpu_replicas_entry(cpu_r, idx++);
+		memcpy(&dst->e, src, replicas_entry_bytes(src));
+		bch2_replicas_entry_sort(&dst->e);
 	}
 
 	return 0;
@@ -466,9 +529,13 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 		nr++;
 	}
 
+	entry_size = __cpu_replicas_entry_bytes(entry_size);
+
 	entry_size += sizeof(struct bch_replicas_entry_v1) -
 		sizeof(struct bch_replicas_entry_v0);
 
+	entry_size = round_up(entry_size, sizeof(atomic_t));
+
 	cpu_r->entries = kcalloc(nr, entry_size, GFP_KERNEL);
 	if (!cpu_r->entries)
 		return -BCH_ERR_ENOMEM_cpu_replicas;
@@ -477,14 +544,14 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 	cpu_r->entry_size	= entry_size;
 
 	for_each_replicas_entry(sb_r, src) {
-		struct bch_replicas_entry_v1 *dst =
+		struct bch_replicas_entry_cpu *dst =
 			cpu_replicas_entry(cpu_r, idx++);
 
-		dst->data_type	= src->data_type;
-		dst->nr_devs	= src->nr_devs;
-		dst->nr_required = 1;
-		memcpy(dst->devs, src->devs, src->nr_devs);
-		bch2_replicas_entry_sort(dst);
+		dst->e.data_type	= src->data_type;
+		dst->e.nr_devs		= src->nr_devs;
+		dst->e.nr_required	= 1;
+		memcpy(dst->e.devs, src->devs, src->nr_devs);
+		bch2_replicas_entry_sort(&dst->e);
 	}
 
 	return 0;
@@ -492,6 +559,12 @@ __bch2_sb_replicas_v0_to_cpu_replicas(struct bch_sb_field_replicas_v0 *sb_r,
 
 int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *c)
 {
+	/*
+	 * If called after fs is started (after journal read), we'll be blowing
+	 * away refcounts
+	 */
+	BUG_ON(test_bit(BCH_FS_started, &c->flags));
+
 	struct bch_sb_field_replicas *sb_v1;
 	struct bch_sb_field_replicas_v0 *sb_v0;
 	CLASS(bch_replicas_cpu, new_r)();
@@ -519,7 +592,7 @@ static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
 	bytes = sizeof(struct bch_sb_field_replicas);
 
 	for_each_cpu_replicas_entry(r, src)
-		bytes += replicas_entry_bytes(src) - 1;
+		bytes += replicas_entry_bytes(&src->e) - 1;
 
 	sb_r = bch2_sb_field_resize(&c->disk_sb, replicas_v0,
 				    DIV_ROUND_UP(bytes, sizeof(u64)));
@@ -535,9 +608,9 @@ static int bch2_cpu_replicas_to_sb_replicas_v0(struct bch_fs *c,
 
 	dst = sb_r->entries;
 	for_each_cpu_replicas_entry(r, src) {
-		dst->data_type	= src->data_type;
-		dst->nr_devs	= src->nr_devs;
-		memcpy(dst->devs, src->devs, src->nr_devs);
+		dst->data_type	= src->e.data_type;
+		dst->nr_devs	= src->e.nr_devs;
+		memcpy(dst->devs, src->e.devs, src->e.nr_devs);
 
 		dst = replicas_entry_next(dst);
 
@@ -558,8 +631,8 @@ static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
 	bytes = sizeof(struct bch_sb_field_replicas);
 
 	for_each_cpu_replicas_entry(r, src) {
-		bytes += replicas_entry_bytes(src);
-		if (src->nr_required != 1)
+		bytes += replicas_entry_bytes(&src->e);
+		if (src->e.nr_required != 1)
 			need_v1 = true;
 	}
 
@@ -580,7 +653,7 @@ static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
 
 	dst = sb_r->entries;
 	for_each_cpu_replicas_entry(r, src) {
-		memcpy(dst, src, replicas_entry_bytes(src));
+		memcpy(dst, &src->e, replicas_entry_bytes(&src->e));
 
 		dst = replicas_entry_next(dst);
 
@@ -599,24 +672,26 @@ static int bch2_cpu_replicas_validate(struct bch_replicas_cpu *cpu_r,
 	sort_r(cpu_r->entries,
 	       cpu_r->nr,
 	       cpu_r->entry_size,
-	       bch2_memcmp, NULL,
+	       cpu_replicas_entry_cmp_r, NULL,
 	       (void *)(size_t)cpu_r->entry_size);
 
 	for (i = 0; i < cpu_r->nr; i++) {
-		struct bch_replicas_entry_v1 *e =
+		struct bch_replicas_entry_cpu *e =
 			cpu_replicas_entry(cpu_r, i);
 
-		try(bch2_replicas_entry_sb_validate(e, sb, err));
+		try(bch2_replicas_entry_sb_validate(&e->e, sb, err));
 
 		if (i + 1 < cpu_r->nr) {
-			struct bch_replicas_entry_v1 *n =
+			struct bch_replicas_entry_cpu *n =
 				cpu_replicas_entry(cpu_r, i + 1);
 
-			BUG_ON(memcmp(e, n, cpu_r->entry_size) > 0);
+			int cmp = cpu_replicas_entry_cmp(e, n, cpu_r->entry_size);
 
-			if (!memcmp(e, n, cpu_r->entry_size)) {
+			BUG_ON(cmp > 0);
+
+			if (!cmp) {
 				prt_printf(err, "duplicate replicas entry ");
-				bch2_replicas_entry_to_text(err, e);
+				bch2_replicas_entry_to_text(err, &e->e);
 				return -BCH_ERR_invalid_sb_replicas;
 			}
 		}
 
@@ -699,7 +774,9 @@ bool bch2_can_read_fs_with_devs(struct bch_fs *c, struct bch_devs_mask devs,
 				unsigned flags, struct printbuf *err)
 {
 	guard(percpu_read)(&c->mark_lock);
-	for_each_cpu_replicas_entry(&c->replicas, e) {
+	for_each_cpu_replicas_entry(&c->replicas, i) {
+		struct bch_replicas_entry_v1 *e = &i->e;
+
 		unsigned nr_online = 0, nr_failed = 0, dflags = 0;
 		bool metadata = e->data_type < BCH_DATA_user;
 
@@ -817,6 +894,25 @@ bool bch2_have_enough_devs(struct bch_fs *c, struct bch_devs_mask devs,
 	return bch2_can_read_fs_with_devs(c, devs, flags, err);
 }
 
+bool bch2_sb_has_journal(struct bch_sb *sb)
+{
+	struct bch_sb_field_replicas *replicas = bch2_sb_field_get(sb, replicas);
+	struct bch_sb_field_replicas_v0 *replicas_v0 = bch2_sb_field_get(sb, replicas_v0);
+
+	if (replicas) {
+		for_each_replicas_entry(replicas, r)
+			if (r->data_type == BCH_DATA_journal)
+				return true;
+	} else if (replicas_v0) {
+		for_each_replicas_entry(replicas_v0, r)
+			if (r->data_type == BCH_DATA_journal)
+				return true;
+	}
+
+	return false;
+}
+
 unsigned bch2_sb_dev_has_data(struct bch_sb *sb, unsigned dev)
 {
 	struct bch_sb_field_replicas *replicas;
@@ -857,8 +953,17 @@ unsigned bch2_dev_has_data(struct bch_fs *c, struct bch_dev *ca)
 	return bch2_sb_dev_has_data(c->disk_sb.sb, ca->dev_idx);
 }
 
+void bch2_verify_replicas_refs_clean(struct bch_fs *c)
+{
+	for_each_cpu_replicas_entry(&c->replicas, i)
+		if (atomic_read(&i->ref)) {
+			CLASS(printbuf, buf)();
+			bch2_replicas_entry_cpu_to_text(&buf, i);
+			WARN(1, "replicas entry ref leaked:\n%s", buf.buf);
+		}
+}
+
 void bch2_fs_replicas_exit(struct bch_fs *c)
 {
 	kfree(c->replicas.entries);
-	kfree(c->replicas_gc.entries);
 }
 
@@ -39,13 +39,22 @@ bool bch2_can_read_fs_with_devs(struct bch_fs *, struct bch_devs_mask,
 bool bch2_have_enough_devs(struct bch_fs *, struct bch_devs_mask,
 			   unsigned, struct printbuf *, bool);
 
+bool bch2_sb_has_journal(struct bch_sb *);
 unsigned bch2_sb_dev_has_data(struct bch_sb *, unsigned);
 unsigned bch2_dev_has_data(struct bch_fs *, struct bch_dev *);
 
-int bch2_replicas_gc_end(struct bch_fs *, int);
-int bch2_replicas_gc_start(struct bch_fs *, unsigned);
+void bch2_replicas_entry_put_many(struct bch_fs *, struct bch_replicas_entry_v1 *, unsigned);
+static inline void bch2_replicas_entry_put(struct bch_fs *c, struct bch_replicas_entry_v1 *r)
+{
+	bch2_replicas_entry_put_many(c, r, 1);
+}
+
+int bch2_replicas_entry_get(struct bch_fs *, struct bch_replicas_entry_v1 *);
 
 void bch2_replicas_entry_kill(struct bch_fs *, struct bch_replicas_entry_v1 *);
 
+int bch2_replicas_gc_reffed(struct bch_fs *);
+
 static inline bool bch2_replicas_entry_has_dev(struct bch_replicas_entry_v1 *r, unsigned dev)
 {
 	for (unsigned i = 0; i < r->nr_devs; i++)
@@ -54,6 +63,12 @@ static inline bool bch2_replicas_entry_has_dev(struct bch_replicas_entry_v1 *r,
 	return false;
 }
 
+static inline bool bch2_replicas_entry_eq(struct bch_replicas_entry_v1 *l,
+					  struct bch_replicas_entry_v1 *r)
+{
+	return l->nr_devs == r->nr_devs && !memcmp(l, r, replicas_entry_bytes(l));
+}
+
 /* iterate over superblock replicas - used by userspace tools: */
 
 #define replicas_entry_next(_i)					\
@@ -69,6 +84,7 @@ int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
 extern const struct bch_sb_field_ops bch_sb_field_ops_replicas;
 extern const struct bch_sb_field_ops bch_sb_field_ops_replicas_v0;
 
+void bch2_verify_replicas_refs_clean(struct bch_fs *);
 void bch2_fs_replicas_exit(struct bch_fs *);
 
 #endif /* _BCACHEFS_REPLICAS_H */
@@ -2,10 +2,16 @@
 #ifndef _BCACHEFS_REPLICAS_TYPES_H
 #define _BCACHEFS_REPLICAS_TYPES_H
 
+/* unsized - bch_replicas_entry_v1 is variable length */
+struct bch_replicas_entry_cpu {
+	atomic_t			ref;
+	struct bch_replicas_entry_v1	e;
+};
+
 struct bch_replicas_cpu {
-	unsigned		nr;
-	unsigned		entry_size;
-	struct bch_replicas_entry_v1 *entries;
+	unsigned			nr;
+	unsigned			entry_size;
+	struct bch_replicas_entry_cpu	*entries;
 };
 
 union bch_replicas_padded {
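bch_replicas_cpu keeps these variable-length entries in one flat allocation of fixed-size slots (entry_size is the largest entry, rounded up so the atomic refcount stays aligned), which is why indexing is plain pointer arithmetic rather than an array of pointers. A sketch of that indexing scheme with a toy element type (not the real structs):

#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct table {
	unsigned nr;
	unsigned slot_size;	/* max element size, suitably aligned */
	void *slots;		/* nr * slot_size bytes */
};

/* equivalent of cpu_replicas_entry(): index into the flat buffer */
static void *table_entry(struct table *t, unsigned i)
{
	return (char *) t->slots + (size_t) t->slot_size * i;
}

int main(void)
{
	struct table t = { .nr = 4, .slot_size = 32 };
	t.slots = calloc(t.nr, t.slot_size);
	assert(t.slots);

	strcpy(table_entry(&t, 2), "entry two");
	assert(!strcmp(table_entry(&t, 2), "entry two"));

	free(t.slots);
	return 0;
}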
@@ -667,6 +667,7 @@ struct bch_dev {
 	x(btree_running)		\
 	x(accounting_replay_done)	\
 	x(may_go_rw)			\
+	x(may_upgrade_downgrade)	\
 	x(rw)				\
 	x(rw_init_done)			\
 	x(was_rw)			\
@@ -811,8 +812,6 @@ struct bch_fs {
 	struct bch_accounting_mem accounting;
 
 	struct bch_replicas_cpu replicas;
-	struct bch_replicas_cpu replicas_gc;
-	struct mutex		replicas_gc_lock;
 
 	struct journal_entry_res btree_root_journal_res;
 	struct journal_entry_res clock_journal_res;
@@ -4,6 +4,7 @@
 
 #include "btree/cache.h"
 #include "btree/iter.h"
+#include "btree/journal_overlay.h"
 #include "btree/key_cache.h"
 #include "btree/locking.h"
 #include "btree/update.h"
@@ -243,6 +244,7 @@ static int btree_key_cache_create(struct btree_trans *trans,
 	ck->key.btree_id	= ck_path->btree_id;
 	ck->key.pos		= ck_path->pos;
 	ck->flags		= 1U << BKEY_CACHED_ACCESSED;
+	ck->needs_immediate_flush = false;
 
 	if (unlikely(key_u64s > ck->u64s)) {
 		mark_btree_node_locked_noreset(ck_path, 0, BTREE_NODE_UNLOCKED);
|
||||
}
|
||||
|
||||
struct bch_fs *c = trans->c;
|
||||
bool needs_immediate_flush = false;
|
||||
|
||||
CLASS(btree_iter, iter)(trans, ck_path->btree_id, ck_path->pos,
|
||||
BTREE_ITER_intent|
|
||||
@@ -327,14 +330,31 @@ static noinline int btree_key_cache_fill(struct btree_trans *trans,
 	iter.flags &= ~BTREE_ITER_with_journal;
 	struct bkey_s_c k = bkey_try(bch2_btree_iter_peek_slot(&iter));
 
-	/* Recheck after btree lookup, before allocating: */
 	ck_path = trans->paths + ck_path_idx;
 
+	if (unlikely(trans->journal_replay_not_finished && bkey_deleted(k.k))) {
+		size_t idx = 0;
+		const struct bkey_i *jk =
+			bch2_journal_keys_peek_max(trans->c, ck_path->btree_id, 0,
+						   ck_path->pos, ck_path->pos, &idx);
+		if (jk) {
+			k = bkey_i_to_s_c(jk);
+			needs_immediate_flush = true;
+		}
+	}
+
+	/* Recheck after btree lookup, before allocating: */
 	int ret = bch2_btree_key_cache_find(c, ck_path->btree_id, ck_path->pos) ? -EEXIST : 0;
 	if (unlikely(ret))
 		goto out;
 
 	try(btree_key_cache_create(trans, btree_iter_path(trans, &iter), ck_path, k));
 
+	if (unlikely(needs_immediate_flush)) {
+		struct bkey_cached *ck = (void *) ck_path->l[0].b;
+		ck->needs_immediate_flush = true;
+	}
+
 	if (trace_key_cache_fill_enabled())
 		do_trace_key_cache_fill(trans, ck_path, k);
 out:
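The fill path above layers the journal on top of the btree during replay: when the btree copy is deleted but journal replay has not finished, the not-yet-replayed journal key is used instead, and the cached key is flagged for immediate flush. A minimal sketch of that overlay-lookup pattern (hypothetical stores, not the bcachefs API):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct kv { const char *key; const char *val; };

static const char *store_get(const struct kv *s, size_t n, const char *key)
{
	for (size_t i = 0; i < n; i++)
		if (!strcmp(s[i].key, key))
			return s[i].val;
	return NULL;
}

/*
 * Overlay lookup: prefer the primary store; while replay of the log is
 * unfinished, a miss falls back to the log, and the caller is told to
 * flush the result back out promptly.
 */
static const char *overlay_get(const struct kv *primary, size_t np,
			       const struct kv *log, size_t nl,
			       const char *key, bool replay_done,
			       bool *needs_flush)
{
	const char *v = store_get(primary, np, key);

	*needs_flush = false;
	if (!v && !replay_done) {
		v = store_get(log, nl, key);
		*needs_flush = v != NULL;
	}
	return v;
}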
@@ -447,7 +467,7 @@ static int btree_key_cache_flush_pos(struct btree_trans *trans,
 
 	struct bkey_s_c btree_k = bkey_try(bch2_btree_iter_peek_slot(&b_iter));
 
-	/* * Check that we're not violating cache coherency rules: */
+	/* Check that we're not violating cache coherency rules: */
 	BUG_ON(bkey_deleted(btree_k.k));
 
 	try(bch2_trans_update(trans, &b_iter, ck->k,
@@ -403,6 +403,7 @@ struct bkey_cached {
 
 	unsigned long		flags;
 	u16			u64s;
+	bool			needs_immediate_flush:1;
 	struct bkey_cached_key	key;
 
 	struct rhash_head	hash;
@@ -416,6 +416,12 @@ static noinline int flush_new_cached_update(struct btree_trans *trans,
 	return 0;
 }
 
+static inline bool key_cache_needs_flush(struct btree_path *path)
+{
+	struct bkey_cached *ck = (void *) path->l[0].b;
+	return ck->needs_immediate_flush;
+}
+
 static int __must_check
 bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 			  struct bkey_i *k, enum btree_iter_update_trigger_flags flags,
@@ -430,7 +436,9 @@ bch2_trans_update_by_path(struct btree_trans *trans, btree_path_idx_t path_idx,
 	 * the key cache - but the key has to exist in the btree for that to
 	 * work:
 	 */
-	return i->cached && (!i->old_btree_u64s || bkey_deleted(&k->k))
+	return i->cached && (!i->old_btree_u64s ||
+			     bkey_deleted(&k->k) ||
+			     key_cache_needs_flush(trans->paths + path_idx))
 		? flush_new_cached_update(trans, i, flags, ip)
 		: 0;
 }
@@ -124,22 +124,15 @@ void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
 
 static struct page *__bio_alloc_page_pool(struct bch_fs *c, bool *using_mempool)
 {
-	struct page *page;
-
 	if (likely(!*using_mempool)) {
-		page = alloc_page(GFP_NOFS);
-		if (unlikely(!page)) {
-			mutex_lock(&c->bio_bounce_pages_lock);
-			*using_mempool = true;
-			goto pool_alloc;
+		struct page *page = alloc_page(GFP_NOFS);
+		if (likely(page))
+			return page;
 
-		}
-	} else {
-pool_alloc:
-		page = mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
+		mutex_lock(&c->bio_bounce_pages_lock);
+		*using_mempool = true;
 	}
 
-	return page;
+	return mempool_alloc(&c->bio_bounce_pages, GFP_NOFS);
 }
 
 void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
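The rewrite above replaces a goto-based allocator with a common shape: try the cheap allocation first, fall back to the pool that guarantees forward progress, and stay on the pool once it has been engaged. A compressed sketch of the same control flow, with stdlib stand-ins for alloc_page()/mempool_alloc() (the real mempool cannot fail; malloc here obviously can):

#include <stdbool.h>
#include <stdlib.h>

/* stand-in for mempool_alloc(); the real one guarantees forward progress */
static void *pool_alloc(void)
{
	return malloc(64);
}

static void *alloc_buf(bool *using_pool)
{
	if (!*using_pool) {
		void *p = malloc(64);	/* fast path: plain allocation */
		if (p)
			return p;
		*using_pool = true;	/* remember to stick with the pool */
	}
	return pool_alloc();		/* fallback */
}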
@@ -238,6 +238,8 @@
 	x(EOPNOTSUPP,	no_casefolding_without_utf8)	\
 	x(EOPNOTSUPP,	casefolding_disabled)		\
 	x(EOPNOTSUPP,	casefold_opt_is_dir_only)	\
+	x(EOPNOTSUPP,	casefolding_in_use)		\
+	x(EOPNOTSUPP,	casefold_dir_but_disabled)	\
 	x(EOPNOTSUPP,	unsupported_fsx_flag)		\
 	x(EOPNOTSUPP,	unsupported_fa_flag)		\
 	x(EOPNOTSUPP,	unsupported_fallocate_mode)	\
@@ -276,16 +276,18 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
 {
 	struct bch_inode_info *inode = to_bch_ei(vinode);
 	struct bch_fs *c = inode->v.i_sb->s_fs_info;
-	struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
-	struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
 
 	if (rcu)
 		return ERR_PTR(-ECHILD);
 
+	struct bch_hash_info hash;
+	struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
+
 	CLASS(btree_trans, trans)(c);
 	CLASS(btree_iter_uninit, iter)(trans);
 	struct bkey_s_c k;
-	int ret = lockrestart_do(trans,
+	int ret = bch2_hash_info_init(c, &inode->ei_inode, &hash) ?:
+		lockrestart_do(trans,
 		bkey_err(k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
 					      &hash, inode_inum(inode), &search, 0)));
 	if (ret)
@@ -305,13 +307,14 @@ int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
 		       struct bch_inode_unpacked *inode_u,
 		       struct posix_acl *acl, int type)
 {
-	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode_u);
-	int ret;
+	struct bch_hash_info hash_info;
+	try(bch2_hash_info_init(trans->c, inode_u, &hash_info));
 
 	if (type == ACL_TYPE_DEFAULT &&
 	    !S_ISDIR(inode_u->bi_mode))
 		return acl ? -EACCES : 0;
 
+	int ret;
 	if (acl) {
 		struct bkey_i_xattr *xattr =
 			bch2_acl_to_xattr(trans, acl, type);
@@ -377,7 +380,9 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
 		   umode_t mode,
 		   struct posix_acl **new_acl)
 {
-	struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
+	struct bch_hash_info hash_info;
+	try(bch2_hash_info_init(trans->c, inode, &hash_info));
+
 	struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
 
 	CLASS(btree_iter_uninit, iter)(trans);
@@ -153,14 +153,14 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
 	};
 
 	struct bch_inode_unpacked root_inode;
-	struct bch_hash_info root_hash_info;
 	ret = bch2_inode_find_by_inum_snapshot(trans, root_inum.inum, snapshot, &root_inode, 0);
 	bch_err_msg(c, ret, "looking up root inode %llu for subvol %u",
 		    root_inum.inum, subvolid);
 	if (ret)
 		return ret;
 
-	root_hash_info = bch2_hash_info_init(c, &root_inode);
+	struct bch_hash_info root_hash_info;
+	try(bch2_hash_info_init(c, &root_inode, &root_hash_info));
 
 	ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
 			&lostfound_str, &inum, &d_type, snapshot);
@@ -338,7 +338,8 @@ int bch2_reattach_inode(struct btree_trans *trans, struct bch_inode_unpacked *inode)
 
 	try(__bch2_fsck_write_inode(trans, &lostfound));
 
-	struct bch_hash_info dir_hash = bch2_hash_info_init(c, &lostfound);
+	struct bch_hash_info dir_hash;
+	try(bch2_hash_info_init(c, &lostfound, &dir_hash));
 	struct qstr name = QSTR(name_buf);
 
 	inode->bi_dir		= lostfound.bi_inum;
@@ -1590,7 +1591,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
 		return 0;
 
 	if (dir->first_this_inode)
-		*hash_info = bch2_hash_info_init(c, &i->inode);
+		try(bch2_hash_info_init(c, &i->inode, hash_info));
 	dir->first_this_inode = false;
 
 	hash_info->cf_encoding = bch2_inode_casefold(c, &i->inode) ? c->cf_encoding : NULL;
@@ -1724,7 +1725,7 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
 		return 0;
 
 	if (inode->first_this_inode)
-		*hash_info = bch2_hash_info_init(c, &i->inode);
+		try(bch2_hash_info_init(c, &i->inode, hash_info));
 	inode->first_this_inode = false;
 
 	bool need_second_pass = false;
@@ -675,7 +675,8 @@ int bch2_fsck_remove_dirent(struct btree_trans *trans, struct bpos pos)
 	struct bch_inode_unpacked dir_inode;
 	try(lookup_first_inode(trans, pos.inode, &dir_inode));
 
-	struct bch_hash_info dir_hash_info = bch2_hash_info_init(c, &dir_inode);
+	struct bch_hash_info dir_hash_info;
+	try(bch2_hash_info_init(c, &dir_inode, &dir_hash_info));
 
 	CLASS(btree_iter, iter)(trans, BTREE_ID_dirents, pos, BTREE_ITER_intent);
 
@@ -129,13 +129,13 @@ int bch2_create_trans(struct btree_trans *trans,
 	}
 
 	if (!(flags & BCH_CREATE_TMPFILE)) {
-		struct bch_hash_info dir_hash = bch2_hash_info_init(c, dir_u);
-		u64 dir_offset;
+		struct bch_hash_info dir_hash;
+		try(bch2_hash_info_init(c, dir_u, &dir_hash));
 
-		if (is_subdir_for_nlink(new_inode))
-			dir_u->bi_nlink++;
+		dir_u->bi_nlink += is_subdir_for_nlink(new_inode);
 		dir_u->bi_mtime = dir_u->bi_ctime = now;
 
+		u64 dir_offset;
 		try(bch2_dirent_create(trans, dir, &dir_hash,
 				       dir_type,
 				       name,
@@ -176,7 +176,6 @@ int bch2_link_trans(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	CLASS(btree_iter_uninit, dir_iter)(trans);
 	CLASS(btree_iter_uninit, inode_iter)(trans);
-	struct bch_hash_info dir_hash;
 	u64 now = bch2_current_time(c);
 	u64 dir_offset = 0;
 
@@ -195,7 +194,8 @@ int bch2_link_trans(struct btree_trans *trans,
 
 	dir_u->bi_mtime = dir_u->bi_ctime = now;
 
-	dir_hash = bch2_hash_info_init(c, dir_u);
+	struct bch_hash_info dir_hash;
+	try(bch2_hash_info_init(c, dir_u, &dir_hash));
 
 	try(bch2_dirent_create(trans, dir, &dir_hash,
 			       mode_to_type(inode_u->bi_mode),
@@ -227,7 +227,8 @@ int bch2_unlink_trans(struct btree_trans *trans,
 
 	try(bch2_inode_peek(trans, &dir_iter, dir_u, dir, BTREE_ITER_intent));
 
-	struct bch_hash_info dir_hash = bch2_hash_info_init(c, dir_u);
+	struct bch_hash_info dir_hash;
+	try(bch2_hash_info_init(c, dir_u, &dir_hash));
 
 	subvol_inum inum;
 	try(bch2_dirent_lookup_trans(trans, &dirent_iter, dir, &dir_hash,
@@ -331,19 +332,19 @@ int bch2_rename_trans(struct btree_trans *trans,
 	CLASS(btree_iter_uninit, dst_dir_iter)(trans);
 	CLASS(btree_iter_uninit, src_inode_iter)(trans);
 	CLASS(btree_iter_uninit, dst_inode_iter)(trans);
-	struct bch_hash_info src_hash, dst_hash;
 	subvol_inum src_inum, dst_inum;
 	u64 src_offset, dst_offset;
 	u64 now = bch2_current_time(c);
 
 	try(bch2_inode_peek(trans, &src_dir_iter, src_dir_u, src_dir, BTREE_ITER_intent));
 
-	src_hash = bch2_hash_info_init(c, src_dir_u);
+	struct bch_hash_info src_hash, dst_hash;
+	try(bch2_hash_info_init(c, src_dir_u, &src_hash));
 
 	if (!subvol_inum_eq(dst_dir, src_dir)) {
 		try(bch2_inode_peek(trans, &dst_dir_iter, dst_dir_u, dst_dir, BTREE_ITER_intent));
 
-		dst_hash = bch2_hash_info_init(c, dst_dir_u);
+		try(bch2_hash_info_init(c, dst_dir_u, &dst_hash));
 	} else {
 		dst_dir_u = src_dir_u;
 		dst_hash = src_hash;
@@ -11,6 +11,38 @@
 
 #include "snapshots/subvolume.h"
 
+static inline struct bch_hash_info
+__bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
+{
+	struct bch_hash_info info = {
+		.inum_snapshot	= bi->bi_snapshot,
+		.type		= INODE_STR_HASH(bi),
+		.is_31bit	= bi->bi_flags & BCH_INODE_31bit_dirent_offset,
+		.cf_encoding	= bch2_inode_casefold(c, bi) ? c->cf_encoding : NULL,
+		.siphash_key	= { .k0 = bi->bi_hash_seed }
+	};
+
+	if (unlikely(info.type == BCH_STR_HASH_siphash_old)) {
+		u8 digest[SHA256_DIGEST_SIZE];
+
+		sha256((const u8 *)&bi->bi_hash_seed,
+		       sizeof(bi->bi_hash_seed), digest);
+		memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
+	}
+
+	return info;
+}
+
+int bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi,
+			struct bch_hash_info *ret)
+{
+	if (bch2_inode_casefold(c, bi) && !c->cf_encoding)
+		return bch_err_throw(c, casefold_dir_but_disabled);
+
+	*ret = __bch2_hash_info_init(c, bi);
+	return 0;
+}
+
 static int bch2_dirent_has_target(struct btree_trans *trans, struct bkey_s_c_dirent d)
 {
 	if (d.v->d_type == DT_SUBVOL) {
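Moving the constructor out of line and giving it an error return lets every caller reject a casefolded directory when the filesystem lacks UTF-8 casefolding support, instead of silently hashing with the wrong parameters. A sketch of the fallible-constructor pattern the callers are converted to (toy types; the errno value is only an illustration):

#include <errno.h>
#include <stdbool.h>

struct toy_hash_info { unsigned seed; bool casefold; };

/* fallible constructor: refuse configurations we cannot honour */
static int toy_hash_info_init(bool dir_casefolded, bool fs_has_casefolding,
			      struct toy_hash_info *out)
{
	if (dir_casefolded && !fs_has_casefolding)
		return -EOPNOTSUPP;	/* cf. casefold_dir_but_disabled */

	*out = (struct toy_hash_info) {
		.seed     = 0x12345678,
		.casefold = dir_casefolded,
	};
	return 0;
}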
@@ -136,8 +168,8 @@ int bch2_repair_inode_hash_info(struct btree_trans *trans,
 	if (inode.bi_hash_seed	== snapshot_root->bi_hash_seed &&
 	    INODE_STR_HASH(&inode) == INODE_STR_HASH(snapshot_root)) {
 #ifdef CONFIG_BCACHEFS_DEBUG
-		struct bch_hash_info hash1 = bch2_hash_info_init(c, snapshot_root);
-		struct bch_hash_info hash2 = bch2_hash_info_init(c, &inode);
+		struct bch_hash_info hash1 = __bch2_hash_info_init(c, snapshot_root);
+		struct bch_hash_info hash2 = __bch2_hash_info_init(c, &inode);
 
 		BUG_ON(hash1.type != hash2.type ||
 		       memcmp(&hash1.siphash_key,
@@ -208,7 +240,7 @@ static noinline int check_inode_hash_info_matches_root(struct btree_trans *trans,
 	struct bch_inode_unpacked snapshot_root;
 	try(bch2_inode_find_snapshot_root(trans, inum, &snapshot_root));
 
-	struct bch_hash_info hash_root = bch2_hash_info_init(trans->c, &snapshot_root);
+	struct bch_hash_info hash_root = __bch2_hash_info_init(trans->c, &snapshot_root);
 	if (hash_info->type != hash_root.type ||
 	    memcmp(&hash_info->siphash_key,
 		   &hash_root.siphash_key,
@@ -47,27 +47,7 @@ struct bch_hash_info {
 	SIPHASH_KEY	siphash_key;
 };
 
-static inline struct bch_hash_info
-bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
-{
-	struct bch_hash_info info = {
-		.inum_snapshot	= bi->bi_snapshot,
-		.type		= INODE_STR_HASH(bi),
-		.is_31bit	= bi->bi_flags & BCH_INODE_31bit_dirent_offset,
-		.cf_encoding	= bch2_inode_casefold(c, bi) ? c->cf_encoding : NULL,
-		.siphash_key	= { .k0 = bi->bi_hash_seed }
-	};
-
-	if (unlikely(info.type == BCH_STR_HASH_siphash_old)) {
-		u8 digest[SHA256_DIGEST_SIZE];
-
-		sha256((const u8 *)&bi->bi_hash_seed,
-		       sizeof(bi->bi_hash_seed), digest);
-		memcpy(&info.siphash_key, digest, sizeof(info.siphash_key));
-	}
-
-	return info;
-}
+int bch2_hash_info_init(struct bch_fs *, const struct bch_inode_unpacked *, struct bch_hash_info *);
 
 struct bch_str_hash_ctx {
 	union {
@@ -145,7 +145,9 @@ void bch2_xattr_to_text(struct printbuf *out, struct bch_fs *c,
 static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info *inode,
 				const char *name, void *buffer, size_t size, int type)
 {
-	struct bch_hash_info hash = bch2_hash_info_init(trans->c, &inode->ei_inode);
+	struct bch_hash_info hash;
+	try(bch2_hash_info_init(trans->c, &inode->ei_inode, &hash));
+
 	struct xattr_search_key search = X_SEARCH(type, name, strlen(name));
 	CLASS(btree_iter_uninit, iter)(trans);
 	struct bkey_s_c k = bkey_try(bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc, &hash,
@@ -163,7 +165,6 @@ static int bch2_xattr_get_trans(struct btree_trans *trans, struct bch_inode_info *inode,
 
 int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
 		   struct bch_inode_unpacked *inode_u,
-		   const struct bch_hash_info *hash_info,
 		   const char *name, const void *value, size_t size,
 		   int type, int flags)
 {
@@ -183,6 +184,9 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
 
 	try(bch2_inode_write(trans, &inode_iter, inode_u));
 
+	struct bch_hash_info hash_info;
+	try(bch2_hash_info_init(c, inode_u, &hash_info));
+
 	int ret;
 	if (value) {
 		struct bkey_i_xattr *xattr;
@@ -205,7 +209,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
 		memcpy(xattr->v.x_name_and_value, name, namelen);
 		memcpy(xattr_val(&xattr->v), value, size);
 
-		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, hash_info,
+		ret = bch2_hash_set(trans, bch2_xattr_hash_desc, &hash_info,
 				    inum, &xattr->k_i,
 				    (flags & XATTR_CREATE ? STR_HASH_must_create : 0)|
 				    (flags & XATTR_REPLACE ? STR_HASH_must_replace : 0));
@@ -214,7 +218,7 @@ int bch2_xattr_set(struct btree_trans *trans, subvol_inum inum,
 			X_SEARCH(type, name, strlen(name));
 
 		ret = bch2_hash_delete(trans, bch2_xattr_hash_desc,
-				       hash_info, inum, &search);
+				       &hash_info, inum, &search);
 	}
 
 	if (bch2_err_matches(ret, ENOENT))
|
||||
{
|
||||
struct bch_inode_info *inode = to_bch_ei(vinode);
|
||||
struct bch_fs *c = inode->v.i_sb->s_fs_info;
|
||||
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
|
||||
struct bch_inode_unpacked inode_u;
|
||||
int ret;
|
||||
|
||||
CLASS(btree_trans, trans)(c);
|
||||
ret = commit_do(trans, NULL, NULL, 0,
|
||||
int ret = commit_do(trans, NULL, NULL, 0,
|
||||
bch2_xattr_set(trans, inode_inum(inode), &inode_u,
|
||||
&hash, name, value, size,
|
||||
name, value, size,
|
||||
handler->flags, flags)) ?:
|
||||
(bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME), 0);
|
||||
|
||||
|
||||
@@ -40,7 +40,7 @@ struct bch_inode_info;
 
 /* Exported for cmd_migrate.c in tools: */
 int bch2_xattr_set(struct btree_trans *, subvol_inum,
-		   struct bch_inode_unpacked *, const struct bch_hash_info *,
+		   struct bch_inode_unpacked *,
		   const char *, const void *, size_t, int, int);
 
 ssize_t bch2_xattr_list(struct dentry *, char *, size_t);
@@ -122,7 +122,10 @@ static bool should_print_loglevel(struct bch_fs *c, const char *fmt)
 
 void bch2_print_str(struct bch_fs *c, const char *prefix, const char *str)
 {
-	BUG_ON(!str);
+	/* Nothing to print? Nothing to do: */
+	if (!str)
+		return;
+
 	if (!should_print_loglevel(c, prefix))
 		return;
 
@@ -371,14 +374,13 @@ void bch2_fs_read_only(struct bch_fs *c)
 	    test_bit(BCH_FS_clean_shutdown, &c->flags) &&
 	    c->recovery.pass_done >= BCH_RECOVERY_PASS_journal_replay) {
 		BUG_ON(c->journal.last_empty_seq != journal_cur_seq(&c->journal));
-		BUG_ON(!c->sb.clean);
 		BUG_ON(atomic_long_read(&c->btree_cache.nr_dirty));
 		BUG_ON(atomic_long_read(&c->btree_key_cache.nr_dirty));
 		BUG_ON(c->btree_write_buffer.inc.keys.nr);
 		BUG_ON(c->btree_write_buffer.flushing.keys.nr);
+		bch2_verify_replicas_refs_clean(c);
 		bch2_verify_accounting_clean(c);
 
 		bch_verbose(c, "marking filesystem clean");
 		bch2_fs_mark_clean(c);
 	} else {
 		/* Make sure error counts/counters are persisted */
 		guard(mutex)(&c->sb_lock);
@@ -457,6 +459,8 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 	if (WARN_ON(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)))
 		return bch_err_throw(c, erofs_no_alloc_info);
 
+	BUG_ON(!test_bit(BCH_FS_may_upgrade_downgrade, &c->flags));
+
 	if (test_bit(BCH_FS_initial_gc_unfixed, &c->flags)) {
 		bch_err(c, "cannot go rw, unfixed btree errors");
 		return bch_err_throw(c, erofs_unfixed_errors);
@@ -474,7 +478,6 @@
 
 	try(bch2_fs_init_rw(c));
-	try(bch2_sb_members_v2_init(c));
 	try(bch2_fs_mark_dirty(c));
 
 	clear_bit(BCH_FS_clean_shutdown, &c->flags);
 
@@ -872,8 +875,9 @@ static int bch2_fs_opt_version_init(struct bch_fs *c, struct printbuf *out)
 	if (c->opts.journal_rewind)
 		c->opts.fsck = true;
 
-	bool may_upgrade_downgrade = !(c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) ||
-		bch2_fs_will_resize_on_mount(c);
+	if (!(c->sb.features & BIT_ULL(BCH_FEATURE_small_image)) ||
+	    bch2_fs_will_resize_on_mount(c))
+		set_bit(BCH_FS_may_upgrade_downgrade, &c->flags);
 
 	prt_str_indented(out, "starting version ");
 	bch2_version_to_text(out, c->sb.version);
@@ -953,7 +957,7 @@ static int bch2_fs_opt_version_init(struct bch_fs *c, struct printbuf *out)
 		prt_newline(out);
 	}
 
-	if (may_upgrade_downgrade) {
+	if (test_bit(BCH_FS_may_upgrade_downgrade, &c->flags)) {
 		if (bch2_check_version_downgrade(c)) {
 			prt_str_indented(out, "Version downgrade required");
 
@@ -1036,7 +1040,6 @@ static int bch2_fs_init(struct bch_fs *c, struct bch_sb *sb,
 
 	init_rwsem(&c->state_lock);
 	mutex_init(&c->sb_lock);
-	mutex_init(&c->replicas_gc_lock);
 	mutex_init(&c->btree_root_lock);
 	INIT_WORK(&c->read_only_work, bch2_fs_read_only_work);
 
@@ -1269,7 +1272,8 @@ static bool bch2_fs_may_start(struct bch_fs *c, struct printbuf *err)
 	case BCH_DEGRADED_yes:
 		flags |= BCH_FORCE_IF_DEGRADED;
 		break;
-	default:
+	default: {
+		bool missing = false;
 		for_each_member_device(c, ca)
 			if (!bch2_dev_is_online(ca) &&
 			    (ca->mi.state != BCH_MEMBER_STATE_failed ||
@@ -1277,8 +1281,10 @@ static bool bch2_fs_may_start(struct bch_fs *c, struct printbuf *err)
 				prt_printf(err, "Cannot mount without device %u\n", ca->dev_idx);
 				guard(printbuf_indent)(err);
 				bch2_member_to_text_short(err, c, ca);
-				return bch_err_throw(c, insufficient_devices_to_start);
+				missing = true;
 			}
+		return missing ? bch_err_throw(c, insufficient_devices_to_start) : 0;
+	}
 	}
 
 	if (!bch2_have_enough_devs(c, c->online_devs, flags, err, !c->opts.read_only)) {
@@ -1311,6 +1317,12 @@ static int __bch2_fs_start(struct bch_fs *c, struct printbuf *err)
         bch2_recalc_capacity(c);
     }
 
+    /*
+     * check mount options as early as possible; some can only be checked
+     * after starting
+     */
+    try(bch2_opts_hooks_pre_set(c));
+
     try(BCH_SB_INITIALIZED(c->disk_sb.sb)
         ? bch2_fs_recovery(c)
         : bch2_fs_initialize(c));

@@ -1337,13 +1349,15 @@ int bch2_fs_start(struct bch_fs *c)
 {
     CLASS(printbuf, err)();
     bch2_log_msg_start(c, &err);
+    unsigned pos = err.pos;
 
     int ret = __bch2_fs_start(c, &err);
     c->recovery_task = NULL;
 
     if (ret)
         prt_printf(&err, "error starting filesystem: %s", bch2_err_str(ret));
-    bch2_print_str(c, KERN_ERR, err.buf);
+    if (err.pos != pos)
+        bch2_print_str(c, KERN_ERR, err.buf);
 
     return ret;
 }

@@ -21,7 +21,8 @@ enum bch_run_recovery_pass_flags
 
 static inline bool go_rw_in_recovery(struct bch_fs *c)
 {
-    return (c->journal_keys.nr ||
+    return test_bit(BCH_FS_may_upgrade_downgrade, &c->flags) &&
+        (c->journal_keys.nr ||
          !c->opts.read_only ||
          !c->sb.clean ||
          c->opts.recovery_passes ||
 
@@ -764,6 +764,9 @@ use_clean:
 
     set_bit(BCH_FS_btree_running, &c->flags);
 
+    /* some mount options can only be checked after the btree is running */
+    try(bch2_opts_hooks_pre_set(c));
+
     try(bch2_sb_set_upgrade_extra(c));
 
     try(bch2_run_recovery_passes(c, 0));
 
@@ -377,6 +377,7 @@ int bch2_fs_journal_start(struct journal *j, struct journal_start_info info)
     struct journal_replay *i, **_i;
     struct genradix_iter iter;
     bool had_entries = false;
+    int ret = 0;
 
     /*
      *
@@ -445,12 +446,26 @@ int bch2_fs_journal_start(struct journal *j, struct journal_start_info info)
         if (journal_entry_empty(&i->j))
             j->last_empty_seq = le64_to_cpu(i->j.seq);
 
-        struct bch_devs_list seq_devs = {};
-        darray_for_each(i->ptrs, ptr)
-            seq_devs.data[seq_devs.nr++] = ptr->dev;
+        if (!info.clean) {
+            struct bch_devs_list seq_devs = {};
+            darray_for_each(i->ptrs, ptr)
+                seq_devs.data[seq_devs.nr++] = ptr->dev;
 
-        p = journal_seq_pin(j, seq);
-        bch2_devlist_to_replicas(&p->devs.e, BCH_DATA_journal, seq_devs);
+            p = journal_seq_pin(j, seq);
+            bch2_devlist_to_replicas(&p->devs.e, BCH_DATA_journal, seq_devs);
+
+            CLASS(printbuf, buf)();
+            bch2_replicas_entry_to_text(&buf, &p->devs.e);
+
+            fsck_err_on(!test_bit(JOURNAL_degraded, &j->flags) &&
+                    !bch2_replicas_marked(c, &p->devs.e),
+                    c, journal_entry_replicas_not_marked,
+                    "superblock not marked as containing replicas for journal entry %llu\n%s",
+                    le64_to_cpu(i->j.seq), buf.buf);
+
+            if (bch2_replicas_entry_get(c, &p->devs.e))
+                p->devs.e.nr_devs = 0;
+        }
 
         had_entries = true;
     }

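The fsck_err_on() above is the usual bcachefs consistency hook: if the condition fires it logs the formatted message, and depending on repair policy either continues (treating the inconsistency as fixed) or unwinds through the fsck_err label with an error. A self-contained sketch of that control flow, with toy names (check_entry, TOY_EFSCK and the repair flag are illustrative, not the bcachefs API, which also handles ratelimiting and the fix_errors mount option):

#include <stdio.h>

#define TOY_EFSCK 1

/*
 * Toy version of the pattern: evaluate cond; if set, print the report
 * and, when repair is off, bail out through the enclosing function's
 * fsck_err label, like the kernel macro does with its 'ret' variable.
 */
#define fsck_err_on(cond, repair, ...)          \
do {                                            \
    if (cond) {                                 \
        fprintf(stderr, __VA_ARGS__);           \
        if (!(repair)) {                        \
            ret = TOY_EFSCK;                    \
            goto fsck_err;                      \
        }                                       \
    }                                           \
} while (0)

static int check_entry(unsigned long long seq, int marked, int repair)
{
    int ret = 0;

    fsck_err_on(!marked, repair,
            "superblock not marked as containing replicas for journal entry %llu\n",
            seq);

    /* repair path would mark the entry here and carry on */
fsck_err:
    return ret;
}

int main(void)
{
    printf("repair on:  %d\n", check_entry(7, 0, 1));
    printf("repair off: %d\n", check_entry(7, 0, 0));
    return 0;
}
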
@@ -464,7 +479,9 @@ int bch2_fs_journal_start(struct journal *j, struct journal_start_info info)
         c->last_bucket_seq_cleanup = journal_cur_seq(j);
     }
 
-    return 0;
+    try(bch2_replicas_gc_reffed(c));
+fsck_err:
+    return ret;
 }
 
 void bch2_journal_set_replay_done(struct journal *j)

@@ -358,7 +358,6 @@ static int journal_entry_open(struct journal *j)
 
     lockdep_assert_held(&j->lock);
     BUG_ON(journal_entry_is_open(j));
-    BUG_ON(c->sb.clean);
 
     if (j->blocked)
         return bch_err_throw(c, journal_blocked);

@@ -435,7 +434,8 @@ static int journal_entry_open(struct journal *j)
 
     bkey_extent_init(&buf->key);
     buf->noflush = false;
-    buf->must_flush = false;
+    /* if filesystem is clean, the first journal write must be a flush */
+    buf->must_flush = c->sb.clean;
     buf->separate_flush = false;
     buf->flush_time = 0;
     buf->need_flush_to_write_buffer = true;

@@ -1097,6 +1097,7 @@ void __bch2_journal_debug_to_text(struct printbuf *out, struct journal *j)
     prt_printf(out, "last_seq:\t%llu\n", j->last_seq);
     prt_printf(out, "last_seq_ondisk:\t%llu\n", j->last_seq_ondisk);
     prt_printf(out, "flushed_seq_ondisk:\t%llu\n", j->flushed_seq_ondisk);
+    prt_printf(out, "last_empty_seq:\t%llu\n", j->last_empty_seq);
     prt_printf(out, "watermark:\t%s\n", bch2_watermarks[j->watermark]);
     prt_printf(out, "each entry reserved:\t%u\n", j->entry_u64s_reserved);
     prt_printf(out, "nr flush writes:\t%llu\n", j->nr_flush_writes);

@@ -1351,7 +1351,7 @@ int bch2_journal_read(struct bch_fs *c, struct journal_start_info *info)
     struct journal_list jlist;
     struct journal_replay *i, **_i;
     struct genradix_iter radix_iter;
-    bool degraded = false, last_write_torn = false;
+    bool last_write_torn = false;
     u64 seq;
     int ret = 0;
 
@@ -1377,7 +1377,7 @@ int bch2_journal_read(struct bch_fs *c, struct journal_start_info *info)
                        system_unbound_wq,
                        &jlist.cl);
         else
-            degraded = true;
+            set_bit(JOURNAL_degraded, &c->journal.flags);
     }
 
     while (closure_sync_timeout(&jlist.cl, sysctl_hung_task_timeout_secs * HZ / 2))

@@ -1515,17 +1515,6 @@ int bch2_journal_read(struct bch_fs *c, struct journal_start_info *info)
             replicas_entry_add_dev(&replicas.e, ptr->dev);
 
-        bch2_replicas_entry_sort(&replicas.e);
-
-        CLASS(printbuf, buf)();
-        bch2_replicas_entry_to_text(&buf, &replicas.e);
-
-        if (!degraded &&
-            !bch2_replicas_marked(c, &replicas.e) &&
-            (le64_to_cpu(i->j.seq) == info->seq_read_start ||
-             fsck_err(c, journal_entry_replicas_not_marked,
-                  "superblock not marked as containing replicas for journal entry %llu\n%s",
-                  le64_to_cpu(i->j.seq), buf.buf)))
-            try(bch2_mark_replicas(c, &replicas.e));
     }
 fsck_err:
     return ret;
 
@@ -344,25 +344,47 @@ void bch2_journal_update_last_seq(struct journal *j)
     }
 }
 
-void bch2_journal_update_last_seq_ondisk(struct journal *j, u64 last_seq_ondisk)
+void bch2_journal_update_last_seq_ondisk(struct journal *j, u64 last_seq_ondisk,
+                     bool clean)
 {
+    struct bch_fs *c = container_of(j, struct bch_fs, journal);
+    union bch_replicas_padded replicas;
+    unsigned nr_refs = 0;
     size_t dirty_entry_bytes = 0;
 
-    scoped_guard(mutex, &j->last_seq_ondisk_lock)
-        while (j->last_seq_ondisk < last_seq_ondisk) {
-            struct journal_entry_pin_list *pin_list = journal_seq_pin(j, j->last_seq_ondisk);
+    scoped_guard(mutex, &j->last_seq_ondisk_lock) {
+        for (u64 seq = j->last_seq_ondisk;
+             seq < (clean ? j->pin.back : last_seq_ondisk);
+             seq++) {
+            struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
+
+            if (pin_list->devs.e.nr_devs) {
+                if (nr_refs &&
+                    !bch2_replicas_entry_eq(&replicas.e, &pin_list->devs.e)) {
+                    bch2_replicas_entry_put_many(c, &replicas.e, nr_refs);
+                    nr_refs = 0;
+                }
+
+                memcpy(&replicas, &pin_list->devs, replicas_entry_bytes(&pin_list->devs.e));
+                pin_list->devs.e.nr_devs = 0;
+                nr_refs++;
+            }
 
             dirty_entry_bytes += pin_list->bytes;
             pin_list->bytes = 0;
-
-            j->last_seq_ondisk++;
         }
+
+        j->last_seq_ondisk = last_seq_ondisk;
+    }
 
     scoped_guard(spinlock, &j->lock) {
         if (WARN_ON(j->dirty_entry_bytes < dirty_entry_bytes))
             dirty_entry_bytes = j->dirty_entry_bytes;
         j->dirty_entry_bytes -= dirty_entry_bytes;
     }
+
+    if (nr_refs)
+        bch2_replicas_entry_put_many(c, &replicas.e, nr_refs);
 }
 
 bool __bch2_journal_pin_put(struct journal *j, u64 seq)

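The loop above batches the reference drops: journal entries written back-to-back usually land on the same set of devices, so rather than one put per sequence number it accumulates a run of identical replicas entries and releases them with a single put_many. A self-contained sketch of the same run-length batching, with toy types (struct entry and put_many stand in for the replicas machinery, not the bcachefs API):

#include <stdio.h>
#include <string.h>

struct entry { char devs[16]; };    /* stand-in for a replicas entry */

static void put_many(const char *devs, unsigned nr)
{
    printf("dropping %u refs on \"%s\"\n", nr, devs);
}

int main(void)
{
    struct entry pins[] = {
        { "ab" }, { "ab" }, { "ab" }, { "ac" }, { "ac" }, { "" },
    };
    char run[16] = "";
    unsigned nr_refs = 0;

    for (size_t i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
        if (!pins[i].devs[0])
            continue;           /* entry held no replicas ref */

        if (nr_refs && strcmp(run, pins[i].devs)) {
            put_many(run, nr_refs); /* run ended: flush it */
            nr_refs = 0;
        }

        strcpy(run, pins[i].devs);
        pins[i].devs[0] = '\0';     /* ownership moves into the run */
        nr_refs++;
    }

    if (nr_refs)
        put_many(run, nr_refs);
    return 0;
}

This prints one drop of 3 refs for the "ab" run and one of 2 for "ac": five pins, two atomic operations.
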
@@ -973,39 +995,7 @@ int bch2_journal_flush_device_pins(struct journal *j, int dev_idx)
 
     try(bch2_journal_error(j));
 
-    guard(mutex)(&c->replicas_gc_lock);
-    bch2_replicas_gc_start(c, 1 << BCH_DATA_journal);
-
-    /*
-     * Now that we've populated replicas_gc, write to the journal to mark
-     * active journal devices. This handles the case where the journal might
-     * be empty. Otherwise we could clear all journal replicas and
-     * temporarily put the fs into an unrecoverable state. Journal recovery
-     * expects to find devices marked for journal data on unclean mount.
-     */
-    int ret = bch2_journal_meta(&c->journal);
-    if (ret)
-        goto err;
-
-    seq = 0;
-    scoped_guard(spinlock, &j->lock)
-        while (!ret) {
-            seq = max(seq, j->last_seq);
-            if (seq > j->seq_ondisk)
-                break;
-
-            union bch_replicas_padded replicas;
-            memcpy(&replicas, &journal_seq_pin(j, seq)->devs, sizeof(replicas));
-            seq++;
-
-            if (replicas.e.nr_devs) {
-                spin_unlock(&j->lock);
-                ret = bch2_mark_replicas(c, &replicas.e);
-                spin_lock(&j->lock);
-            }
-        }
-err:
-    return bch2_replicas_gc_end(c, ret);
+    return 0;
 }
 
 bool bch2_journal_seq_pins_to_text(struct printbuf *out, struct journal *j, u64 *seq)

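This deletion removes the last user of the replicas GC machinery: the old code rebuilt the set of live journal replicas by re-marking whatever was still pinned, and, as the deleted comment explains, first had to write a journal entry so that an empty journal could not sweep every journal replicas entry away. With a reference held per pinned journal entry, liveness is a property of the entry itself and no separate mark-and-sweep pass is needed. A toy sketch of the refcounted half of that difference (illustrative names, not bcachefs API):

#include <stdbool.h>
#include <stdio.h>

struct replicas { const char *desc; unsigned refs; bool in_sb; };

/*
 * Old world (sketch): gc re-marked live entries and swept the rest,
 * racing with the journal going empty. New world: an entry is swept
 * exactly when its last reference is put, so no race exists.
 */
static void put(struct replicas *r)
{
    if (!--r->refs) {
        r->in_sb = false;
        printf("dropped superblock entry: %s\n", r->desc);
    }
}

int main(void)
{
    struct replicas journal_on_dev2 = {
        .desc = "journal: dev 2", .refs = 3, .in_sb = true,
    };

    /* three pinned journal entries on dev 2 get flushed: */
    for (int i = 0; i < 3; i++)
        put(&journal_on_dev2);

    return 0;
}
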
@@ -44,7 +44,7 @@ journal_seq_pin(struct journal *j, u64 seq)
 }
 
 void bch2_journal_update_last_seq(struct journal *);
-void bch2_journal_update_last_seq_ondisk(struct journal *, u64);
+void bch2_journal_update_last_seq_ondisk(struct journal *, u64, bool);
 
 bool __bch2_journal_pin_put(struct journal *, u64);
 void bch2_journal_pin_put(struct journal *, u64);
 
@@ -150,6 +150,7 @@ enum journal_space_from
 };
 
 #define JOURNAL_FLAGS()        \
+    x(degraded)        \
     x(replay_done)        \
     x(running)        \
     x(may_skip_flush)        \
 
@@ -196,10 +196,17 @@ static CLOSURE_CALLBACK(journal_write_done)
                    ? j->flush_write_time
                    : j->noflush_write_time, j->write_start_time);
 
+    struct bch_replicas_entry_v1 *r = &journal_seq_pin(j, seq)->devs.e;
     if (w->had_error) {
-        struct bch_replicas_entry_v1 *r = &journal_seq_pin(j, seq)->devs.e;
         bch2_replicas_entry_put(c, r);
         r->nr_devs = 0;
     }
+
+    if (!r->nr_devs && !w->empty) {
+        bch2_devlist_to_replicas(r, BCH_DATA_journal, w->devs_written);
+        err = bch2_replicas_entry_get(c, r);
+        if (err)
+            r->nr_devs = 0;
+    }
 
     if (!w->devs_written.nr)
@@ -261,7 +268,7 @@ again:
              * properly - when the flush completes replcias
              * refs need to have been dropped
              * */
-            bch2_journal_update_last_seq_ondisk(j, w->last_seq);
+            bch2_journal_update_last_seq_ondisk(j, w->last_seq, w->empty);
             last_seq_ondisk_updated = true;
             spin_lock(&j->lock);
             goto again;

@@ -657,7 +664,6 @@ CLOSURE_CALLBACK(bch2_journal_write)
     unsigned nr_rw_members = dev_mask_nr(&c->rw_devs[BCH_DATA_free]);
     int ret;
 
-    BUG_ON(BCH_SB_CLEAN(c->disk_sb.sb));
     BUG_ON(!w->write_started);
     BUG_ON(w->write_allocated);
     BUG_ON(w->write_done);

@@ -718,15 +724,24 @@ CLOSURE_CALLBACK(bch2_journal_write)
 
     w->devs_written = bch2_bkey_devs(c, bkey_i_to_s_c(&w->key));
 
-    /*
-     * Mark journal replicas before we submit the write to guarantee
-     * recovery will find the journal entries after a crash.
-     */
-    struct bch_replicas_entry_v1 *r = &journal_seq_pin(j, le64_to_cpu(w->data->seq))->devs.e;
-    bch2_devlist_to_replicas(r, BCH_DATA_journal, w->devs_written);
-    ret = bch2_mark_replicas(c, r);
-    if (ret)
-        goto err;
+    if (!c->sb.clean) {
+        /*
+         * Mark journal replicas before we submit the write to guarantee
+         * recovery will find the journal entries after a crash.
+         *
+         * If the filesystem is clean, we have to defer this until after
+         * the write completes, so the filesystem isn't marked dirty
+         * before anything is in the journal:
+         */
+        struct bch_replicas_entry_v1 *r = &journal_seq_pin(j, le64_to_cpu(w->data->seq))->devs.e;
+        bch2_devlist_to_replicas(r, BCH_DATA_journal, w->devs_written);
+
+        ret = bch2_replicas_entry_get(c, r);
+        if (ret) {
+            r->nr_devs = 0;
+            goto err;
+        }
+    }
 
     if (c->opts.nochanges)
         goto no_io;
 
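The comment above carries the ordering argument: on a dirty filesystem the superblock must claim the journal devices before the write is submitted, so a crash cannot strand a journal entry on unmarked devices; on a clean filesystem the claim must be deferred to the completion path, so a crash cannot leave the superblock looking dirty with nothing in the journal. A toy model of the two crash windows and why each order is safe for its case (names are illustrative, not bcachefs API):

#include <stdbool.h>
#include <stdio.h>

/*
 * "claimed" = superblock marks journal replicas (which also means the
 * fs is considered dirty); "written" = the journal entry hit disk.
 * A crash lands between the two steps, so exactly one is done.
 */
static void crash_window(const char *order, bool clean_fs,
             bool claimed, bool written)
{
    bool bad;

    if (clean_fs)
        /* clean fs must never look dirty while the journal is empty */
        bad = claimed && !written;
    else
        /* dirty fs must never have journal data on unclaimed devices */
        bad = written && !claimed;

    printf("%-16s on %s fs: %s\n", order, clean_fs ? "clean" : "dirty",
           bad ? "UNRECOVERABLE window" : "safe window");
}

int main(void)
{
    crash_window("mark, then write", false, true,  false); /* safe */
    crash_window("write, then mark", false, false, true);  /* bad  */
    crash_window("mark, then write", true,  true,  false); /* bad  */
    crash_window("write, then mark", true,  false, true);  /* safe */
    return 0;
}
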
@@ -573,6 +573,12 @@ int bch2_opt_hook_pre_set(struct bch_fs *c, struct bch_dev *ca, u64 inum, enum b
         if (v)
             bch2_check_set_feature(c, BCH_FEATURE_ec);
         break;
+    case Opt_casefold_disabled:
+        if (v && (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding))) {
+            bch_err(c, "cannot mount with casefolding disabled: casefolding already in use");
+            return bch_err_throw(c, casefolding_in_use);
+        }
+        break;
     default:
         break;
     }
 
@@ -255,18 +255,10 @@ const struct bch_sb_field_ops bch_sb_field_ops_clean = {
     .to_text    = bch2_sb_clean_to_text,
 };
 
-int bch2_fs_mark_dirty(struct bch_fs *c)
+void bch2_fs_mark_dirty(struct bch_fs *c)
 {
-    /*
-     * Unconditionally write superblock, to verify it hasn't changed before
-     * we go rw:
-     */
-
-    guard(mutex)(&c->sb_lock);
     SET_BCH_SB_CLEAN(c->disk_sb.sb, false);
     c->disk_sb.sb->features[0] |= cpu_to_le64(BCH_SB_FEATURES_ALWAYS);
-
-    return bch2_write_super(c);
 }
 
 void bch2_fs_mark_clean(struct bch_fs *c)

@@ -276,7 +268,6 @@ void bch2_fs_mark_clean(struct bch_fs *c)
     unsigned u64s;
     int ret;
 
-    guard(mutex)(&c->sb_lock);
     if (BCH_SB_CLEAN(c->disk_sb.sb))
         return;
 
@@ -320,6 +311,4 @@ void bch2_fs_mark_clean(struct bch_fs *c)
     }
 
     bch2_journal_pos_from_member_info_set(c);
-
-    bch2_write_super(c);
 }

@@ -10,7 +10,7 @@ void bch2_journal_super_entries_add_common(struct bch_fs *, struct jset_entry **
 
 extern const struct bch_sb_field_ops bch_sb_field_ops_clean;
 
-int bch2_fs_mark_dirty(struct bch_fs *);
+void bch2_fs_mark_dirty(struct bch_fs *);
 void bch2_fs_mark_clean(struct bch_fs *);
 
 #endif /* _BCACHEFS_SB_CLEAN_H */

@@ -1008,6 +1008,9 @@ int bch2_write_super(struct bch_fs *c)
     DARRAY(struct bch_dev *) online_devices = {};
     int ret = 0;
 
+    if (!test_bit(BCH_FS_may_upgrade_downgrade, &c->flags))
+        return 0;
+
     trace_and_count(c, write_super, c, _RET_IP_);
 
     if (c->opts.degraded == BCH_DEGRADED_very)

@@ -1018,6 +1021,11 @@ int bch2_write_super(struct bch_fs *c)
     closure_init_stack(cl);
     memset(&sb_written, 0, sizeof(sb_written));
 
+    if (bch2_sb_has_journal(c->disk_sb.sb))
+        bch2_fs_mark_dirty(c);
+    else
+        bch2_fs_mark_clean(c);
+
     /*
      * Note: we do writes to RO devices here, and we might want to change
      * that in the future.
 
@@ -691,7 +691,12 @@ static struct dentry *bch2_lookup(struct inode *vdir, struct dentry *dentry,
 {
     struct bch_fs *c = vdir->i_sb->s_fs_info;
     struct bch_inode_info *dir = to_bch_ei(vdir);
-    struct bch_hash_info hash = bch2_hash_info_init(c, &dir->ei_inode);
+
+    struct bch_hash_info hash;
+    int ret = bch2_hash_info_init(c, &dir->ei_inode, &hash);
+    bch_err_fn(c, ret);
+    if (ret)
+        return d_splice_alias(NULL, dentry);
 
     struct bch_inode_info *inode;
     bch2_trans_do(c,

@@ -1229,7 +1234,9 @@ static int bch2_vfs_readdir(struct file *file, struct dir_context *ctx)
 {
     struct bch_inode_info *inode = file_bch_inode(file);
     struct bch_fs *c = inode->v.i_sb->s_fs_info;
-    struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
+
+    struct bch_hash_info hash;
+    try(bch2_hash_info_init(c, &inode->ei_inode, &hash));
 
     if (!dir_emit_dots(file, ctx))
         return 0;

@@ -1320,7 +1327,8 @@ static int fssetxattr_inode_update_fn(struct btree_trans *trans,
         (s->flags & (BCH_INODE_nodump|BCH_INODE_noatime)) != s->flags)
         return -EINVAL;
 
-    if (s->casefold != bch2_inode_casefold(c, bi))
+    if (s->set_casefold &&
+        s->casefold != bch2_inode_casefold(c, bi))
         try(bch2_inode_set_casefold(trans, inode_inum(inode), bi, s->casefold));
 
     if (s->set_project) {
 
@@ -47,7 +47,6 @@ static int bch2_ioc_reinherit_attrs(struct bch_fs *c,
                     struct bch_inode_info *src,
                     const char __user *name)
 {
-    struct bch_hash_info hash = bch2_hash_info_init(c, &src->ei_inode);
     struct bch_inode_info *dst;
     struct inode *vinode = NULL;
     char *kname = NULL;

@@ -55,6 +54,9 @@ static int bch2_ioc_reinherit_attrs(struct bch_fs *c,
     int ret = 0;
     subvol_inum inum;
 
+    struct bch_hash_info hash;
+    try(bch2_hash_info_init(c, &src->ei_inode, &hash));
+
     kname = kmalloc(BCH_NAME_MAX, GFP_KERNEL);
     if (!kname)
         return -ENOMEM;
 
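A pattern running through the fs.c and fs-ioctl.c hunks: bch2_hash_info_init() has become fallible, switching from returning a struct by value to filling an out-parameter and returning an error code, plausibly because, per the opts.c hunk above, casefolding can now be refused at runtime. A toy sketch of that calling-convention change (the types and the EOPNOTSUPP choice are stand-ins, not the real function):

#include <errno.h>
#include <stdio.h>

struct hash_info { unsigned type; long seed; };

/* fallible initializer: fill *out on success, return an error code */
static int hash_info_init(int casefold, int casefold_disabled,
              struct hash_info *out)
{
    if (casefold && casefold_disabled)
        return -EOPNOTSUPP; /* stand-in for a casefolding error */

    *out = (struct hash_info) { .type = casefold ? 1 : 0, .seed = 42 };
    return 0;
}

int main(void)
{
    struct hash_info hash;
    int ret = hash_info_init(1, 1, &hash);

    if (ret)
        fprintf(stderr, "hash_info_init failed: %d\n", ret);
    return 0;
}

Each call site then picks its own failure policy, as the hunks above do: bch2_lookup() logs and returns a negative dentry, while bch2_vfs_readdir() and bch2_ioc_reinherit_attrs() simply propagate the error via try().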