Update bcachefs sources to 753b29cc7989 bcachefs: opts.data_allowed should be OPT_FORMAT

Kent Overstreet 2025-08-26 16:48:18 -04:00
parent 4b97a99fa2
commit 90cbe3f3b4
20 changed files with 160 additions and 74 deletions

View File

@ -1 +1 @@
bd062cbcd84d5e7b007561be6e972fcdd283c7aa
753b29cc79897dd7f776fa09654196f5b25f8b96

View File

@ -1537,7 +1537,6 @@ void bch2_fs_alloc_debug_to_text(struct printbuf *out, struct bch_fs *c)
prt_printf(out, "cached\t%llu\n", percpu_u64_get(&c->usage->cached));
prt_printf(out, "reserved\t%llu\n", percpu_u64_get(&c->usage->reserved));
prt_printf(out, "online_reserved\t%llu\n", percpu_u64_get(c->online_reserved));
prt_printf(out, "nr_inodes\t%llu\n", percpu_u64_get(&c->usage->nr_inodes));
prt_newline(out);
prt_printf(out, "freelist_wait\t%s\n", c->freelist_wait.list.first ? "waiting" : "empty");

View File

@ -29,7 +29,7 @@ static inline int bch2_bkey_buf_reassemble_noprof(struct bkey_buf *s,
struct bch_fs *c,
struct bkey_s_c k)
{
bch2_bkey_buf_realloc(s, c, k.k->u64s);
bch2_bkey_buf_realloc_noprof(s, c, k.k->u64s);
bkey_reassemble(s->k, k);
return 0;
}
@ -39,7 +39,7 @@ static inline int bch2_bkey_buf_copy_noprof(struct bkey_buf *s,
struct bch_fs *c,
struct bkey_i *src)
{
bch2_bkey_buf_realloc(s, c, src->k.u64s);
bch2_bkey_buf_realloc_noprof(s, c, src->k.u64s);
bkey_copy(s->k, src);
return 0;
}
@ -50,7 +50,7 @@ static inline int bch2_bkey_buf_unpack_noprof(struct bkey_buf *s,
struct btree *b,
struct bkey_packed *src)
{
bch2_bkey_buf_realloc(s, c, BKEY_U64s + bkeyp_val_u64s(&b->format, src));
bch2_bkey_buf_realloc_noprof(s, c, BKEY_U64s + bkeyp_val_u64s(&b->format, src));
bch2_bkey_unpack(b, s->k, src);
return 0;
}

View File

@ -3271,9 +3271,10 @@ void *__bch2_trans_kmalloc(struct btree_trans *trans, size_t size, unsigned long
EBUG_ON(trans->mem_bytes);
EBUG_ON(trans->mem_top);
EBUG_ON(new_bytes > BTREE_TRANS_MEM_MAX);
bool lock_dropped = false;
new_mem = allocate_dropping_locks_norelock(trans, lock_dropped, kmalloc(new_bytes, _gfp));
new_mem = allocate_dropping_locks_norelock(trans, lock_dropped,
kmalloc(new_bytes, _gfp|__GFP_NOWARN));
if (!new_mem) {
new_mem = mempool_alloc(&c->btree_trans_mem_pool, GFP_KERNEL);
new_bytes = BTREE_TRANS_MEM_MAX;
@ -3525,7 +3526,7 @@ got_trans:
if (s->max_mem) {
unsigned expected_mem_bytes = roundup_pow_of_two(s->max_mem);
trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL);
trans->mem = kmalloc(expected_mem_bytes, GFP_KERNEL|__GFP_NOWARN);
if (likely(trans->mem))
trans->mem_bytes = expected_mem_bytes;
}
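
Both kmalloc() calls in this file now pass __GFP_NOWARN, which suppresses the kernel's allocation-failure warning: a failed large transaction-memory allocation is expected here and is handled by falling back to the preallocated btree_trans_mem_pool. A minimal sketch of that pattern, with a made-up helper name (only kmalloc(), mempool_alloc() and BTREE_TRANS_MEM_MAX are taken from the code above):

#include <linux/mempool.h>
#include <linux/slab.h>

/* Hypothetical helper: try a cheap kmalloc() first; on failure, fall back
 * silently to a mempool whose elements are BTREE_TRANS_MEM_MAX bytes. */
static void *trans_mem_alloc(mempool_t *pool, size_t *bytes, gfp_t gfp)
{
	void *p = kmalloc(*bytes, gfp | __GFP_NOWARN);

	if (!p) {
		/* mempool_alloc() with GFP_KERNEL waits for a free element,
		 * so this path does not fail. */
		p = mempool_alloc(pool, GFP_KERNEL);
		*bytes = BTREE_TRANS_MEM_MAX;
	}
	return p;
}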
@ -3696,6 +3697,9 @@ void bch2_btree_trans_to_text(struct printbuf *out, struct btree_trans *trans)
prt_printf(out, "%i %s\n", task ? task->pid : 0, trans->fn);
if (trans->journal_replay_not_finished)
prt_printf(out, "has journal_keys ref\n");
/* trans->paths is rcu protected vs. freeing */
guard(rcu)();
guard(printbuf_atomic)(out);

View File

@ -31,7 +31,7 @@ struct btree_and_journal_iter {
static inline u32 journal_entry_radix_idx(struct bch_fs *c, u64 seq)
{
return (seq - c->journal_entries_base_seq) & (~0U >> 1);
return seq - c->journal_entries_base_seq;
}
static inline struct bkey_i *journal_key_k(struct bch_fs *c,
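
With this change the radix index is simply the distance from the base sequence number; the old 31-bit mask is gone because journal_entry_add() (see the journal_io.c hunks below) now rejects any entry whose sequence number falls outside [base, base + U32_MAX) before the index is ever computed. A rough sketch of the invariant, with illustrative names:

#include <linux/types.h>

/*
 * Illustrative only: base_seq is derived from the first entry read, so
 * every accepted sequence number lies in [base_seq, base_seq + U32_MAX)
 * and the subtraction yields a small, non-wrapping radix index.
 */
static inline u32 demo_radix_idx(u64 base_seq, u64 seq)
{
	return seq - base_seq;	/* caller has already range-checked seq */
}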

View File

@ -63,8 +63,6 @@ __bch2_fs_usage_read_short(struct bch_fs *c)
ret.used = min(ret.capacity, data + reserve_factor(reserved));
ret.free = ret.capacity - ret.used;
ret.nr_inodes = percpu_u64_get(&c->usage->nr_inodes);
return ret;
}
@ -113,14 +111,26 @@ static int bch2_check_fix_ptr(struct btree_trans *trans,
CLASS(bch2_dev_tryget_noerror, ca)(c, p.ptr.dev);
if (!ca) {
if (fsck_err_on(p.ptr.dev != BCH_SB_MEMBER_INVALID,
trans, ptr_to_invalid_device,
"pointer to missing device %u\n"
"while marking %s",
p.ptr.dev,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;
if (p.ptr.dev == BCH_SB_MEMBER_INVALID)
return 0;
if (test_bit(p.ptr.dev, c->devs_removed.d)) {
if (fsck_err(trans, ptr_to_removed_device,
"pointer to removed device %u\n"
"while marking %s",
p.ptr.dev,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;
} else {
if (fsck_err(trans, ptr_to_invalid_device,
"pointer to missing device %u\n"
"while marking %s",
p.ptr.dev,
(printbuf_reset(&buf),
bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
*do_update = true;
}
return 0;
}

View File

@ -78,14 +78,12 @@ struct bch_fs_usage_base {
u64 data;
u64 cached;
u64 reserved;
u64 nr_inodes;
};
struct bch_fs_usage_short {
u64 capacity;
u64 used;
u64 free;
u64 nr_inodes;
};
/*

View File

@ -812,10 +812,14 @@ static int can_write_extent(struct bch_fs *c, struct data_update *m)
break;
}
if (!nr_replicas) {
if (nr_replicas < m->op.nr_replicas) {
prt_printf(&buf, "\nnr_replicas %u < %u", nr_replicas, m->op.nr_replicas);
trace_data_update_done_no_rw_devs(c, buf.buf);
return bch_err_throw(c, data_update_done_no_rw_devs);
}
if (!nr_replicas)
return bch_err_throw(c, data_update_done_no_rw_devs);
if (nr_replicas < m->op.nr_replicas)
return bch_err_throw(c, insufficient_devices);
return 0;

View File

@ -11,6 +11,7 @@
#include "disk_accounting.h"
#include "error.h"
#include "journal_io.h"
#include "recovery_passes.h"
#include "replicas.h"
/*
@ -910,6 +911,40 @@ int bch2_accounting_read(struct bch_fs *c)
u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
/*
* Check for underflow and, if necessary, schedule
* check_allocations:
*
* XXX - see if we can factor this out to run on a bkey
* so we can check everything lazily; right now we don't
* check the non in-mem counters at all
*/
bool underflow = false;
for (unsigned j = 0; j < acc->k.data[i].nr_counters; j++)
underflow |= (s64) v[j] < 0;
if (underflow) {
CLASS(printbuf, buf)();
bch2_log_msg_start(c, &buf);
prt_printf(&buf, "Accounting underflow for\n");
bch2_accounting_key_to_text(&buf, &k);
for (unsigned j = 0; j < acc->k.data[i].nr_counters; j++)
prt_printf(&buf, " %lli", v[j]);
bool print = bch2_count_fsck_err(c, accounting_key_underflow, &buf);
unsigned pos = buf.pos;
ret = bch2_run_explicit_recovery_pass(c, &buf,
BCH_RECOVERY_PASS_check_allocations, 0);
print |= buf.pos != pos;
if (print)
bch2_print_str(c, KERN_ERR, buf.buf);
if (ret)
return ret;
}
switch (k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved:
usage->reserved += v[0] * k.persistent_reserved.nr_replicas;
@ -1063,7 +1098,6 @@ void bch2_verify_accounting_clean(struct bch_fs *c)
check(data);
check(cached);
check(reserved);
check(nr_inodes);
WARN_ON(mismatch);
}
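
The underflow check added to bch2_accounting_read() above exploits the fact that the counters are stored as unsigned 64-bit values but are logically signed: a counter that has gone below zero shows up as a huge unsigned number, so casting to s64 and testing for a negative value detects it. A minimal, self-contained illustration of that test (not bcachefs code):

#include <stdbool.h>
#include <stdint.h>

/* Returns true if any counter has logically dropped below zero. */
static bool counters_underflowed(const uint64_t *v, unsigned nr)
{
	bool underflow = false;

	for (unsigned i = 0; i < nr; i++)
		underflow |= (int64_t) v[i] < 0;
	return underflow;
}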

View File

@ -157,7 +157,7 @@ static inline bool ptr_better(struct bch_fs *c,
const struct extent_ptr_decoded p2,
u64 p2_latency)
{
struct bch_dev *ca2 = bch2_dev_rcu(c, p2.ptr.dev);
struct bch_dev *ca2 = bch2_dev_rcu_noerror(c, p2.ptr.dev);
int failed_delta = dev_failed(ca1) - dev_failed(ca2);
if (unlikely(failed_delta))
@ -419,7 +419,7 @@ bool bch2_extent_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
return false;
/* Extents may not straddle buckets: */
struct bch_dev *ca = bch2_dev_rcu(c, lp.ptr.dev);
struct bch_dev *ca = bch2_dev_rcu_noerror(c, lp.ptr.dev);
bool same_bucket = ca && PTR_BUCKET_NR(ca, &lp.ptr) == PTR_BUCKET_NR(ca, &rp.ptr);
if (!same_bucket)
@ -815,14 +815,14 @@ static inline unsigned __extent_ptr_durability(struct bch_dev *ca, struct extent
unsigned bch2_extent_ptr_desired_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
struct bch_dev *ca = bch2_dev_rcu_noerror(c, p->ptr.dev);
return ca ? __extent_ptr_durability(ca, p) : 0;
}
unsigned bch2_extent_ptr_durability(struct bch_fs *c, struct extent_ptr_decoded *p)
{
struct bch_dev *ca = bch2_dev_rcu(c, p->ptr.dev);
struct bch_dev *ca = bch2_dev_rcu_noerror(c, p->ptr.dev);
if (!ca || ca->mi.state == BCH_MEMBER_STATE_failed)
return 0;
@ -1044,7 +1044,7 @@ bool bch2_bkey_has_target(struct bch_fs *c, struct bkey_s_c k, unsigned target)
guard(rcu)();
bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_in_target(c, ptr->dev, target) &&
(ca = bch2_dev_rcu(c, ptr->dev)) &&
(ca = bch2_dev_rcu_noerror(c, ptr->dev)) &&
(!ptr->cached ||
!dev_ptr_stale_rcu(ca, ptr)))
return true;
@ -1228,7 +1228,7 @@ bool bch2_extent_normalize(struct bch_fs *c, struct bkey_s k)
guard(rcu)();
bch2_bkey_drop_ptrs(k, ptr,
ptr->cached &&
(!(ca = bch2_dev_rcu(c, ptr->dev)) ||
(!(ca = bch2_dev_rcu_noerror(c, ptr->dev)) ||
dev_ptr_stale_rcu(ca, ptr) > 0));
return bkey_deleted(k.k);

View File

@ -667,6 +667,17 @@ do_io:
return 0;
}
static int bch2_write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, void *data)
{
struct folio *folio = NULL;
int error;
while ((folio = writeback_iter(mapping, wbc, folio, &error)))
error = __bch2_writepage(folio, wbc, data);
return error;
}
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
struct bch_fs *c = mapping->host->i_sb->s_fs_info;
@ -675,7 +686,7 @@ int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc
bch2_inode_opts_get(&w->opts, c, &to_bch_ei(mapping->host)->ei_inode);
blk_start_plug(&w->plug);
int ret = write_cache_pages(mapping, wbc, __bch2_writepage, w);
int ret = bch2_write_cache_pages(mapping, wbc, w);
if (w->io)
bch2_writepage_do_io(w);
blk_finish_plug(&w->plug);

View File

@ -8,6 +8,7 @@
#include "buckets.h"
#include "chardev.h"
#include "dirent.h"
#include "disk_accounting.h"
#include "errcode.h"
#include "extents.h"
#include "fs.h"
@ -2247,7 +2248,12 @@ static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_bfree = usage.free >> shift;
buf->f_bavail = avail_factor(usage.free) >> shift;
buf->f_files = usage.nr_inodes + avail_inodes;
u64 nr_inodes = 0;
struct disk_accounting_pos k;
disk_accounting_key_init(k, nr_inodes);
bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&k), &nr_inodes, 1);
buf->f_files = nr_inodes + avail_inodes;
buf->f_ffree = avail_inodes;
buf->f_fsid = uuid_to_fsid(c->sb.user_uuid.b);

View File

@ -152,6 +152,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
struct journal_replay **_i, *i, *dup;
size_t bytes = vstruct_bytes(j);
u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0;
u64 seq = le64_to_cpu(j->seq);
CLASS(printbuf, buf)();
int ret = JOURNAL_ENTRY_ADD_OK;
@ -159,12 +160,11 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
last_seq = min(last_seq, c->opts.journal_rewind);
if (!c->journal.oldest_seq_found_ondisk ||
le64_to_cpu(j->seq) < c->journal.oldest_seq_found_ondisk)
c->journal.oldest_seq_found_ondisk = le64_to_cpu(j->seq);
seq < c->journal.oldest_seq_found_ondisk)
c->journal.oldest_seq_found_ondisk = seq;
/* Is this entry older than the range we need? */
if (!c->opts.read_entire_journal &&
le64_to_cpu(j->seq) < jlist->last_seq)
if (!c->opts.read_entire_journal && seq < jlist->last_seq)
return JOURNAL_ENTRY_ADD_OUT_OF_RANGE;
/*
@ -173,7 +173,7 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
* within the range of +-2 billion of the first one we find.
*/
if (!c->journal_entries_base_seq)
c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX);
c->journal_entries_base_seq = max_t(s64, 1, seq - S32_MAX);
/* Drop entries we don't need anymore */
if (last_seq > jlist->last_seq && !c->opts.read_entire_journal) {
@ -194,25 +194,33 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
/* Drop overwrites, log entries if we don't need them: */
if (!c->opts.retain_recovery_info &&
!c->opts.journal_rewind) {
vstruct_for_each_safe(j, src)
if (vstruct_end(src) > vstruct_end(j))
goto nocompact;
struct jset_entry *dst = j->start;
vstruct_for_each_safe(j, src) {
if (src->type == BCH_JSET_ENTRY_log ||
src->type == BCH_JSET_ENTRY_overwrite)
continue;
memcpy(dst, src, vstruct_bytes(src));
memmove_u64s_down(dst, src, vstruct_u64s(src));
dst = vstruct_next(dst);
}
j->u64s = cpu_to_le32((u64 *) dst - j->_data);
bytes = vstruct_bytes(j);
}
nocompact:
jlist->last_seq = max(jlist->last_seq, last_seq);
_i = genradix_ptr_alloc(&c->journal_entries,
journal_entry_radix_idx(c, le64_to_cpu(j->seq)),
GFP_KERNEL);
if (seq < c->journal_entries_base_seq ||
seq >= c->journal_entries_base_seq + U32_MAX) {
bch_err(c, "journal entry sequence numbers span too large a range: cannot replay, contact developers");
return bch_err_throw(c, ENOMEM_journal_entry_add);
}
_i = genradix_ptr_alloc(&c->journal_entries, journal_entry_radix_idx(c, seq), GFP_KERNEL);
if (!_i)
return bch_err_throw(c, ENOMEM_journal_entry_add);
@ -222,8 +230,6 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
*/
dup = *_i;
if (dup) {
BUG_ON(dup->j.seq != j->seq);
bool identical = bytes == vstruct_bytes(&dup->j) &&
!memcmp(j, &dup->j, bytes);
bool not_identical = !identical &&
@ -254,7 +260,6 @@ static int journal_entry_add(struct bch_fs *c, struct bch_dev *ca,
if (entry_ptr.csum_good && !identical)
goto replace;
BUG_ON(dup->j.seq != j->seq);
return ret;
}
replace:
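
The compaction loop added to journal_entry_add() rewrites the jset in place, so destination and source ranges can overlap; memmove_u64s_down(), which copies toward lower addresses, is safe there where a plain memcpy() is not, and the new "goto nocompact" bails out if any entry claims to extend past the end of the jset. A generic sketch of the same in-place filtering pattern using plain memmove() (the struct and helper below are illustrative, not bcachefs types):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct demo_entry {
	unsigned type;
	size_t   bytes;		/* total size of this entry, header included */
};

/*
 * Keep only the entries the caller still wants, sliding survivors toward
 * the start of the buffer. dst never passes src, so each memmove() copies
 * downward and never clobbers entries that have not been visited yet.
 */
static size_t demo_compact(char *buf, size_t len,
			   bool (*keep)(const struct demo_entry *))
{
	char *dst = buf, *src = buf;

	while (src < buf + len) {
		const struct demo_entry *e = (const struct demo_entry *) src;
		size_t bytes = e->bytes;

		if (keep(e)) {
			memmove(dst, src, bytes);
			dst += bytes;
		}
		src += bytes;
	}
	return dst - buf;	/* new, compacted length */
}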

View File

@ -539,7 +539,7 @@ enum fsck_err_opts {
"n", "Data written to this device will be considered\n"\
"to have already been replicated n times") \
x(data_allowed, u8, \
OPT_DEVICE, \
OPT_DEVICE|OPT_FORMAT, \
OPT_BITFIELD(__bch2_data_types), \
BCH_MEMBER_DATA_ALLOWED, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\
"types", "Allowed data types for this device: journal, btree, and/or user")\

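This hunk is what the commit title refers to: option flags advertise where an option may be set, and without OPT_FORMAT the per-device data_allowed option was not accepted at format time. A rough sketch of that kind of flags test; the helper below is hypothetical, and only the OPT_DEVICE/OPT_FORMAT flags and the idea of a per-option flags field come from the listing above:

/* Hypothetical check: may this option be passed to the format path? */
static bool opt_allowed_at_format(const struct bch_option *opt)
{
	return opt->flags & OPT_FORMAT;
}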
View File

@ -421,6 +421,7 @@ static struct bkey_s_c next_rebalance_extent(struct btree_trans *trans,
trace_rebalance_extent(c, buf.buf);
}
count_event(c, rebalance_extent);
return k;
}
@ -478,11 +479,12 @@ out:
return ret;
}
static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
static int do_rebalance_scan(struct moving_context *ctxt,
u64 inum, u64 cookie, u64 *sectors_scanned)
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct bch_fs_rebalance *r = &trans->c->rebalance;
struct bch_fs_rebalance *r = &c->rebalance;
bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
ctxt->stats = &r->scan_stats;
@ -514,14 +516,16 @@ static int do_rebalance_scan(struct moving_context *ctxt, u64 inum, u64 cookie)
bch2_clear_rebalance_needs_scan(trans, inum, cookie));
per_snapshot_io_opts_exit(&snapshot_io_opts);
bch2_move_stats_exit(&r->scan_stats, trans->c);
*sectors_scanned += atomic64_read(&r->scan_stats.sectors_seen);
bch2_move_stats_exit(&r->scan_stats, c);
/*
* Ensure that the rebalance_work entries we created are seen by the
* next iteration of do_rebalance(), so we don't end up stuck in
* rebalance_wait():
*/
atomic64_inc(&r->scan_stats.sectors_seen);
*sectors_scanned += 1;
bch2_btree_write_buffer_flush_sync(trans);
return ret;
@ -561,6 +565,7 @@ static int do_rebalance(struct moving_context *ctxt)
struct bch_fs *c = trans->c;
struct bch_fs_rebalance *r = &c->rebalance;
struct btree_iter extent_iter = {};
u64 sectors_scanned = 0;
u32 kick = r->kick;
struct bpos work_pos = POS_MIN;
@ -570,7 +575,6 @@ static int do_rebalance(struct moving_context *ctxt)
return ret;
bch2_move_stats_init(&r->work_stats, "rebalance_work");
bch2_move_stats_init(&r->scan_stats, "rebalance_scan");
while (!bch2_move_ratelimit(ctxt)) {
if (!bch2_rebalance_enabled(c)) {
@ -587,19 +591,20 @@ static int do_rebalance(struct moving_context *ctxt)
ret = k->k.type == KEY_TYPE_cookie
? do_rebalance_scan(ctxt, k->k.p.inode,
le64_to_cpu(bkey_i_to_cookie(k)->v.cookie))
le64_to_cpu(bkey_i_to_cookie(k)->v.cookie),
&sectors_scanned)
: do_rebalance_extent(ctxt, k->k.p, &extent_iter);
if (ret)
break;
}
bch2_trans_iter_exit(&extent_iter);
bch2_move_stats_exit(&r->scan_stats, c);
bch2_move_stats_exit(&r->work_stats, c);
if (!ret &&
!kthread_should_stop() &&
!atomic64_read(&r->work_stats.sectors_seen) &&
!atomic64_read(&r->scan_stats.sectors_seen) &&
!sectors_scanned &&
kick == r->kick) {
bch2_moving_ctxt_flush_all(ctxt);
bch2_trans_unlock_long(trans);

View File

@ -35,6 +35,7 @@ enum counters_flags {
x(io_move_noop, 92, TYPE_COUNTER) \
x(io_move_created_rebalance, 83, TYPE_COUNTER) \
x(io_move_evacuate_bucket, 84, TYPE_COUNTER) \
x(rebalance_extent, 96, TYPE_COUNTER) \
x(bucket_invalidate, 3, TYPE_COUNTER) \
x(bucket_discard, 4, TYPE_COUNTER) \
x(bucket_discard_fast, 79, TYPE_COUNTER) \
@ -127,7 +128,7 @@ struct bch_sb_field_counters {
static inline void __maybe_unused check_bch_counter_ids_unique(void) {
switch(0){
#define x(t, n, ...) case (n):
BCH_PERSISTENT_COUNTERS()
BCH_PERSISTENT_COUNTERS();
#undef x
;
}
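
check_bch_counter_ids_unique() is a compile-time trick: every counter expands to a case label inside a single switch, so two counters sharing an ID produce a "duplicate case value" compile error; the semicolon added after BCH_PERSISTENT_COUNTERS() appears to be a small hygiene tweak so the expansion reads as a complete statement. A stripped-down version of the same trick, with made-up IDs:

/* Each x() becomes a case label; duplicate IDs fail to compile. */
#define DEMO_IDS()	\
	x(foo, 1)	\
	x(bar, 2)	\
	x(baz, 3)

static inline void __attribute__((unused)) check_demo_ids_unique(void)
{
	switch (0) {
#define x(name, id) case (id):
	DEMO_IDS();
#undef x
	;
	}
}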

View File

@ -328,6 +328,7 @@ enum bch_fsck_flags {
x(accounting_key_replicas_devs_unsorted, 280, FSCK_AUTOFIX) \
x(accounting_key_version_0, 282, FSCK_AUTOFIX) \
x(accounting_key_nr_counters_wrong, 307, FSCK_AUTOFIX) \
x(accounting_key_underflow, 325, FSCK_AUTOFIX) \
x(logged_op_but_clean, 283, FSCK_AUTOFIX) \
x(compression_opt_not_marked_in_sb, 295, FSCK_AUTOFIX) \
x(compression_type_not_marked_in_sb, 296, FSCK_AUTOFIX) \
@ -336,7 +337,7 @@ enum bch_fsck_flags {
x(dirent_stray_data_after_cf_name, 305, 0) \
x(rebalance_work_incorrectly_set, 309, FSCK_AUTOFIX) \
x(rebalance_work_incorrectly_unset, 310, FSCK_AUTOFIX) \
x(MAX, 325, 0)
x(MAX, 326, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,

View File

@ -237,6 +237,7 @@ static int bch2_dev_alloc(struct bch_fs *, unsigned);
static int bch2_dev_sysfs_online(struct bch_fs *, struct bch_dev *);
static void bch2_dev_io_ref_stop(struct bch_dev *, int);
static void __bch2_dev_read_only(struct bch_fs *, struct bch_dev *);
static int bch2_dev_attach_bdev(struct bch_fs *, struct bch_sb_handle *, struct printbuf *);
struct bch_fs *bch2_dev_to_fs(dev_t dev)
{
@ -988,11 +989,7 @@ static int bch2_fs_opt_version_init(struct bch_fs *c)
}
}
if (c->cf_encoding)
prt_printf(&p, "\nUsing encoding defined by superblock: utf8-%u.%u.%u",
unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
/* cf_encoding log message should be here, but it breaks xfstests - sigh */
if (c->opts.journal_rewind)
prt_printf(&p, "\nrewinding journal, fsck required");
@ -1008,8 +1005,9 @@ static int bch2_fs_opt_version_init(struct bch_fs *c)
return ret;
__le64 now = cpu_to_le64(ktime_get_real_seconds());
for_each_online_member_rcu(c, ca)
bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = now;
scoped_guard(rcu)
for_each_online_member_rcu(c, ca)
bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx)->last_mount = now;
if (BCH_SB_HAS_TOPOLOGY_ERRORS(c->disk_sb.sb))
ext->recovery_passes_required[0] |=
@ -1060,6 +1058,14 @@ static int bch2_fs_opt_version_init(struct bch_fs *c)
bch2_print_str(c, KERN_INFO, p.buf);
/* this really should be part of our one multi line mount message, but -
* xfstests... */
if (c->cf_encoding)
bch_info(c, "Using encoding defined by superblock: utf8-%u.%u.%u",
unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
if (BCH_SB_INITIALIZED(c->disk_sb.sb)) {
if (!(c->sb.features & (1ULL << BCH_FEATURE_new_extent_overwrite))) {
bch_err(c, "feature new_extent_overwrite not set, filesystem no longer supported");
@ -1313,6 +1319,16 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
&c->clock_journal_res,
(sizeof(struct jset_entry_clock) / sizeof(u64)) * 2);
scoped_guard(rwsem_write, &c->state_lock)
darray_for_each(*sbs, sb) {
CLASS(printbuf, err)();
ret = bch2_dev_attach_bdev(c, sb, &err);
if (ret) {
bch_err(bch2_dev_locked(c, sb->sb->dev_idx), "%s", err.buf);
goto err;
}
}
ret = bch2_fs_opt_version_init(c);
if (ret)
goto err;
@ -2604,16 +2620,6 @@ struct bch_fs *bch2_fs_open(darray_const_str *devices,
if (ret)
goto err;
scoped_guard(rwsem_write, &c->state_lock)
darray_for_each(sbs, sb) {
CLASS(printbuf, err)();
ret = bch2_dev_attach_bdev(c, sb, &err);
if (ret) {
bch_err(bch2_dev_locked(c, sb->sb->dev_idx), "%s", err.buf);
goto err;
}
}
if (!c->opts.nostart) {
ret = bch2_fs_start(c);
if (ret)

View File

@ -304,7 +304,6 @@ static void bch2_fs_usage_base_to_text(struct printbuf *out, struct bch_fs *c)
prt_printf(out, "data:\t\t%llu\n", b.data);
prt_printf(out, "cached:\t%llu\n", b.cached);
prt_printf(out, "reserved:\t\t%llu\n", b.reserved);
prt_printf(out, "nr_inodes:\t%llu\n", b.nr_inodes);
}
static int bch2_read_fua_test(struct printbuf *out, struct bch_dev *ca)

View File

@ -23,6 +23,9 @@
(size_t) (offsetof(_type, _data) + (_u64s) * sizeof(u64)); \
})
#define vstruct_u64s(_s) \
(offsetof(typeof(*(_s)), _data) / sizeof(u64) + __vstruct_u64s(_s))
#define vstruct_bytes(_s) \
__vstruct_bytes(typeof(*(_s)), __vstruct_u64s(_s))
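
The new vstruct_u64s() returns the total size of a variable-length struct in u64 units: the fixed header up to its _data member plus the payload length reported by __vstruct_u64s(). That is the unit memmove_u64s_down() works in during the journal-entry compaction above. A small illustration of the layout being assumed, using a made-up struct rather than the real jset_entry:

#include <stddef.h>
#include <stdint.h>

/* Illustrative vstruct: a fixed header followed by a u64 payload. */
struct demo_vstruct {
	uint16_t u64s;		/* number of u64s in _data[] */
	uint16_t type;
	uint32_t pad;
	uint64_t _data[];
};

/* Header u64s + payload u64s, mirroring what vstruct_u64s() computes. */
static inline size_t demo_vstruct_u64s(const struct demo_vstruct *s)
{
	return offsetof(struct demo_vstruct, _data) / sizeof(uint64_t) + s->u64s;
}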