Update bcachefs sources to 8efd93eb2d0f bcachefs: Fix replicas max options

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2025-07-12 11:18:33 -04:00
parent 4e973f190c
commit dae984cf38
23 changed files with 245 additions and 137 deletions

View File

@ -1 +1 @@
6fc0b31163ea24a1478550a2f91945a87fb81cbf
8efd93eb2d0f977564b0db7d9e54369ae870c437

View File

@ -9,14 +9,15 @@
#include <stdarg.h>
#include <stdio.h>
#define KERN_EMERG ""
#define KERN_ALERT ""
#define KERN_CRIT ""
#define KERN_ERR ""
#define KERN_WARNING ""
#define KERN_NOTICE ""
#define KERN_INFO ""
#define KERN_DEBUG ""
#define KERN_EMERG KERN_SOH "0" /* system is unusable */
#define KERN_ALERT KERN_SOH "1" /* action must be taken immediately */
#define KERN_CRIT KERN_SOH "2" /* critical conditions */
#define KERN_ERR KERN_SOH "3" /* error conditions */
#define KERN_WARNING KERN_SOH "4" /* warning conditions */
#define KERN_NOTICE KERN_SOH "5" /* normal but significant condition */
#define KERN_INFO KERN_SOH "6" /* informational */
#define KERN_DEBUG KERN_SOH "7" /* debug-level messages */
#define KERN_DEFAULT ""
#define KERN_CONT ""
#define KERN_SOH "\001"
@ -46,8 +47,8 @@ static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
return i;
}
#define printk(...) printf(__VA_ARGS__)
#define vprintk(...) vprintf(__VA_ARGS__)
void vprintk(const char *fmt, va_list args);
void printk(const char *fmt, ...);
#define no_printk(fmt, ...) \
({ \

View File

@ -511,7 +511,8 @@ again:
bch2_dev_usage_read_fast(ca, &req->usage);
avail = dev_buckets_free(ca, req->usage, req->watermark);
if (req->usage.buckets[BCH_DATA_need_discard] > avail)
if (req->usage.buckets[BCH_DATA_need_discard] >
min(avail, ca->mi.nbuckets >> 7))
bch2_dev_do_discards(ca);
if (req->usage.buckets[BCH_DATA_need_gc_gens] > avail)

View File

@ -3,9 +3,10 @@
#define _BCACHEFS_ASYNC_OBJS_H
#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS
static inline void __async_object_list_del(struct fast_list *head, unsigned idx)
static inline void __async_object_list_del(struct fast_list *head, unsigned *idx)
{
fast_list_remove(head, idx);
fast_list_remove(head, *idx);
*idx = 0;
}
static inline int __async_object_list_add(struct fast_list *head, void *obj, unsigned *idx)
@ -16,7 +17,7 @@ static inline int __async_object_list_add(struct fast_list *head, void *obj, uns
}
#define async_object_list_del(_c, _list, idx) \
__async_object_list_del(&(_c)->async_objs[BCH_ASYNC_OBJ_LIST_##_list].list, idx)
__async_object_list_del(&(_c)->async_objs[BCH_ASYNC_OBJ_LIST_##_list].list, &idx)
#define async_object_list_add(_c, _list, obj, idx) \
__async_object_list_add(&(_c)->async_objs[BCH_ASYNC_OBJ_LIST_##_list].list, obj, idx)

View File

@ -18,7 +18,7 @@ enum bch_async_obj_lists {
struct async_obj_list {
struct fast_list list;
void (*obj_to_text)(struct printbuf *, void *);
void (*obj_to_text)(struct printbuf *, struct bch_fs *, void *);
unsigned idx;
};

View File

@ -329,19 +329,21 @@ do { \
bch2_print_str(_c, __VA_ARGS__); \
} while (0)
#define bch_info(c, fmt, ...) \
bch2_print(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_info_ratelimited(c, fmt, ...) \
bch2_print_ratelimited(c, KERN_INFO bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_notice(c, fmt, ...) \
bch2_print(c, KERN_NOTICE bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn(c, fmt, ...) \
bch2_print(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_warn_ratelimited(c, fmt, ...) \
bch2_print_ratelimited(c, KERN_WARNING bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_log(c, loglevel, fmt, ...) \
bch2_print(c, loglevel bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_log_ratelimited(c, loglevel, fmt, ...) \
bch2_print_ratelimited(c, loglevel bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err(c, ...) bch_log(c, KERN_ERR, __VA_ARGS__)
#define bch_err_ratelimited(c, ...) bch_log_ratelimited(c, KERN_ERR, __VA_ARGS__)
#define bch_warn(c, ...) bch_log(c, KERN_WARNING, __VA_ARGS__)
#define bch_warn_ratelimited(c, ...) bch_log_ratelimited(c, KERN_WARNING, __VA_ARGS__)
#define bch_notice(c, ...) bch_log(c, KERN_NOTICE, __VA_ARGS__)
#define bch_info(c, ...) bch_log(c, KERN_INFO, __VA_ARGS__)
#define bch_info_ratelimited(c, ...) bch_log_ratelimited(c, KERN_INFO, __VA_ARGS__)
#define bch_verbose(c, ...) bch_log(c, KERN_DEBUG, __VA_ARGS__)
#define bch_verbose_ratelimited(c, ...) bch_log_ratelimited(c, KERN_DEBUG, __VA_ARGS__)
#define bch_err(c, fmt, ...) \
bch2_print(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err_dev(ca, fmt, ...) \
bch2_print(c, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
#define bch_err_dev_offset(ca, _offset, fmt, ...) \
@ -351,8 +353,6 @@ do { \
#define bch_err_inum_offset(c, _inum, _offset, fmt, ...) \
bch2_print(c, KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
#define bch_err_ratelimited(c, fmt, ...) \
bch2_print_ratelimited(c, KERN_ERR bch2_fmt(c, fmt), ##__VA_ARGS__)
#define bch_err_dev_ratelimited(ca, fmt, ...) \
bch2_print_ratelimited(ca, KERN_ERR bch2_fmt_dev(ca, fmt), ##__VA_ARGS__)
#define bch_err_dev_offset_ratelimited(ca, _offset, fmt, ...) \
@ -386,24 +386,6 @@ do { \
##__VA_ARGS__, bch2_err_str(_ret)); \
} while (0)
#define bch_verbose(c, fmt, ...) \
do { \
if ((c)->opts.verbose) \
bch_info(c, fmt, ##__VA_ARGS__); \
} while (0)
#define bch_verbose_ratelimited(c, fmt, ...) \
do { \
if ((c)->opts.verbose) \
bch_info_ratelimited(c, fmt, ##__VA_ARGS__); \
} while (0)
#define pr_verbose_init(opts, fmt, ...) \
do { \
if (opt_get(opts, verbose)) \
pr_info(fmt, ##__VA_ARGS__); \
} while (0)
static inline int __bch2_err_trace(struct bch_fs *c, int err)
{
trace_error_throw(c, err, _THIS_IP_);
@ -833,6 +815,8 @@ struct bch_fs {
struct bch_disk_groups_cpu __rcu *disk_groups;
struct bch_opts opts;
unsigned loglevel;
unsigned prev_loglevel;
/* Updated by bch2_sb_update():*/
struct {

View File

@ -5,6 +5,7 @@
#include "bset.h"
#include "btree_cache.h"
#include "btree_journal_iter.h"
#include "disk_accounting.h"
#include "journal_io.h"
#include <linux/sort.h>
@ -278,12 +279,23 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
if (idx < keys->size &&
journal_key_cmp(&n, &keys->data[idx]) == 0) {
struct bkey_i *o = keys->data[idx].k;
if (k->k.type == KEY_TYPE_accounting &&
o->k.type == KEY_TYPE_accounting) {
if (!keys->data[idx].allocated)
goto insert;
bch2_accounting_accumulate(bkey_i_to_accounting(k),
bkey_i_to_s_c_accounting(o));
}
if (keys->data[idx].allocated)
kfree(keys->data[idx].k);
keys->data[idx] = n;
return 0;
}
insert:
if (idx > keys->gap)
idx -= keys->size - keys->nr;

View File

@ -591,7 +591,8 @@ static noinline int bch2_trans_commit_run_gc_triggers(struct btree_trans *trans)
}
static inline int
bch2_trans_commit_write_locked(struct btree_trans *trans, unsigned flags,
bch2_trans_commit_write_locked(struct btree_trans *trans,
enum bch_trans_commit_flags flags,
struct btree_insert_entry **stopped_at,
unsigned long trace_ip)
{
@ -826,7 +827,8 @@ static int bch2_trans_commit_journal_pin_flush(struct journal *j,
/*
* Get journal reservation, take write locks, and attempt to do btree update(s):
*/
static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags,
static inline int do_bch2_trans_commit(struct btree_trans *trans,
enum bch_trans_commit_flags flags,
struct btree_insert_entry **stopped_at,
unsigned long trace_ip)
{
@ -962,16 +964,33 @@ out:
* do.
*/
static noinline int
do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans,
enum bch_trans_commit_flags flags)
{
struct bch_fs *c = trans->c;
int ret = 0;
BUG_ON(current != c->recovery_task);
trans_for_each_update(trans, i) {
int ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
struct bkey_i *accounting;
percpu_down_read(&c->mark_lock);
for (accounting = btree_trans_subbuf_base(trans, &trans->accounting);
accounting != btree_trans_subbuf_top(trans, &trans->accounting);
accounting = bkey_next(accounting)) {
ret = likely(!(flags & BCH_TRANS_COMMIT_skip_accounting_apply))
? bch2_accounting_mem_mod_locked(trans, bkey_i_to_s_c_accounting(accounting),
BCH_ACCOUNTING_normal, false)
: 0;
if (ret)
return ret;
goto revert_fs_usage;
}
percpu_up_read(&c->mark_lock);
trans_for_each_update(trans, i) {
ret = bch2_journal_key_insert(c, i->btree_id, i->level, i->k);
if (ret)
goto fatal_err;
}
for (struct jset_entry *i = btree_trans_journal_entries_start(trans);
@ -980,9 +999,9 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
if (i->type == BCH_JSET_ENTRY_btree_keys ||
i->type == BCH_JSET_ENTRY_write_buffer_keys) {
jset_entry_for_each_key(i, k) {
int ret = bch2_journal_key_insert(c, i->btree_id, i->level, k);
ret = bch2_journal_key_insert(c, i->btree_id, i->level, k);
if (ret)
return ret;
goto fatal_err;
}
}
@ -1000,12 +1019,24 @@ do_bch2_trans_commit_to_journal_replay(struct btree_trans *trans)
for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting);
i != btree_trans_subbuf_top(trans, &trans->accounting);
i = bkey_next(i)) {
int ret = bch2_journal_key_insert(c, BTREE_ID_accounting, 0, i);
ret = bch2_journal_key_insert(c, BTREE_ID_accounting, 0, i);
if (ret)
return ret;
goto fatal_err;
}
return 0;
fatal_err:
bch2_fs_fatal_error(c, "fatal error in transaction commit: %s", bch2_err_str(ret));
percpu_down_read(&c->mark_lock);
revert_fs_usage:
BUG();
/* error path not handled by __bch2_trans_commit() */
for (struct bkey_i *i = btree_trans_subbuf_base(trans, &trans->accounting);
i != accounting;
i = bkey_next(i))
bch2_accounting_trans_commit_revert(trans, bkey_i_to_accounting(i), flags);
percpu_up_read(&c->mark_lock);
return ret;
}
int __bch2_trans_commit(struct btree_trans *trans, enum bch_trans_commit_flags flags)
@ -1031,7 +1062,7 @@ int __bch2_trans_commit(struct btree_trans *trans, enum bch_trans_commit_flags f
if (!(flags & BCH_TRANS_COMMIT_no_check_rw) &&
unlikely(!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_trans))) {
if (unlikely(!test_bit(BCH_FS_may_go_rw, &c->flags)))
ret = do_bch2_trans_commit_to_journal_replay(trans);
ret = do_bch2_trans_commit_to_journal_replay(trans, flags);
else
ret = bch_err_throw(c, erofs_trans_commit);
goto out_reset;

View File

@ -191,19 +191,29 @@ int bch2_btree_insert_clone_trans(struct btree_trans *, enum btree_id, struct bk
int bch2_btree_write_buffer_insert_err(struct bch_fs *, enum btree_id, struct bkey_i *);
static inline int bch2_btree_write_buffer_insert_checks(struct bch_fs *c, enum btree_id btree,
struct bkey_i *k)
{
if (unlikely(!btree_type_uses_write_buffer(btree) ||
k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX)) {
int ret = bch2_btree_write_buffer_insert_err(c, btree, k);
dump_stack();
return ret;
}
return 0;
}
static inline int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
enum btree_id btree,
struct bkey_i *k)
{
kmsan_check_memory(k, bkey_bytes(&k->k));
EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
if (unlikely(!btree_type_uses_write_buffer(btree))) {
int ret = bch2_btree_write_buffer_insert_err(trans->c, btree, k);
dump_stack();
int ret = bch2_btree_write_buffer_insert_checks(trans->c, btree, k);
if (unlikely(ret))
return ret;
}
/*
* Most updates skip the btree write buffer until journal replay is
* finished because synchronization with journal replay relies on having
@ -220,7 +230,7 @@ static inline int __must_check bch2_trans_update_buffered(struct btree_trans *tr
return bch2_btree_insert_clone_trans(trans, btree, k);
struct jset_entry *e = bch2_trans_jset_entry_alloc(trans, jset_u64s(k->k.u64s));
int ret = PTR_ERR_OR_ZERO(e);
ret = PTR_ERR_OR_ZERO(e);
if (ret)
return ret;

View File

@ -330,10 +330,9 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
darray_for_each(wb->sorted, i) {
struct btree_write_buffered_key *k = &wb->flushing.keys.data[i->idx];
if (unlikely(!btree_type_uses_write_buffer(k->btree))) {
ret = bch2_btree_write_buffer_insert_err(trans->c, k->btree, &k->k);
ret = bch2_btree_write_buffer_insert_checks(c, k->btree, &k->k);
if (unlikely(ret))
goto err;
}
for (struct wb_key_ref *n = i + 1; n < min(i + 4, &darray_top(wb->sorted)); n++)
prefetch(&wb->flushing.keys.data[n->idx]);

View File

@ -89,11 +89,9 @@ static inline int bch2_journal_key_to_wb(struct bch_fs *c,
struct journal_keys_to_wb *dst,
enum btree_id btree, struct bkey_i *k)
{
if (unlikely(!btree_type_uses_write_buffer(btree))) {
int ret = bch2_btree_write_buffer_insert_err(c, btree, k);
dump_stack();
int ret = bch2_btree_write_buffer_insert_checks(c, btree, k);
if (unlikely(ret))
return ret;
}
EBUG_ON(!dst->seq);

View File

@ -675,7 +675,7 @@ void bch2_data_update_inflight_to_text(struct printbuf *out, struct data_update
if (!m->read_done) {
prt_printf(out, "read:\n");
printbuf_indent_add(out, 2);
bch2_read_bio_to_text(out, &m->rbio);
bch2_read_bio_to_text(out, m->op.c, &m->rbio);
} else {
prt_printf(out, "write:\n");
printbuf_indent_add(out, 2);

View File

@ -622,7 +622,8 @@ int bch2_gc_accounting_done(struct bch_fs *c)
if (fsck_err(c, accounting_mismatch, "%s", buf.buf)) {
percpu_up_write(&c->mark_lock);
ret = commit_do(trans, NULL, NULL, 0,
ret = commit_do(trans, NULL, NULL,
BCH_TRANS_COMMIT_skip_accounting_apply,
bch2_disk_accounting_mod(trans, &acc_k, src_v, nr, false));
percpu_down_write(&c->mark_lock);
if (ret)

View File

@ -26,7 +26,8 @@ const char *bch2_err_str(int err)
err = abs(err);
BUG_ON(err >= BCH_ERR_MAX);
if (err >= BCH_ERR_MAX)
return "(Invalid error)";
if (err >= BCH_ERR_START)
errstr = bch2_errcode_strs[err - BCH_ERR_START];

View File

@ -160,6 +160,7 @@ static noinline void promote_free(struct bch_read_bio *rbio)
BUG_ON(ret);
async_object_list_del(c, promote, op->list_idx);
async_object_list_del(c, rbio, rbio->list_idx);
bch2_data_update_exit(&op->write);
@ -356,12 +357,14 @@ nopromote:
return NULL;
}
void bch2_promote_op_to_text(struct printbuf *out, struct promote_op *op)
void bch2_promote_op_to_text(struct printbuf *out,
struct bch_fs *c,
struct promote_op *op)
{
if (!op->write.read_done) {
prt_printf(out, "parent read: %px\n", op->write.rbio.parent);
printbuf_indent_add(out, 2);
bch2_read_bio_to_text(out, op->write.rbio.parent);
bch2_read_bio_to_text(out, c, op->write.rbio.parent);
printbuf_indent_sub(out, 2);
}
@ -459,6 +462,10 @@ static void bch2_rbio_done(struct bch_read_bio *rbio)
if (rbio->start_time)
bch2_time_stats_update(&rbio->c->times[BCH_TIME_data_read],
rbio->start_time);
#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS
if (rbio->list_idx)
async_object_list_del(rbio->c, rbio, rbio->list_idx);
#endif
bio_endio(&rbio->bio);
}
@ -1476,19 +1483,34 @@ static const char * const bch2_read_bio_flags[] = {
NULL
};
void bch2_read_bio_to_text(struct printbuf *out, struct bch_read_bio *rbio)
void bch2_read_bio_to_text(struct printbuf *out,
struct bch_fs *c,
struct bch_read_bio *rbio)
{
if (!out->nr_tabstops)
printbuf_tabstop_push(out, 20);
bch2_read_err_msg(c, out, rbio, rbio->read_pos);
prt_newline(out);
/* Are we in a retry? */
printbuf_indent_add(out, 2);
u64 now = local_clock();
prt_printf(out, "start_time:\t%llu\n", rbio->start_time ? now - rbio->start_time : 0);
prt_printf(out, "submit_time:\t%llu\n", rbio->submit_time ? now - rbio->submit_time : 0);
prt_printf(out, "start_time:\t");
bch2_pr_time_units(out, max_t(s64, 0, now - rbio->start_time));
prt_newline(out);
prt_printf(out, "submit_time:\t");
bch2_pr_time_units(out, max_t(s64, 0, now - rbio->submit_time));
prt_newline(out);
if (!rbio->split)
prt_printf(out, "end_io:\t%ps\n", rbio->end_io);
else
prt_printf(out, "parent:\t%px\n", rbio->parent);
prt_printf(out, "bi_end_io:\t%ps\n", rbio->bio.bi_end_io);
prt_printf(out, "promote:\t%u\n", rbio->promote);
prt_printf(out, "bounce:\t%u\n", rbio->bounce);
prt_printf(out, "split:\t%u\n", rbio->split);
@ -1507,6 +1529,7 @@ void bch2_read_bio_to_text(struct printbuf *out, struct bch_read_bio *rbio)
prt_newline(out);
bch2_bio_to_text(out, &rbio->bio);
printbuf_indent_sub(out, 2);
}
void bch2_fs_io_read_exit(struct bch_fs *c)

View File

@ -207,8 +207,8 @@ static inline struct bch_read_bio *rbio_init(struct bio *bio,
}
struct promote_op;
void bch2_promote_op_to_text(struct printbuf *, struct promote_op *);
void bch2_read_bio_to_text(struct printbuf *, struct bch_read_bio *);
void bch2_promote_op_to_text(struct printbuf *, struct bch_fs *, struct promote_op *);
void bch2_read_bio_to_text(struct printbuf *, struct bch_fs *, struct bch_read_bio *);
void bch2_fs_io_read_exit(struct bch_fs *);
int bch2_fs_io_read_init(struct bch_fs *);

View File

@ -1147,16 +1147,14 @@ static int bch2_set_nr_journal_buckets_iter(struct bch_dev *ca, unsigned nr,
if (ret)
break;
if (!new_fs) {
ret = bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
ob[nr_got]->bucket, BCH_DATA_journal,
ca->mi.bucket_size, BTREE_TRIGGER_transactional));
if (ret) {
bch2_open_bucket_put(c, ob[nr_got]);
bch_err_msg(c, ret, "marking new journal buckets");
break;
}
ret = bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,
ob[nr_got]->bucket, BCH_DATA_journal,
ca->mi.bucket_size, BTREE_TRIGGER_transactional));
if (ret) {
bch2_open_bucket_put(c, ob[nr_got]);
bch_err_msg(c, ret, "marking new journal buckets");
break;
}
bu[nr_got] = ob[nr_got]->bucket;
@ -1226,7 +1224,7 @@ err_unblock:
mutex_unlock(&c->sb_lock);
}
if (ret && !new_fs)
if (ret)
for (i = 0; i < nr_got; i++)
bch2_trans_run(c,
bch2_trans_mark_metadata_bucket(trans, ca,

View File

@ -1780,6 +1780,7 @@ static CLOSURE_CALLBACK(journal_write_done)
closure_wake_up(&c->freelist_wait);
bch2_reset_alloc_cursors(c);
do_discards = true;
}
j->seq_ondisk = seq;

View File

@ -468,7 +468,7 @@ struct bch_io_opts *bch2_move_get_io_opts(struct btree_trans *trans,
struct bch_io_opts *opts_ret = &io_opts->fs_io_opts;
int ret = 0;
if (extent_iter->min_depth)
if (btree_iter_path(trans, extent_iter)->level)
return opts_ret;
if (extent_k.k->type == KEY_TYPE_reflink_v)
@ -672,8 +672,7 @@ retry_root:
k = bkey_i_to_s_c(&b->key);
io_opts = bch2_move_get_io_opts(trans, &snapshot_io_opts,
iter.pos, &iter, k);
io_opts = &snapshot_io_opts.fs_io_opts;
ret = PTR_ERR_OR_ZERO(io_opts);
if (ret)
goto root_err;

View File

@ -150,12 +150,12 @@ enum fsck_err_opts {
NULL, "Number of consecutive write errors allowed before kicking out a device")\
x(metadata_replicas, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_UINT(1, BCH_REPLICAS_MAX), \
OPT_UINT(1, BCH_REPLICAS_MAX + 1), \
BCH_SB_META_REPLICAS_WANT, 1, \
"#", "Number of metadata replicas") \
x(data_replicas, u8, \
OPT_FS|OPT_INODE|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_UINT(1, BCH_REPLICAS_MAX), \
OPT_UINT(1, BCH_REPLICAS_MAX + 1), \
BCH_SB_DATA_REPLICAS_WANT, 1, \
"#", "Number of data replicas") \
x(metadata_replicas_required, u8, \
@ -165,7 +165,7 @@ enum fsck_err_opts {
"#", NULL) \
x(data_replicas_required, u8, \
OPT_FS|OPT_FORMAT|OPT_MOUNT, \
OPT_UINT(1, BCH_REPLICAS_MAX), \
OPT_UINT(1, BCH_REPLICAS_MAX + 1), \
BCH_SB_DATA_REPLICAS_REQ, 1, \
"#", NULL) \
x(encoded_extent_max, u32, \
@ -529,7 +529,7 @@ enum fsck_err_opts {
"size", "Specifies the bucket size; must be greater than the btree node size")\
x(durability, u8, \
OPT_DEVICE|OPT_RUNTIME|OPT_SB_FIELD_ONE_BIAS, \
OPT_UINT(0, BCH_REPLICAS_MAX), \
OPT_UINT(0, BCH_REPLICAS_MAX + 1), \
BCH_MEMBER_DURABILITY, 1, \
"n", "Data written to this device will be considered\n"\
"to have already been replicated n times") \

View File

@ -1196,30 +1196,10 @@ int bch2_fs_initialize(struct bch_fs *c)
mutex_unlock(&c->sb_lock);
set_bit(BCH_FS_btree_running, &c->flags);
set_bit(BCH_FS_may_go_rw, &c->flags);
for (unsigned i = 0; i < BTREE_ID_NR; i++)
bch2_btree_root_alloc_fake(c, i, 0);
ret = bch2_fs_journal_alloc(c);
if (ret)
goto err;
/*
* journal_res_get() will crash if called before this has
* set up the journal.pin FIFO and journal.cur pointer:
*/
ret = bch2_fs_journal_start(&c->journal, 1, 1);
if (ret)
goto err;
ret = bch2_fs_read_write_early(c);
if (ret)
goto err;
set_bit(BCH_FS_accounting_replay_done, &c->flags);
bch2_journal_set_replay_done(&c->journal);
for_each_member_device(c, ca) {
ret = bch2_dev_usage_init(ca, false);
if (ret) {
@ -1238,6 +1218,27 @@ int bch2_fs_initialize(struct bch_fs *c)
if (ret)
goto err;
ret = bch2_fs_journal_alloc(c);
if (ret)
goto err;
/*
* journal_res_get() will crash if called before this has
* set up the journal.pin FIFO and journal.cur pointer:
*/
ret = bch2_fs_journal_start(&c->journal, 1, 1);
if (ret)
goto err;
set_bit(BCH_FS_may_go_rw, &c->flags);
ret = bch2_fs_read_write_early(c);
if (ret)
goto err;
ret = bch2_journal_replay(c);
if (ret)
goto err;
ret = bch2_fs_freespace_init(c);
if (ret)
goto err;

View File

@ -103,9 +103,32 @@ const char * const bch2_dev_write_refs[] = {
};
#undef x
static void __bch2_print_str(struct bch_fs *c, const char *prefix,
const char *str)
static bool should_print_loglevel(struct bch_fs *c, const char *fmt)
{
	/*
	 * Effective threshold: an explicit c->loglevel wins; otherwise default
	 * to KERN_DEBUG (7) in verbose mode, KERN_INFO (6) normally.
	 */
	unsigned threshold = c->loglevel;
	if (!threshold)
		threshold = c->opts.verbose ? 7 : 6;

	/* A message level is a KERN_SOH byte followed by a digit. */
	unsigned level;
	if (fmt[0] == KERN_SOH[0] && fmt[1] >= '0' && fmt[1] <= '9') {
		level = fmt[1] - '0';
		/* Remember it for continuation lines without their own prefix. */
		c->prev_loglevel = level;
	} else {
		level = c->prev_loglevel;
	}

	return level <= threshold;
}
void bch2_print_str(struct bch_fs *c, const char *prefix, const char *str)
{
if (!should_print_loglevel(c, prefix))
return;
#ifndef __KERNEL__
prefix = "";
#endif
#ifdef __KERNEL__
struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);
@ -114,12 +137,7 @@ static void __bch2_print_str(struct bch_fs *c, const char *prefix,
return;
}
#endif
bch2_print_string_as_lines(KERN_ERR, str);
}
void bch2_print_str(struct bch_fs *c, const char *prefix, const char *str)
{
__bch2_print_str(c, prefix, str);
bch2_print_string_as_lines(prefix, str);
}
__printf(2, 0)
@ -149,6 +167,14 @@ void bch2_print_opts(struct bch_opts *opts, const char *fmt, ...)
void __bch2_print(struct bch_fs *c, const char *fmt, ...)
{
if (!should_print_loglevel(c, fmt))
return;
#ifndef __KERNEL__
if (fmt[0] == KERN_SOH[0])
fmt += 2;
#endif
struct stdio_redirect *stdio = bch2_fs_stdio_redirect(c);
va_list args;
@ -1066,6 +1092,8 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
if (ret)
goto err;
c->recovery_task = current;
out:
return c;
err:
@ -1208,7 +1236,6 @@ int bch2_fs_start(struct bch_fs *c)
bch2_recalc_capacity(c);
up_write(&c->state_lock);
c->recovery_task = current;
ret = BCH_SB_INITIALIZED(c->disk_sb.sb)
? bch2_fs_recovery(c)
: bch2_fs_initialize(c);
@ -1988,11 +2015,11 @@ int bch2_dev_add(struct bch_fs *c, const char *path)
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
if (test_bit(BCH_FS_started, &c->flags)) {
ret = bch2_dev_usage_init(ca, false);
if (ret)
goto err_late;
ret = bch2_dev_usage_init(ca, false);
if (ret)
goto err_late;
if (test_bit(BCH_FS_started, &c->flags)) {
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
bch_err_msg(ca, ret, "marking new superblock");
if (ret)

20
linux/printk.c Normal file
View File

@ -0,0 +1,20 @@
#include <stdarg.h>
#include <stdio.h>
/*
 * Strip a leading kernel loglevel prefix (SOH byte '\001' followed by a level
 * character, e.g. "\0016") from a printk format string, returning a pointer
 * to the real format. Strings without a prefix are returned unchanged.
 *
 * Guard against a bare SOH with no level character: skipping two bytes in
 * that case would point past the string's terminating NUL, and the caller's
 * vprintf() would then read out of bounds.
 */
static inline const char *real_fmt(const char *fmt)
{
	return (fmt[0] == '\001' && fmt[1] != '\0') ? fmt + 2 : fmt;
}
/* Userspace shim for the kernel's vprintk(): print to stdout, dropping any
 * loglevel prefix from the format string. */
void vprintk(const char *fmt, va_list args)
{
	const char *stripped = real_fmt(fmt);

	vprintf(stripped, args);
}
/* Userspace shim for the kernel's printk(): variadic front end that forwards
 * to vprintk(). */
void printk(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintk(fmt, ap);
	va_end(ap);
}