Update bcachefs sources to aa540d47ab bcachefs: Option improvements

Kent Overstreet 2021-12-21 23:13:52 -05:00
parent 00f49f23b4
commit ad03173172
23 changed files with 226 additions and 149 deletions
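
The bulk of this diff follows from one representational change: options such as block_size and btree_node_size now hold byte values in struct bch_opts, while the superblock continues to store sectors (or an ilog2-encoded field). The new OPT_SB_FIELD_SECTORS and OPT_SB_FIELD_ILOG2 flags tell bch2_opts_from_sb() and __bch2_opt_set_sb() how to convert in each direction, and the range, 512-byte-alignment and power-of-two checks move into a common bch2_opt_validate(), which bch2_opt_parse() now also calls (hence its new message-prefix argument). The remaining hunks are largely mechanical: wherever a sector count is still wanted, c->opts.btree_node_size and c->opts.block_size are replaced by the new btree_sectors()/block_sectors() helpers. The following is a self-contained sketch of the superblock round trip only; the flag names come from the diff below, but the helper functions are simplified stand-ins, not the bcachefs implementation:

#include <stdint.h>
#include <stdio.h>

#define OPT_SB_FIELD_SECTORS	(1 << 8)	/* superblock stores value >> 9 */
#define OPT_SB_FIELD_ILOG2	(1 << 9)	/* superblock stores ilog2(value) */

/* superblock field -> option value in bytes (cf. bch2_opts_from_sb()): */
static uint64_t sb_field_to_opt(unsigned flags, uint64_t v)
{
	if (flags & OPT_SB_FIELD_ILOG2)
		v = 1ULL << v;
	if (flags & OPT_SB_FIELD_SECTORS)
		v <<= 9;
	return v;
}

/* option value in bytes -> superblock field (cf. __bch2_opt_set_sb()): */
static uint64_t opt_to_sb_field(unsigned flags, uint64_t v)
{
	if (flags & OPT_SB_FIELD_SECTORS)
		v >>= 9;
	if (flags & OPT_SB_FIELD_ILOG2)
		v = 63 - __builtin_clzll(v);	/* ilog2(v), for v > 0 */
	return v;
}

int main(void)
{
	/* btree_node_size default: 512 sectors on disk <-> 256k in bch_opts */
	printf("%llu bytes\n",
	       (unsigned long long)sb_field_to_opt(OPT_SB_FIELD_SECTORS, 512));
	printf("%llu sectors\n",
	       (unsigned long long)opt_to_sb_field(OPT_SB_FIELD_SECTORS, 256 << 10));
	return 0;
}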

@@ -1 +1 @@
d3422f9b18ea3154abe19d859f1a61c4fae9ccdc
aa540d47abe2c4ab53fcf2a6d118dfda13800b56

@@ -295,7 +295,7 @@ static void print_node_ondisk(struct bch_fs *c, struct btree *b)
bio_put(bio);
percpu_ref_put(&ca->io_ref);
while (offset < c->opts.btree_node_size) {
while (offset < btree_sectors(c)) {
struct bset *i;
struct nonce nonce;
struct bch_csum csum;

@@ -682,7 +682,7 @@ static int migrate_fs(const char *fs_path,
u64 bcachefs_inum;
ranges extents = reserve_new_fs_space(file_path,
fs_opts.block_size << 9,
fs_opts.block_size >> 9,
get_size(dev.path, dev.fd) / 5,
&bcachefs_inum, stat.st_dev, force);

@@ -102,12 +102,14 @@ void bch2_pick_bucket_size(struct bch_opts opts, struct dev_opts *dev)
dev->nbuckets = dev->size / dev->bucket_size;
if (dev->bucket_size < opts.block_size)
die("Bucket size cannot be smaller than block size");
if (dev->bucket_size << 9 < opts.block_size)
die("Bucket size (%u) cannot be smaller than block size (%u)",
dev->bucket_size << 9, opts.block_size);
if (opt_defined(opts, btree_node_size) &&
dev->bucket_size < opts.btree_node_size)
die("Bucket size cannot be smaller than btree node size");
dev->bucket_size << 9 < opts.btree_node_size)
die("Bucket size (%u) cannot be smaller than btree node size (%u)",
dev->bucket_size << 9, opts.btree_node_size);
if (dev->nbuckets < BCH_MIN_NR_NBUCKETS)
die("Not enough buckets: %llu, need %u (bucket size %u)",
@@ -167,20 +169,14 @@ struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
/* calculate btree node size: */
if (!opt_defined(fs_opts, btree_node_size)) {
/* 256k default btree node size */
opt_set(fs_opts, btree_node_size, 512);
opt_set(fs_opts, btree_node_size, 256 << 10);
for (i = devs; i < devs + nr_devs; i++)
fs_opts.btree_node_size =
min_t(unsigned, fs_opts.btree_node_size,
i->bucket_size);
i->bucket_size << 9);
}
if (!is_power_of_2(fs_opts.block_size))
die("block size must be power of 2");
if (!is_power_of_2(fs_opts.btree_node_size))
die("btree node size must be power of 2");
if (uuid_is_null(opts.uuid.b))
uuid_generate(opts.uuid.b);
@@ -190,7 +186,6 @@ struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
sb.sb->version = le16_to_cpu(opts.version);
sb.sb->version_min = le16_to_cpu(opts.version);
sb.sb->magic = BCACHE_MAGIC;
sb.sb->block_size = cpu_to_le16(fs_opts.block_size);
sb.sb->user_uuid = opts.uuid;
sb.sb->nr_devices = nr_devs;
@@ -207,17 +202,13 @@ struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
for (opt_id = 0;
opt_id < bch2_opts_nr;
opt_id++) {
const struct bch_option *opt = &bch2_opt_table[opt_id];
u64 v;
if (opt->set_sb == SET_NO_SB_OPT)
continue;
v = bch2_opt_defined_by_id(&fs_opts, opt_id)
? bch2_opt_get_by_id(&fs_opts, opt_id)
: bch2_opt_get_by_id(&bch2_opts_default, opt_id);
opt->set_sb(sb.sb, v);
__bch2_opt_set_sb(sb.sb, &bch2_opt_table[opt_id], v);
}
SET_BCH_SB_ENCODED_EXTENT_MAX_BITS(sb.sb,
@@ -1066,7 +1057,7 @@ struct bch_opt_strs bch2_cmdline_opts_get(int *argc, char *argv[],
optid = bch2_opt_lookup(optstr);
if (optid < 0 ||
!(bch2_opt_table[optid].mode & opt_types)) {
!(bch2_opt_table[optid].flags & opt_types)) {
i++;
goto next;
}
@@ -1106,7 +1097,8 @@ struct bch_opts bch2_parse_opts(struct bch_opt_strs strs)
bch2_opt_table[i].type == BCH_OPT_FN)
continue;
ret = bch2_opt_parse(NULL, &bch2_opt_table[i],
ret = bch2_opt_parse(NULL, "option",
&bch2_opt_table[i],
strs.by_id[i], &v);
if (ret < 0)
die("Invalid %s: %s",
@@ -1134,7 +1126,7 @@ void bch2_opts_usage(unsigned opt_types)
for (opt = bch2_opt_table;
opt < bch2_opt_table + bch2_opts_nr;
opt++) {
if (!(opt->mode & opt_types))
if (!(opt->flags & opt_types))
continue;
c += printf(" --%s", opt->attr.name);

@@ -928,10 +928,20 @@ static inline unsigned bucket_bytes(const struct bch_dev *ca)
static inline unsigned block_bytes(const struct bch_fs *c)
{
return c->opts.block_size << 9;
return c->opts.block_size;
}
static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, s64 time)
static inline unsigned block_sectors(const struct bch_fs *c)
{
return c->opts.block_size >> 9;
}
static inline size_t btree_sectors(const struct bch_fs *c)
{
return c->opts.btree_node_size >> 9;
}
static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
{
struct timespec64 t;
s32 rem;
@@ -943,13 +953,13 @@ static inline struct timespec64 bch2_time_to_timespec(struct bch_fs *c, s64 time
return t;
}
static inline s64 timespec_to_bch2_time(struct bch_fs *c, struct timespec64 ts)
static inline s64 timespec_to_bch2_time(const struct bch_fs *c, struct timespec64 ts)
{
return (ts.tv_sec * c->sb.time_units_per_sec +
(int) ts.tv_nsec / c->sb.nsec_per_time_unit) - c->sb.time_base_lo;
}
static inline s64 bch2_current_time(struct bch_fs *c)
static inline s64 bch2_current_time(const struct bch_fs *c)
{
struct timespec64 now;

@@ -71,7 +71,7 @@ static inline bool btree_node_hashed(struct btree *b)
static inline size_t btree_bytes(struct bch_fs *c)
{
return c->opts.btree_node_size << 9;
return c->opts.btree_node_size;
}
static inline size_t btree_max_u64s(struct bch_fs *c)
@@ -86,7 +86,7 @@ static inline size_t btree_pages(struct bch_fs *c)
static inline unsigned btree_blocks(struct bch_fs *c)
{
return c->opts.btree_node_size >> c->block_bits;
return btree_sectors(c) >> c->block_bits;
}
#define BTREE_SPLIT_THRESHOLD(c) (btree_max_u64s(c) * 2 / 3)

@@ -687,7 +687,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
BTREE_ERR_FATAL, c, ca, b, i,
"BSET_SEPARATE_WHITEOUTS no longer supported");
if (btree_err_on(offset + sectors > c->opts.btree_node_size,
if (btree_err_on(offset + sectors > btree_sectors(c),
BTREE_ERR_FIXABLE, c, ca, b, i,
"bset past end of btree node")) {
i->u64s = 0;
@@ -901,7 +901,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
b->data->keys.seq, bp->seq);
}
while (b->written < (ptr_written ?: c->opts.btree_node_size)) {
while (b->written < (ptr_written ?: btree_sectors(c))) {
unsigned sectors, whiteout_u64s = 0;
struct nonce nonce;
struct bch_csum csum;
@@ -1210,7 +1210,7 @@ static unsigned btree_node_sectors_written(struct bch_fs *c, void *data)
if (le64_to_cpu(bn->magic) != bset_magic(c))
return 0;
while (offset < c->opts.btree_node_size) {
while (offset < btree_sectors(c)) {
if (!offset) {
offset += vstruct_sectors(bn, c->block_bits);
} else {
@@ -1232,7 +1232,7 @@ static bool btree_node_has_extra_bsets(struct bch_fs *c, unsigned offset, void *
if (!offset)
return false;
while (offset < c->opts.btree_node_size) {
while (offset < btree_sectors(c)) {
bne = data + (offset << 9);
if (bne->keys.seq == bn->keys.seq)
return true;
@@ -1302,7 +1302,7 @@ fsck_err:
if (ra->err[i])
continue;
while (offset < c->opts.btree_node_size) {
while (offset < btree_sectors(c)) {
if (!offset) {
sectors = vstruct_sectors(bn, c->block_bits);
} else {
@@ -1319,7 +1319,7 @@ fsck_err:
offset += sectors;
}
while (offset < c->opts.btree_node_size) {
while (offset < btree_sectors(c)) {
bne = ra->buf[i] + (offset << 9);
if (bne->keys.seq == bn->keys.seq) {
if (!gap)
@@ -1797,8 +1797,8 @@ do_write:
BUG_ON(btree_node_fake(b));
BUG_ON((b->will_make_reachable != 0) != !b->written);
BUG_ON(b->written >= c->opts.btree_node_size);
BUG_ON(b->written & (c->opts.block_size - 1));
BUG_ON(b->written >= btree_sectors(c));
BUG_ON(b->written & (block_sectors(c) - 1));
BUG_ON(bset_written(b, btree_bset_last(b)));
BUG_ON(le64_to_cpu(b->data->magic) != bset_magic(c));
BUG_ON(memcmp(&b->data->format, &b->format, sizeof(b->format)));
@@ -1871,7 +1871,7 @@ do_write:
memset(data + bytes_to_write, 0,
(sectors_to_write << 9) - bytes_to_write);
BUG_ON(b->written + sectors_to_write > c->opts.btree_node_size);
BUG_ON(b->written + sectors_to_write > btree_sectors(c));
BUG_ON(BSET_BIG_ENDIAN(i) != CPU_BIG_ENDIAN);
BUG_ON(i->seq != b->data->keys.seq);

@@ -223,12 +223,12 @@ retry:
if (IS_ERR(wp))
return ERR_CAST(wp);
if (wp->sectors_free < c->opts.btree_node_size) {
if (wp->sectors_free < btree_sectors(c)) {
struct open_bucket *ob;
unsigned i;
open_bucket_for_each(c, &wp->ptrs, ob, i)
if (ob->sectors_free < c->opts.btree_node_size)
if (ob->sectors_free < btree_sectors(c))
ob->sectors_free = 0;
bch2_alloc_sectors_done(c, wp);
@@ -236,7 +236,7 @@ retry:
}
bkey_btree_ptr_v2_init(&tmp.k);
bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, c->opts.btree_node_size);
bch2_alloc_sectors_append_ptrs(c, wp, &tmp.k, btree_sectors(c));
bch2_open_bucket_get(c, wp, &ob);
bch2_alloc_sectors_done(c, wp);
@@ -1029,7 +1029,7 @@ retry:
}
ret = bch2_disk_reservation_get(c, &as->disk_res,
nr_nodes * c->opts.btree_node_size,
nr_nodes * btree_sectors(c),
c->opts.metadata_replicas,
disk_res_flags);
if (ret)

@@ -218,7 +218,7 @@ static inline ssize_t __bch_btree_u64s_remaining(struct bch_fs *c,
{
ssize_t used = bset_byte_offset(b, end) / sizeof(u64) +
b->whiteout_u64s;
ssize_t total = c->opts.btree_node_size << 6;
ssize_t total = c->opts.btree_node_size >> 3;
/* Always leave one extra u64 for bch2_varint_decode: */
used++;

@@ -996,7 +996,7 @@ static int bch2_mark_extent(struct btree_trans *trans,
? BCH_DATA_btree
: BCH_DATA_user;
s64 sectors = bkey_is_btree_ptr(k.k)
? c->opts.btree_node_size
? btree_sectors(c)
: k.k->size;
s64 dirty_sectors = 0;
bool stale;
@@ -1604,7 +1604,7 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
? BCH_DATA_btree
: BCH_DATA_user;
s64 sectors = bkey_is_btree_ptr(k.k)
? c->opts.btree_node_size
? btree_sectors(c)
: k.k->size;
s64 dirty_sectors = 0;
bool stale;
@@ -2179,7 +2179,7 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
alloc_heap alloc_heap;
size_t btree_reserve = DIV_ROUND_UP(BTREE_NODE_RESERVE,
ca->mi.bucket_size / c->opts.btree_node_size);
ca->mi.bucket_size / btree_sectors(c));
/* XXX: these should be tunable */
size_t reserve_none = max_t(size_t, 1, nbuckets >> 9);
size_t copygc_reserve = max_t(size_t, 2, nbuckets >> 6);

@@ -376,7 +376,7 @@ static unsigned __bio_compress(struct bch_fs *c,
BUG_ON(!mempool_initialized(&c->compress_workspace[compression_type]));
/* If it's only one block, don't bother trying to compress: */
if (bio_sectors(src) <= c->opts.block_size)
if (src->bi_iter.bi_size <= c->opts.block_size)
return 0;
dst_data = bio_map_or_bounce(c, dst, WRITE);

@@ -373,7 +373,9 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
bch2_trans_init(&trans, i->c, 0, 0);
bch2_trans_iter_init(&trans, &iter, i->id, i->from, BTREE_ITER_PREFETCH);
bch2_trans_iter_init(&trans, &iter, i->id, i->from,
BTREE_ITER_PREFETCH|
BTREE_ITER_ALL_SNAPSHOTS);
while ((k = bch2_btree_iter_peek(&iter)).k &&
!(err = bkey_err(k))) {

@@ -1038,7 +1038,7 @@ const char *bch2_bkey_ptrs_invalid(const struct bch_fs *c, struct bkey_s_c k)
if (k.k->type == KEY_TYPE_btree_ptr ||
k.k->type == KEY_TYPE_btree_ptr_v2)
size_ondisk = c->opts.btree_node_size;
size_ondisk = btree_sectors(c);
bkey_extent_entry_for_each(ptrs, entry) {
if (__extent_entry_type(entry) >= BCH_EXTENT_ENTRY_MAX)

@@ -866,8 +866,8 @@ static int bch2_fill_extent(struct bch_fs *c,
else
offset += p.crc.offset;
if ((offset & (c->opts.block_size - 1)) ||
(k.k->size & (c->opts.block_size - 1)))
if ((offset & (block_sectors(c) - 1)) ||
(k.k->size & (block_sectors(c) - 1)))
flags2 |= FIEMAP_EXTENT_NOT_ALIGNED;
ret = fiemap_fill_next_extent(info,
@@ -1681,7 +1681,7 @@ static int bch2_show_options(struct seq_file *seq, struct dentry *root)
const struct bch_option *opt = &bch2_opt_table[i];
u64 v = bch2_opt_get_by_id(&c->opts, i);
if (!(opt->mode & OPT_MOUNT))
if (!(opt->flags & OPT_MOUNT))
continue;
if (v == bch2_opt_get_by_id(&bch2_opts_default, i))

@@ -1289,7 +1289,7 @@ void bch2_write(struct closure *cl)
bch2_keylist_init(&op->insert_keys, op->inline_keys);
wbio_init(bio)->put_bio = false;
if (bio_sectors(bio) & (c->opts.block_size - 1)) {
if (bio->bi_iter.bi_size & (c->opts.block_size - 1)) {
bch_err_inum_ratelimited(c, op->pos.inode,
"misaligned write");
op->error = -EIO;
@@ -2365,7 +2365,7 @@ int bch2_fs_io_init(struct bch_fs *c)
BIOSET_NEED_BVECS) ||
mempool_init_page_pool(&c->bio_bounce_pages,
max_t(unsigned,
c->opts.btree_node_size,
btree_sectors(c),
c->sb.encoded_extent_max) /
PAGE_SECTORS, 0) ||
rhashtable_init(&c->promote_table, &bch_promote_params))

@@ -710,7 +710,7 @@ reread:
case JOURNAL_ENTRY_NONE:
if (!saw_bad)
return 0;
sectors = c->opts.block_size;
sectors = block_sectors(c);
goto next_block;
case JOURNAL_ENTRY_BAD:
saw_bad = true;
@@ -719,7 +719,7 @@ reread:
* field of the journal entry we read, so try reading
* again at next block boundary:
*/
sectors = c->opts.block_size;
sectors = block_sectors(c);
break;
default:
return ret;

@@ -141,41 +141,27 @@ void bch2_opt_set_by_id(struct bch_opts *opts, enum bch_opt_id id, u64 v)
}
}
/*
* Initial options from superblock - here we don't want any options undefined,
* any options the superblock doesn't specify are set to 0:
*/
struct bch_opts bch2_opts_from_sb(struct bch_sb *sb)
{
struct bch_opts opts = bch2_opts_empty();
#define x(_name, _bits, _mode, _type, _sb_opt, ...) \
if (_sb_opt != NO_SB_OPT) \
opt_set(opts, _name, _sb_opt(sb));
BCH_OPTS()
#undef x
return opts;
}
const struct bch_option bch2_opt_table[] = {
#define OPT_BOOL() .type = BCH_OPT_BOOL
#define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, .min = _min, .max = _max
#define OPT_SECTORS(_min, _max) .type = BCH_OPT_SECTORS, .min = _min, .max = _max
#define OPT_STR(_choices) .type = BCH_OPT_STR, .choices = _choices
#define OPT_BOOL() .type = BCH_OPT_BOOL, .min = 0, .max = 2
#define OPT_UINT(_min, _max) .type = BCH_OPT_UINT, \
.min = _min, .max = _max
#define OPT_STR(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = ARRAY_SIZE(_choices),\
.choices = _choices
#define OPT_FN(_fn) .type = BCH_OPT_FN, \
.parse = _fn##_parse, \
.to_text = _fn##_to_text
#define x(_name, _bits, _mode, _type, _sb_opt, _default, _hint, _help) \
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
[Opt_##_name] = { \
.attr = { \
.name = #_name, \
.mode = (_mode) & OPT_RUNTIME ? 0644 : 0444, \
.mode = (_flags) & OPT_RUNTIME ? 0644 : 0444, \
}, \
.mode = _mode, \
.flags = _flags, \
.hint = _hint, \
.help = _help, \
.get_sb = _sb_opt, \
.set_sb = SET_##_sb_opt, \
_type \
},
@@ -218,7 +204,41 @@ static int bch2_mount_opt_lookup(const char *name)
return bch2_opt_lookup(name);
}
int bch2_opt_parse(struct bch_fs *c, const struct bch_option *opt,
static int bch2_opt_validate(const struct bch_option *opt, const char *msg, u64 v)
{
if (v < opt->min) {
if (msg)
pr_err("invalid %s%s: too small (min %llu)",
msg, opt->attr.name, opt->min);
return -ERANGE;
}
if (opt->max && v >= opt->max) {
if (msg)
pr_err("invalid %s%s: too big (max %llu)",
msg, opt->attr.name, opt->max);
return -ERANGE;
}
if ((opt->flags & OPT_SB_FIELD_SECTORS) && (v & 511)) {
if (msg)
pr_err("invalid %s %s: not a multiple of 512",
msg, opt->attr.name);
return -EINVAL;
}
if ((opt->flags & OPT_MUST_BE_POW_2) && !is_power_of_2(v)) {
if (msg)
pr_err("invalid %s%s: must be a power of two",
msg, opt->attr.name);
return -EINVAL;
}
return 0;
}
int bch2_opt_parse(struct bch_fs *c, const char *msg,
const struct bch_option *opt,
const char *val, u64 *res)
{
ssize_t ret;
@@ -228,30 +248,13 @@ int bch2_opt_parse(struct bch_fs *c, const struct bch_option *opt,
ret = kstrtou64(val, 10, res);
if (ret < 0)
return ret;
if (*res > 1)
return -ERANGE;
break;
case BCH_OPT_UINT:
ret = kstrtou64(val, 10, res);
ret = opt->flags & OPT_HUMAN_READABLE
? bch2_strtou64_h(val, res)
: kstrtou64(val, 10, res);
if (ret < 0)
return ret;
if (*res < opt->min || *res >= opt->max)
return -ERANGE;
break;
case BCH_OPT_SECTORS:
ret = bch2_strtou64_h(val, res);
if (ret < 0)
return ret;
if (*res & 511)
return -EINVAL;
*res >>= 9;
if (*res < opt->min || *res >= opt->max)
return -ERANGE;
break;
case BCH_OPT_STR:
ret = match_string(opt->choices, -1, val);
@@ -264,10 +267,12 @@ int bch2_opt_parse(struct bch_fs *c, const struct bch_option *opt,
if (!c)
return 0;
return opt->parse(c, val, res);
ret = opt->parse(c, val, res);
if (ret < 0)
return ret;
}
return 0;
return bch2_opt_validate(opt, msg, *res);
}
void bch2_opt_to_text(struct printbuf *out, struct bch_fs *c,
@@ -288,10 +293,10 @@ void bch2_opt_to_text(struct printbuf *out, struct bch_fs *c,
switch (opt->type) {
case BCH_OPT_BOOL:
case BCH_OPT_UINT:
pr_buf(out, "%lli", v);
break;
case BCH_OPT_SECTORS:
bch2_hprint(out, v << 9);
if (opt->flags & OPT_HUMAN_READABLE)
bch2_hprint(out, v);
else
pr_buf(out, "%lli", v);
break;
case BCH_OPT_STR:
if (flags & OPT_SHOW_FULL_LIST)
@@ -365,7 +370,8 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
if (id < 0)
goto bad_opt;
ret = bch2_opt_parse(c, &bch2_opt_table[id], val, &v);
ret = bch2_opt_parse(c, "mount option ",
&bch2_opt_table[id], val, &v);
if (ret < 0)
goto bad_val;
} else {
@@ -385,7 +391,7 @@ int bch2_parse_mount_opts(struct bch_fs *c, struct bch_opts *opts,
goto no_val;
}
if (!(bch2_opt_table[id].mode & OPT_MOUNT))
if (!(bch2_opt_table[id].flags & OPT_MOUNT))
goto bad_opt;
if (id == Opt_acl &&
@@ -420,6 +426,65 @@ out:
return ret;
}
/*
* Initial options from superblock - here we don't want any options undefined,
* any options the superblock doesn't specify are set to 0:
*/
int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
{
unsigned id;
int ret;
for (id = 0; id < bch2_opts_nr; id++) {
const struct bch_option *opt = bch2_opt_table + id;
u64 v;
if (opt->get_sb == NO_SB_OPT)
continue;
v = opt->get_sb(sb);
if (opt->flags & OPT_SB_FIELD_ILOG2)
v = 1ULL << v;
if (opt->flags & OPT_SB_FIELD_SECTORS)
v <<= 9;
ret = bch2_opt_validate(opt, "superblock option ", v);
if (ret)
return ret;
bch2_opt_set_by_id(opts, id, v);
}
return 0;
}
void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
{
if (opt->set_sb == SET_NO_SB_OPT)
return;
if (opt->flags & OPT_SB_FIELD_SECTORS)
v >>= 9;
if (opt->flags & OPT_SB_FIELD_ILOG2)
v = ilog2(v);
opt->set_sb(sb, v);
}
void bch2_opt_set_sb(struct bch_fs *c, const struct bch_option *opt, u64 v)
{
if (opt->set_sb == SET_NO_SB_OPT)
return;
mutex_lock(&c->sb_lock);
__bch2_opt_set_sb(c->disk_sb.sb, opt, v);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
/* io opts: */
struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts src)

@@ -44,19 +44,22 @@ static inline const char *bch2_d_type_str(unsigned d_type)
LE64_BITMASK(NO_SB_OPT, struct bch_sb, flags[0], 0, 0);
/* When can be set: */
enum opt_mode {
enum opt_flags {
OPT_FS = (1 << 0), /* Filesystem option */
OPT_DEVICE = (1 << 1), /* Device option */
OPT_INODE = (1 << 2), /* Inode option */
OPT_FORMAT = (1 << 3), /* May be specified at format time */
OPT_MOUNT = (1 << 4), /* May be specified at mount time */
OPT_RUNTIME = (1 << 5), /* May be specified at runtime */
OPT_HUMAN_READABLE = (1 << 6),
OPT_MUST_BE_POW_2 = (1 << 7), /* Must be power of 2 */
OPT_SB_FIELD_SECTORS = (1 << 8),/* Superblock field is >> 9 of actual value */
OPT_SB_FIELD_ILOG2 = (1 << 9), /* Superblock field is ilog2 of actual value */
};
enum opt_type {
BCH_OPT_BOOL,
BCH_OPT_UINT,
BCH_OPT_SECTORS,
BCH_OPT_STR,
BCH_OPT_FN,
};
@@ -88,13 +91,15 @@ enum opt_type {
#define BCH_OPTS() \
x(block_size, u16, \
OPT_FS|OPT_FORMAT, \
OPT_SECTORS(1, 128), \
OPT_FS|OPT_FORMAT| \
OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
OPT_UINT(512, 1U << 16), \
BCH_SB_BLOCK_SIZE, 8, \
"size", NULL) \
x(btree_node_size, u16, \
OPT_FS|OPT_FORMAT, \
OPT_SECTORS(1, 512), \
x(btree_node_size, u32, \
OPT_FS|OPT_FORMAT| \
OPT_HUMAN_READABLE|OPT_MUST_BE_POW_2|OPT_SB_FIELD_SECTORS, \
OPT_UINT(512, 1U << 20), \
BCH_SB_BTREE_NODE_SIZE, 512, \
"size", "Btree node size, default 256k") \
x(errors, u8, \
@@ -198,8 +203,9 @@ enum opt_type {
BCH_SB_GC_RESERVE, 8, \
"%", "Percentage of disk space to reserve for copygc")\
x(gc_reserve_bytes, u64, \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_SECTORS(0, U64_MAX), \
OPT_FS|OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME| \
OPT_HUMAN_READABLE|OPT_SB_FIELD_SECTORS, \
OPT_UINT(0, U64_MAX), \
BCH_SB_GC_RESERVE_BYTES, 0, \
"%", "Amount of disk space to reserve for copygc\n" \
"Takes precedence over gc_reserve_percent if set")\
@@ -354,12 +360,12 @@ enum opt_type {
NULL, NULL) \
x(fs_size, u64, \
OPT_DEVICE, \
OPT_SECTORS(0, S64_MAX), \
OPT_UINT(0, S64_MAX), \
NO_SB_OPT, 0, \
"size", "Size of filesystem on device") \
x(bucket, u32, \
OPT_DEVICE, \
OPT_SECTORS(0, S64_MAX), \
OPT_UINT(0, S64_MAX), \
NO_SB_OPT, 0, \
"size", "Size of filesystem on device") \
x(durability, u8, \
@@ -418,13 +424,14 @@ struct printbuf;
struct bch_option {
struct attribute attr;
u64 (*get_sb)(const struct bch_sb *);
void (*set_sb)(struct bch_sb *, u64);
enum opt_mode mode;
enum opt_type type;
enum opt_flags flags;
u64 min, max;
union {
struct {
u64 min, max;
};
struct {
const char * const *choices;
@@ -446,10 +453,13 @@ bool bch2_opt_defined_by_id(const struct bch_opts *, enum bch_opt_id);
u64 bch2_opt_get_by_id(const struct bch_opts *, enum bch_opt_id);
void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
struct bch_opts bch2_opts_from_sb(struct bch_sb *);
int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
void __bch2_opt_set_sb(struct bch_sb *, const struct bch_option *, u64);
void bch2_opt_set_sb(struct bch_fs *, const struct bch_option *, u64);
int bch2_opt_lookup(const char *);
int bch2_opt_parse(struct bch_fs *, const struct bch_option *, const char *, u64 *);
int bch2_opt_parse(struct bch_fs *, const char *, const struct bch_option *,
const char *, u64 *);
#define OPT_SHOW_FULL_LIST (1 << 0)
#define OPT_SHOW_MOUNT_STYLE (1 << 1)

@@ -261,8 +261,7 @@ const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
block_size = le16_to_cpu(sb->block_size);
if (!is_power_of_2(block_size) ||
block_size > PAGE_SECTORS)
if (block_size > PAGE_SECTORS)
return "Bad block size";
if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le)))
@@ -304,9 +303,6 @@ const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
if (!BCH_SB_BTREE_NODE_SIZE(sb))
return "Btree node size not set";
if (!is_power_of_2(BCH_SB_BTREE_NODE_SIZE(sb)))
return "Btree node size not a power of two";
if (BCH_SB_GC_RESERVE(sb) < 5)
return "gc reserve percentage too small";
@@ -621,8 +617,12 @@ got_super:
err = "Superblock block size smaller than device block size";
ret = -EINVAL;
if (le16_to_cpu(sb->sb->block_size) << 9 <
bdev_logical_block_size(sb->bdev))
goto err;
bdev_logical_block_size(sb->bdev)) {
pr_err("error reading superblock: Superblock block size (%u) smaller than device block size (%u)",
le16_to_cpu(sb->sb->block_size) << 9,
bdev_logical_block_size(sb->bdev));
goto err_no_print;
}
ret = 0;
sb->have_layout = true;
@@ -630,8 +630,9 @@ out:
pr_verbose_init(*opts, "ret %i", ret);
return ret;
err:
bch2_free_super(sb);
pr_err("error reading superblock: %s", err);
err_no_print:
bch2_free_super(sb);
goto out;
}

@@ -754,10 +754,13 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);
c->opts = bch2_opts_default;
bch2_opts_apply(&c->opts, bch2_opts_from_sb(sb));
ret = bch2_opts_from_sb(&c->opts, sb);
if (ret)
goto err;
bch2_opts_apply(&c->opts, opts);
c->block_bits = ilog2(c->opts.block_size);
c->block_bits = ilog2(block_sectors(c));
c->btree_foreground_merge_threshold = BTREE_FOREGROUND_MERGE_THRESHOLD(c);
if (bch2_fs_init_fault("fs_alloc")) {
@@ -869,7 +872,7 @@ static void print_mount_opts(struct bch_fs *c)
const struct bch_option *opt = &bch2_opt_table[i];
u64 v = bch2_opt_get_by_id(&c->opts, i);
if (!(opt->mode & OPT_MOUNT))
if (!(opt->flags & OPT_MOUNT))
continue;
if (v == bch2_opt_get_by_id(&bch2_opts_default, i))
@@ -995,7 +998,7 @@ static const char *bch2_dev_may_add(struct bch_sb *sb, struct bch_fs *c)
if (!sb_mi)
return "Invalid superblock: member info area missing";
if (le16_to_cpu(sb->block_size) != c->opts.block_size)
if (le16_to_cpu(sb->block_size) != block_sectors(c))
return "mismatched block size";
if (le16_to_cpu(sb_mi->members[sb->dev_idx].bucket_size) <

@@ -626,7 +626,7 @@ STORE(bch2_fs_opts_dir)
if (!tmp)
return -ENOMEM;
ret = bch2_opt_parse(c, opt, strim(tmp), &v);
ret = bch2_opt_parse(c, NULL, opt, strim(tmp), &v);
kfree(tmp);
if (ret < 0)
@@ -636,13 +636,7 @@ STORE(bch2_fs_opts_dir)
if (ret < 0)
return ret;
if (opt->set_sb != SET_NO_SB_OPT) {
mutex_lock(&c->sb_lock);
opt->set_sb(c->disk_sb.sb, v);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
bch2_opt_set_sb(c, opt, v);
bch2_opt_set_by_id(&c->opts, id, v);
if ((id == Opt_background_target ||
@@ -665,7 +659,7 @@ int bch2_opts_create_sysfs_files(struct kobject *kobj)
for (i = bch2_opt_table;
i < bch2_opt_table + bch2_opts_nr;
i++) {
if (!(i->mode & OPT_FS))
if (!(i->flags & OPT_FS))
continue;
ret = sysfs_create_file(kobj, &i->attr);

@@ -525,7 +525,7 @@ static int bch2_xattr_bcachefs_set(const struct xattr_handler *handler,
memcpy(buf, value, size);
buf[size] = '\0';
ret = bch2_opt_parse(c, opt, buf, &v);
ret = bch2_opt_parse(c, NULL, opt, buf, &v);
kfree(buf);
if (ret < 0)

@@ -248,11 +248,11 @@ unsigned get_blocksize(const char *path, int fd)
struct stat statbuf = xfstat(fd);
if (!S_ISBLK(statbuf.st_mode))
return statbuf.st_blksize >> 9;
return statbuf.st_blksize;
unsigned ret;
xioctl(fd, BLKPBSZGET, &ret);
return ret >> 9;
return ret;
}
/* Open a block device, do magic blkid stuff to probe for existing filesystems: */