Update bcachefs sources to 0b8c5d0fb7 bcachefs: refactor superblock validation

Kent Overstreet 2017-12-29 19:14:53 -05:00
parent aca9f96dcc
commit 88242ec7a3
9 changed files with 328 additions and 249 deletions


@@ -1 +1 @@
f4b290345a983c534879e603fa5bf4d7465c9e2e
0b8c5d0fb7b5de6fb99030565cd2d0411da37f2b


@@ -737,22 +737,35 @@ struct bch_alloc {
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(alloc, BCH_ALLOC);
/* Superblock */
/* Optional/variable size superblock sections: */
/*
* Version 8: BCH_SB_ENCODED_EXTENT_MAX_BITS
* BCH_MEMBER_DATA_ALLOWED
* Version 9: incompatible extent nonce change
*/
struct bch_sb_field {
__u64 _data[0];
__le32 u64s;
__le32 type;
};
#define BCH_SB_VERSION_MIN 7
#define BCH_SB_VERSION_EXTENT_MAX 8
#define BCH_SB_VERSION_EXTENT_NONCE_V1 9
#define BCH_SB_VERSION_MAX 9
#define BCH_SB_FIELDS() \
x(journal, 0) \
x(members, 1) \
x(crypt, 2) \
x(replicas, 3)
#define BCH_SB_SECTOR 8
#define BCH_SB_LABEL_SIZE 32
#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
enum bch_sb_field_type {
#define x(f, nr) BCH_SB_FIELD_##f = nr,
BCH_SB_FIELDS()
#undef x
BCH_SB_FIELD_NR
};
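/*
 * Illustrative sketch, not part of this diff: BCH_SB_FIELDS() is an x-macro
 * list that gets expanded twice -- here into the enum, and in super-io.c into
 * a matching string table -- so the two can never drift out of sync.  A
 * minimal, self-contained version of the same pattern, with hypothetical names:
 */
#include <stdio.h>

#define SB_FIELDS()		\
	x(journal, 0)		\
	x(members, 1)		\
	x(crypt, 2)		\
	x(replicas, 3)

enum sb_field_type {
#define x(f, nr)	SB_FIELD_##f = nr,
	SB_FIELDS()
#undef x
	SB_FIELD_NR
};

static const char * const sb_field_names[] = {
#define x(f, nr)	#f,
	SB_FIELDS()
#undef x
	NULL
};

int main(void)
{
	/* prints "field 1 is members" */
	printf("field %d is %s\n", SB_FIELD_members, sb_field_names[SB_FIELD_members]);
	return 0;
}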
/* BCH_SB_FIELD_journal: */
struct bch_sb_field_journal {
struct bch_sb_field field;
__le64 buckets[0];
};
/* BCH_SB_FIELD_members: */
struct bch_member {
uuid_le uuid;
@@ -794,42 +807,12 @@ enum cache_replacement {
CACHE_REPLACEMENT_NR = 3,
};
struct bch_sb_layout {
uuid_le magic; /* bcachefs superblock UUID */
__u8 layout_type;
__u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
__u8 nr_superblocks;
__u8 pad[5];
__le64 sb_offset[61];
} __attribute__((packed, aligned(8)));
#define BCH_SB_LAYOUT_SECTOR 7
struct bch_sb_field {
__u64 _data[0];
__le32 u64s;
__le32 type;
};
enum bch_sb_field_type {
BCH_SB_FIELD_journal = 0,
BCH_SB_FIELD_members = 1,
BCH_SB_FIELD_crypt = 2,
BCH_SB_FIELD_replicas = 3,
BCH_SB_FIELD_NR = 4,
};
struct bch_sb_field_journal {
struct bch_sb_field field;
__le64 buckets[0];
};
struct bch_sb_field_members {
struct bch_sb_field field;
struct bch_member members[0];
};
/* Crypto: */
/* BCH_SB_FIELD_crypt: */
struct nonce {
__le32 d[4];
@@ -877,6 +860,8 @@ LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
/* BCH_SB_FIELD_replicas: */
enum bch_data_type {
BCH_DATA_NONE = 0,
BCH_DATA_SB = 1,
@@ -898,6 +883,34 @@ struct bch_sb_field_replicas {
struct bch_replicas_entry entries[0];
};
/* Superblock: */
/*
* Version 8: BCH_SB_ENCODED_EXTENT_MAX_BITS
* BCH_MEMBER_DATA_ALLOWED
* Version 9: incompatible extent nonce change
*/
#define BCH_SB_VERSION_MIN 7
#define BCH_SB_VERSION_EXTENT_MAX 8
#define BCH_SB_VERSION_EXTENT_NONCE_V1 9
#define BCH_SB_VERSION_MAX 9
#define BCH_SB_SECTOR 8
#define BCH_SB_LABEL_SIZE 32
#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
struct bch_sb_layout {
uuid_le magic; /* bcachefs superblock UUID */
__u8 layout_type;
__u8 sb_max_size_bits; /* base 2 of 512 byte sectors */
__u8 nr_superblocks;
__u8 pad[5];
__le64 sb_offset[61];
} __attribute__((packed, aligned(8)));
#define BCH_SB_LAYOUT_SECTOR 7
/*
* @offset - sector where this sb was written
* @version - on disk format version


@@ -1636,8 +1636,6 @@ static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
bch2_open_bucket_put(c, ob);
}
BUG_ON(bch2_sb_validate_journal(ca->disk_sb.sb, ca->mi));
bch2_write_super(c);
ret = 0;
@@ -1882,6 +1880,9 @@ int bch2_journal_flush_all_pins(struct journal *j)
struct bch_fs *c = container_of(j, struct bch_fs, journal);
bool flush;
if (!test_bit(JOURNAL_STARTED, &j->flags))
return 0;
bch2_journal_flush_pins(j, U64_MAX);
spin_lock(&j->lock);


@@ -338,6 +338,7 @@ static inline int bch2_journal_res_get(struct journal *j, struct journal_res *re
EBUG_ON(res->ref);
EBUG_ON(u64s_max < u64s_min);
EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
if (journal_res_get_fast(j, res, u64s_min, u64s_max))
goto out;
@@ -391,13 +392,6 @@ ssize_t bch2_journal_print_pins(struct journal *, char *);
int bch2_dev_journal_alloc(struct bch_dev *);
static inline unsigned bch2_nr_journal_buckets(struct bch_sb_field_journal *j)
{
return j
? (__le64 *) vstruct_end(&j->field) - j->buckets
: 0;
}
void bch2_dev_journal_stop(struct journal *, struct bch_dev *);
void bch2_fs_journal_stop(struct journal *);
void bch2_dev_journal_exit(struct bch_dev *);


@@ -3,7 +3,6 @@
#include "checksum.h"
#include "error.h"
#include "io.h"
#include "journal.h"
#include "super-io.h"
#include "super.h"
#include "vstructs.h"
@@ -14,11 +13,42 @@
static int bch2_sb_replicas_to_cpu_replicas(struct bch_fs *);
static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *,
struct bch_replicas_cpu *);
static const char *bch2_sb_validate_replicas(struct bch_sb *);
static inline void __bch2_sb_layout_size_assert(void)
/* superblock fields (optional/variable size sections): */
const char * const bch2_sb_fields[] = {
#define x(name, nr) #name,
BCH_SB_FIELDS()
#undef x
NULL
};
#define x(f, nr) \
static const char *bch2_sb_validate_##f(struct bch_sb *, struct bch_sb_field *);
BCH_SB_FIELDS()
#undef x
struct bch_sb_field_ops {
const char * (*validate)(struct bch_sb *, struct bch_sb_field *);
};
static const struct bch_sb_field_ops bch2_sb_field_ops[] = {
#define x(f, nr) \
[BCH_SB_FIELD_##f] = { \
.validate = bch2_sb_validate_##f, \
},
BCH_SB_FIELDS()
#undef x
};
static const char *bch2_sb_field_validate(struct bch_sb *sb,
struct bch_sb_field *f)
{
BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
unsigned type = le32_to_cpu(f->type);
return type < BCH_SB_FIELD_NR
? bch2_sb_field_ops[type].validate(sb, f)
: NULL;
}
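/*
 * Illustrative sketch, not bcachefs code: the per-type dispatch above keeps
 * validate hooks in a table indexed by the field's type number, and any type
 * the running code does not know about validates to NULL (no error) rather
 * than being rejected, which keeps unknown/newer superblock fields forward
 * compatible.  Hypothetical names throughout:
 */
#include <stddef.h>

struct field_hdr {
	unsigned type;
};

typedef const char *(*field_validate_fn)(const struct field_hdr *);

static const char *validate_journal_stub(const struct field_hdr *f)
{
	return NULL;		/* would check the journal bucket list here */
}

static const char *validate_members_stub(const struct field_hdr *f)
{
	return NULL;		/* would check the per-device member entries here */
}

static const field_validate_fn field_validators[] = {
	validate_journal_stub,
	validate_members_stub,
};

static const char *field_validate(const struct field_hdr *f)
{
	return f->type < sizeof(field_validators) / sizeof(field_validators[0])
		? field_validators[f->type](f)
		: NULL;		/* unknown field type: ignore, don't fail */
}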
struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
@@ -34,6 +64,37 @@ struct bch_sb_field *bch2_sb_field_get(struct bch_sb *sb,
return NULL;
}
static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb,
struct bch_sb_field *f,
unsigned u64s)
{
unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
if (!f) {
f = vstruct_last(sb);
memset(f, 0, sizeof(u64) * u64s);
f->u64s = cpu_to_le32(u64s);
f->type = 0;
} else {
void *src, *dst;
src = vstruct_end(f);
f->u64s = cpu_to_le32(u64s);
dst = vstruct_end(f);
memmove(dst, src, vstruct_end(sb) - src);
if (dst > src)
memset(src, 0, dst - src);
}
le32_add_cpu(&sb->u64s, u64s - old_u64s);
return f;
}
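/*
 * Sketch of the grow/shrink logic above, with assumed helper names rather
 * than the kernel's vstruct macros: everything that follows the resized
 * field is shifted with memmove(), any newly exposed gap is zeroed, and the
 * superblock's total size in u64s is adjusted by the delta.
 */
#include <string.h>

static void resize_field_sketch(unsigned long long *sb, unsigned *sb_u64s,
				unsigned field_off, unsigned old_u64s,
				unsigned new_u64s)
{
	unsigned long long *src = sb + field_off + old_u64s;	/* old end of field */
	unsigned long long *dst = sb + field_off + new_u64s;	/* new end of field */

	/* shift the trailing fields to their new offset */
	memmove(dst, src, (*sb_u64s - (field_off + old_u64s)) * sizeof(*sb));

	/* zero the space the field grew into */
	if (dst > src)
		memset(src, 0, (dst - src) * sizeof(*sb));

	*sb_u64s += new_u64s - old_u64s;
}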
/* Superblock realloc/free: */
void bch2_free_super(struct bch_sb_handle *sb)
{
if (sb->bio)
@@ -118,35 +179,6 @@ static int bch2_fs_sb_realloc(struct bch_fs *c, unsigned u64s)
return 0;
}
static struct bch_sb_field *__bch2_sb_field_resize(struct bch_sb *sb,
struct bch_sb_field *f,
unsigned u64s)
{
unsigned old_u64s = f ? le32_to_cpu(f->u64s) : 0;
if (!f) {
f = vstruct_last(sb);
memset(f, 0, sizeof(u64) * u64s);
f->u64s = cpu_to_le32(u64s);
f->type = 0;
} else {
void *src, *dst;
src = vstruct_end(f);
f->u64s = cpu_to_le32(u64s);
dst = vstruct_end(f);
memmove(dst, src, vstruct_end(sb) - src);
if (dst > src)
memset(src, 0, dst - src);
}
le32_add_cpu(&sb->u64s, u64s - old_u64s);
return f;
}
struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
enum bch_sb_field_type type,
unsigned u64s)
@@ -194,6 +226,13 @@ struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *c,
return f;
}
/* Superblock validate: */
static inline void __bch2_sb_layout_size_assert(void)
{
BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
}
static const char *validate_sb_layout(struct bch_sb_layout *layout)
{
u64 offset, prev_offset, max_sectors;
@@ -226,93 +265,11 @@ static const char *validate_sb_layout(struct bch_sb_layout *layout)
return NULL;
}
static int u64_cmp(const void *_l, const void *_r)
{
u64 l = *((const u64 *) _l), r = *((const u64 *) _r);
return l < r ? -1 : l > r ? 1 : 0;
}
const char *bch2_sb_validate_journal(struct bch_sb *sb,
struct bch_member_cpu mi)
{
struct bch_sb_field_journal *journal;
const char *err;
unsigned nr;
unsigned i;
u64 *b;
journal = bch2_sb_get_journal(sb);
if (!journal)
return NULL;
nr = bch2_nr_journal_buckets(journal);
if (!nr)
return NULL;
b = kmalloc_array(sizeof(u64), nr, GFP_KERNEL);
if (!b)
return "cannot allocate memory";
for (i = 0; i < nr; i++)
b[i] = le64_to_cpu(journal->buckets[i]);
sort(b, nr, sizeof(u64), u64_cmp, NULL);
err = "journal bucket at sector 0";
if (!b[0])
goto err;
err = "journal bucket before first bucket";
if (b[0] < mi.first_bucket)
goto err;
err = "journal bucket past end of device";
if (b[nr - 1] >= mi.nbuckets)
goto err;
err = "duplicate journal buckets";
for (i = 0; i + 1 < nr; i++)
if (b[i] == b[i + 1])
goto err;
err = NULL;
err:
kfree(b);
return err;
}
static const char *bch2_sb_validate_members(struct bch_sb *sb)
{
struct bch_sb_field_members *mi;
unsigned i;
mi = bch2_sb_get_members(sb);
if (!mi)
return "Invalid superblock: member info area missing";
if ((void *) (mi->members + sb->nr_devices) >
vstruct_end(&mi->field))
return "Invalid superblock: bad member info";
for (i = 0; i < sb->nr_devices; i++) {
if (!bch2_dev_exists(sb, mi, i))
continue;
if (le16_to_cpu(mi->members[i].bucket_size) <
BCH_SB_BTREE_NODE_SIZE(sb))
return "bucket size smaller than btree node size";
}
return NULL;
}
const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
{
struct bch_sb *sb = disk_sb->sb;
struct bch_sb_field *f;
struct bch_sb_field_members *sb_mi;
struct bch_member_cpu mi;
struct bch_sb_field_members *mi;
const char *err;
u16 block_size;
@@ -394,48 +351,26 @@ const char *bch2_sb_validate(struct bch_sb_handle *disk_sb)
if (vstruct_next(f) > vstruct_last(sb))
return "Invalid superblock: invalid optional field";
if (le32_to_cpu(f->type) >= BCH_SB_FIELD_NR)
return "Invalid superblock: unknown optional field type";
}
err = bch2_sb_validate_members(sb);
/* members must be validated first: */
mi = bch2_sb_get_members(sb);
if (!mi)
return "Invalid superblock: member info area missing";
err = bch2_sb_field_validate(sb, &mi->field);
if (err)
return err;
sb_mi = bch2_sb_get_members(sb);
mi = bch2_mi_to_cpu(sb_mi->members + sb->dev_idx);
vstruct_for_each(sb, f) {
if (le32_to_cpu(f->type) == BCH_SB_FIELD_members)
continue;
if (le64_to_cpu(sb->version) < BCH_SB_VERSION_EXTENT_MAX) {
struct bch_member *m;
for (m = sb_mi->members;
m < sb_mi->members + sb->nr_devices;
m++)
SET_BCH_MEMBER_DATA_ALLOWED(m, ~0);
err = bch2_sb_field_validate(sb, f);
if (err)
return err;
}
if (mi.nbuckets > LONG_MAX)
return "Too many buckets";
if (mi.nbuckets - mi.first_bucket < 1 << 10)
return "Not enough buckets";
if (mi.bucket_size < block_size)
return "Bad bucket size";
if (get_capacity(disk_sb->bdev->bd_disk) <
mi.bucket_size * mi.nbuckets)
return "Invalid superblock: device too small";
err = bch2_sb_validate_journal(sb, mi);
if (err)
return err;
err = bch2_sb_validate_replicas(sb);
if (err)
return err;
if (le64_to_cpu(sb->version) < BCH_SB_VERSION_EXTENT_NONCE_V1 &&
bch2_sb_get_crypt(sb) &&
BCH_SB_INITIALIZED(sb))
@@ -537,8 +472,9 @@ int bch2_sb_to_fs(struct bch_fs *c, struct bch_sb *src)
lockdep_assert_held(&c->sb_lock);
if (bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s))
return -ENOMEM;
ret = bch2_fs_sb_realloc(c, le32_to_cpu(src->u64s) - journal_u64s);
if (ret)
return ret;
__copy_super(c->disk_sb, src);
@@ -566,7 +502,6 @@ int bch2_sb_from_fs(struct bch_fs *c, struct bch_dev *ca)
return ret;
__copy_super(dst, src);
return 0;
}
@@ -841,6 +776,126 @@ out:
bch2_sb_update(c);
}
/* BCH_SB_FIELD_journal: */
static int u64_cmp(const void *_l, const void *_r)
{
u64 l = *((const u64 *) _l), r = *((const u64 *) _r);
return l < r ? -1 : l > r ? 1 : 0;
}
static const char *bch2_sb_validate_journal(struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_journal *journal = field_to_type(f, journal);
struct bch_member *m = bch2_sb_get_members(sb)->members + sb->dev_idx;
const char *err;
unsigned nr;
unsigned i;
u64 *b;
journal = bch2_sb_get_journal(sb);
if (!journal)
return NULL;
nr = bch2_nr_journal_buckets(journal);
if (!nr)
return NULL;
b = kmalloc_array(sizeof(u64), nr, GFP_KERNEL);
if (!b)
return "cannot allocate memory";
for (i = 0; i < nr; i++)
b[i] = le64_to_cpu(journal->buckets[i]);
sort(b, nr, sizeof(u64), u64_cmp, NULL);
err = "journal bucket at sector 0";
if (!b[0])
goto err;
err = "journal bucket before first bucket";
if (m && b[0] < le16_to_cpu(m->first_bucket))
goto err;
err = "journal bucket past end of device";
if (m && b[nr - 1] >= le64_to_cpu(m->nbuckets))
goto err;
err = "duplicate journal buckets";
for (i = 0; i + 1 < nr; i++)
if (b[i] == b[i + 1])
goto err;
err = NULL;
err:
kfree(b);
return err;
}
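/*
 * Sketch of the check above (illustrative, not the kernel code): copy the
 * bucket numbers, sort them, then one pass over adjacent entries catches
 * out-of-range buckets and duplicates.  Callers skip empty lists, as the
 * kernel code does.
 */
#include <stdlib.h>

static int u64_cmp_sketch(const void *l, const void *r)
{
	unsigned long long a = *(const unsigned long long *) l;
	unsigned long long b = *(const unsigned long long *) r;

	return a < b ? -1 : a > b ? 1 : 0;
}

/* returns NULL if the sorted bucket list fits in [first_bucket, nbuckets) */
static const char *check_journal_buckets(unsigned long long *b, unsigned nr,
					 unsigned long long first_bucket,
					 unsigned long long nbuckets)
{
	unsigned i;

	qsort(b, nr, sizeof(*b), u64_cmp_sketch);

	if (!b[0])
		return "journal bucket at sector 0";
	if (b[0] < first_bucket)
		return "journal bucket before first bucket";
	if (b[nr - 1] >= nbuckets)
		return "journal bucket past end of device";
	for (i = 0; i + 1 < nr; i++)
		if (b[i] == b[i + 1])
			return "duplicate journal buckets";

	return NULL;
}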
/* BCH_SB_FIELD_members: */
static const char *bch2_sb_validate_members(struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_members *mi = field_to_type(f, members);
struct bch_member *m;
if ((void *) (mi->members + sb->nr_devices) >
vstruct_end(&mi->field))
return "Invalid superblock: bad member info";
for (m = mi->members;
m < mi->members + sb->nr_devices;
m++) {
if (!bch2_member_exists(m))
continue;
if (le64_to_cpu(m->nbuckets) > LONG_MAX)
return "Too many buckets";
if (le64_to_cpu(m->nbuckets) -
le16_to_cpu(m->first_bucket) < 1 << 10)
return "Not enough buckets";
if (le16_to_cpu(m->bucket_size) <
le16_to_cpu(sb->block_size))
return "bucket size smaller than block size";
if (le16_to_cpu(m->bucket_size) <
BCH_SB_BTREE_NODE_SIZE(sb))
return "bucket size smaller than btree node size";
}
if (le64_to_cpu(sb->version) < BCH_SB_VERSION_EXTENT_MAX)
for (m = mi->members;
m < mi->members + sb->nr_devices;
m++)
SET_BCH_MEMBER_DATA_ALLOWED(m, ~0);
return NULL;
}
/* BCH_SB_FIELD_crypt: */
static const char *bch2_sb_validate_crypt(struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_crypt *crypt = field_to_type(f, crypt);
if (vstruct_bytes(&crypt->field) != sizeof(*crypt))
return "invalid field crypt: wrong size";
if (BCH_CRYPT_KDF_TYPE(crypt))
return "invalid field crypt: bad kdf type";
return NULL;
}
/* BCH_SB_FIELD_replicas: */
/* Replicas tracking - in memory: */
#define for_each_cpu_replicas_entry(_r, _i) \
@@ -1292,20 +1347,16 @@ static int bch2_cpu_replicas_to_sb_replicas(struct bch_fs *c,
return 0;
}
static const char *bch2_sb_validate_replicas(struct bch_sb *sb)
static const char *bch2_sb_validate_replicas(struct bch_sb *sb,
struct bch_sb_field *f)
{
struct bch_sb_field_members *mi;
struct bch_sb_field_replicas *sb_r;
struct bch_sb_field_replicas *sb_r = field_to_type(f, replicas);
struct bch_sb_field_members *mi = bch2_sb_get_members(sb);
struct bch_replicas_cpu *cpu_r = NULL;
struct bch_replicas_entry *e;
const char *err;
unsigned i;
mi = bch2_sb_get_members(sb);
sb_r = bch2_sb_get_replicas(sb);
if (!sb_r)
return NULL;
for_each_replicas_entry(sb_r, e) {
err = "invalid replicas entry: invalid data type";
if (e->data_type >= BCH_DATA_NR)


@@ -17,7 +17,7 @@ struct bch_sb_field *bch2_fs_sb_field_resize(struct bch_fs *,
#define field_to_type(_f, _name) \
container_of_or_null(_f, struct bch_sb_field_##_name, field)
#define BCH_SB_FIELD_TYPE(_name) \
#define x(_name, _nr) \
static inline struct bch_sb_field_##_name * \
bch2_sb_get_##_name(struct bch_sb *sb) \
{ \
@@ -39,18 +39,10 @@ bch2_fs_sb_resize_##_name(struct bch_fs *c, unsigned u64s) \
BCH_SB_FIELD_##_name, u64s), _name); \
}
BCH_SB_FIELD_TYPE(journal);
BCH_SB_FIELD_TYPE(members);
BCH_SB_FIELD_TYPE(crypt);
BCH_SB_FIELD_TYPE(replicas);
BCH_SB_FIELDS()
#undef x
static inline bool bch2_dev_exists(struct bch_sb *sb,
struct bch_sb_field_members *mi,
unsigned dev)
{
return dev < sb->nr_devices &&
!bch2_is_zero(mi->members[dev].uuid.b, sizeof(uuid_le));
}
extern const char * const bch2_sb_fields[];
static inline bool bch2_sb_test_feature(struct bch_sb *sb,
enum bch_sb_features f)
@@ -94,6 +86,42 @@ static inline __u64 bset_magic(struct bch_fs *c)
return __le64_to_cpu(bch2_sb_magic(c) ^ BSET_MAGIC);
}
int bch2_sb_to_fs(struct bch_fs *, struct bch_sb *);
int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *);
void bch2_free_super(struct bch_sb_handle *);
int bch2_super_realloc(struct bch_sb_handle *, unsigned);
const char *bch2_sb_validate(struct bch_sb_handle *);
const char *bch2_read_super(const char *, struct bch_opts,
struct bch_sb_handle *);
void bch2_write_super(struct bch_fs *);
/* BCH_SB_FIELD_journal: */
static inline unsigned bch2_nr_journal_buckets(struct bch_sb_field_journal *j)
{
return j
? (__le64 *) vstruct_end(&j->field) - j->buckets
: 0;
}
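/*
 * Sketch with an assumed layout (not the real bch_sb_field_journal): the
 * journal section is a one-u64 header (u64s + type) followed by a flexible
 * array of bucket numbers, and u64s counts the whole field including the
 * header, so the bucket count is u64s minus one -- the same quantity the
 * pointer arithmetic above derives via vstruct_end().
 */
struct journal_field_sketch {
	unsigned int u64s;		/* total field size in u64s, header included */
	unsigned int type;
	unsigned long long buckets[];	/* bucket numbers follow the header */
};

static unsigned nr_journal_buckets_sketch(const struct journal_field_sketch *j)
{
	/* the (u64s, type) header occupies exactly one u64 */
	return j ? j->u64s - 1 : 0;
}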
/* BCH_SB_FIELD_members: */
static inline bool bch2_member_exists(struct bch_member *m)
{
return !bch2_is_zero(m->uuid.b, sizeof(uuid_le));
}
static inline bool bch2_dev_exists(struct bch_sb *sb,
struct bch_sb_field_members *mi,
unsigned dev)
{
return dev < sb->nr_devices &&
bch2_member_exists(&mi->members[dev]);
}
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
return (struct bch_member_cpu) {
@@ -109,21 +137,7 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
};
}
int bch2_sb_to_fs(struct bch_fs *, struct bch_sb *);
int bch2_sb_from_fs(struct bch_fs *, struct bch_dev *);
void bch2_free_super(struct bch_sb_handle *);
int bch2_super_realloc(struct bch_sb_handle *, unsigned);
const char *bch2_sb_validate_journal(struct bch_sb *,
struct bch_member_cpu);
const char *bch2_sb_validate(struct bch_sb_handle *);
const char *bch2_read_super(const char *, struct bch_opts,
struct bch_sb_handle *);
void bch2_write_super(struct bch_fs *);
/* replicas: */
/* BCH_SB_FIELD_replicas: */
bool bch2_sb_has_replicas(struct bch_fs *, struct bkey_s_c_extent,
enum bch_data_type);


@@ -393,7 +393,7 @@ static void bch2_fs_free(struct bch_fs *c)
destroy_workqueue(c->wq);
free_pages((unsigned long) c->disk_sb, c->disk_sb_order);
kfree(c);
kvpfree(c, sizeof(*c));
module_put(THIS_MODULE);
}
@@ -469,7 +469,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts opts)
struct bch_fs *c;
unsigned i, iter_size;
c = kzalloc(sizeof(struct bch_fs), GFP_KERNEL);
c = kvpmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
if (!c)
return NULL;
@@ -1115,12 +1115,19 @@ static int __bch2_dev_online(struct bch_fs *c, struct bch_sb_handle *sb)
!c->devs[sb->sb->dev_idx]);
ca = bch_dev_locked(c, sb->sb->dev_idx);
if (ca->disk_sb.bdev) {
bch_err(c, "already have device online in slot %u",
if (bch2_dev_is_online(ca)) {
bch_err(ca, "already have device online in slot %u",
sb->sb->dev_idx);
return -EINVAL;
}
if (get_capacity(sb->bdev->bd_disk) <
ca->mi.bucket_size * ca->mi.nbuckets) {
bch_err(ca, "cannot online: device too small");
return -EINVAL;
}
BUG_ON(!percpu_ref_is_zero(&ca->io_ref));
ret = bch2_dev_journal_init(ca, sb->sb);


@@ -27,7 +27,7 @@ static inline sector_t bucket_remainder(const struct bch_dev *ca, sector_t s)
static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
return !percpu_ref_is_zero(&ca->io_ref);
return ca->disk_sb.bdev != NULL;
}
static inline unsigned dev_mask_nr(struct bch_devs_mask *devs)


@@ -651,7 +651,7 @@ static ssize_t show_quantiles(struct bch_dev *ca, char *buf,
}
for (i = ca->mi.first_bucket; i < n; i++)
p[i] = fn(ca, n, private);
p[i] = fn(ca, i, private);
sort(p, n, sizeof(unsigned), cmp, NULL);
up_read(&ca->bucket_lock);
@@ -671,7 +671,6 @@ static ssize_t show_quantiles(struct bch_dev *ca, char *buf,
buf[ret - 1] = '\n';
return ret;
}
static ssize_t show_reserve_stats(struct bch_dev *ca, char *buf)