Update bcachefs sources to e2b8120595 bcachefs: Use x-macros for more enums

Kent Overstreet 2021-02-20 22:31:38 -05:00
parent b64369c59f
commit 9e5d774875
46 changed files with 331 additions and 330 deletions
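This update pulls in the upstream conversion of several hand-maintained enums to the x-macro pattern: each value list is written once as a macro that invokes x() per entry, then expanded with different definitions of x() to generate the enum, the matching string table, and so on, so the two can never drift out of sync. A minimal compilable sketch of the pattern, using the BCH_ERROR_ACTIONS() list that appears in the diff below (the main() harness is illustrative, not part of the commit):

	#include <stdio.h>

	#define BCH_ERROR_ACTIONS()	\
		x(continue,	0)	\
		x(ro,		1)	\
		x(panic,	2)

	/* Expansion 1: the enum itself. */
	enum bch_error_actions {
	#define x(t, n) BCH_ON_ERROR_##t = n,
		BCH_ERROR_ACTIONS()
	#undef x
		BCH_ON_ERROR_NR
	};

	/* Expansion 2: a NULL-terminated table of user-visible names. */
	const char * const bch2_error_actions[] = {
	#define x(t, n) #t,
		BCH_ERROR_ACTIONS()
	#undef x
		NULL
	};

	int main(void)
	{
		printf("%s = %d\n", bch2_error_actions[BCH_ON_ERROR_ro],
		       BCH_ON_ERROR_ro);	/* prints "ro = 1" */
		return 0;
	}

One side effect visible throughout the diff: the generated identifiers use the lowercase list token directly (BCH_ON_ERROR_continue, BTREE_ID_extents), and the generated strings replace the old hand-written ones (e.g. "remount-ro" becomes "ro").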

View File

@ -1 +1 @@
9b77e72c474e11130b514abd41a3c06e3f67c2ab
e2b8120595b8d82ad51f3b4310deaef1c96b3e26

View File

@ -114,7 +114,7 @@ int cmd_dump(int argc, char *argv[])
opt_set(opts, nochanges, true);
opt_set(opts, norecovery, true);
opt_set(opts, degraded, true);
opt_set(opts, errors, BCH_ON_ERROR_CONTINUE);
opt_set(opts, errors, BCH_ON_ERROR_continue);
opt_set(opts, fix_errors, FSCK_OPT_YES);
while ((opt = getopt(argc, argv, "o:fvh")) != -1)
@ -447,7 +447,7 @@ int cmd_list(int argc, char *argv[])
opt_set(opts, nochanges, true);
opt_set(opts, norecovery, true);
opt_set(opts, degraded, true);
opt_set(opts, errors, BCH_ON_ERROR_CONTINUE);
opt_set(opts, errors, BCH_ON_ERROR_continue);
while ((opt = getopt(argc, argv, "b:s:e:i:m:fvh")) != -1)
switch (opt) {
@ -540,7 +540,7 @@ int cmd_list_journal(int argc, char *argv[])
opt_set(opts, nochanges, true);
opt_set(opts, norecovery, true);
opt_set(opts, degraded, true);
opt_set(opts, errors, BCH_ON_ERROR_CONTINUE);
opt_set(opts, errors, BCH_ON_ERROR_continue);
opt_set(opts, fix_errors, FSCK_OPT_YES);
opt_set(opts, keep_journal, true);

View File

@ -313,9 +313,9 @@ int cmd_device_evacuate(int argc, char *argv[])
struct bch_ioctl_dev_usage u = bchu_dev_usage(fs, dev_idx);
if (u.state == BCH_MEMBER_STATE_RW) {
if (u.state == BCH_MEMBER_STATE_rw) {
printf("Setting %s readonly\n", dev_path);
bchu_disk_set_state(fs, dev_idx, BCH_MEMBER_STATE_RO, 0);
bchu_disk_set_state(fs, dev_idx, BCH_MEMBER_STATE_ro, 0);
}
return bchu_data(fs, (struct bch_ioctl_data) {
@ -372,7 +372,7 @@ int cmd_device_set_state(int argc, char *argv[])
die("Please supply a device state");
unsigned new_state = read_string_list_or_die(new_state_str,
bch2_dev_state, "device state");
bch2_member_states, "device state");
if (!offline) {
unsigned dev_idx;

View File

@ -37,7 +37,7 @@ static void print_dev_usage(struct bchfs_handle fs,
printf("\n");
printf_pad(20, "%s (device %u):", d->label ?: "(no label)", d->idx);
printf("%30s%16s\n", d->dev ?: "(device not found)", bch2_dev_state[u.state]);
printf("%30s%16s\n", d->dev ?: "(device not found)", bch2_member_states[u.state]);
printf("%-20s%16s%16s%16s\n",
"", "data", "buckets", "fragmented");

View File

@ -123,7 +123,7 @@ static void update_inode(struct bch_fs *c,
int ret;
bch2_inode_pack(c, &packed, inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
ret = bch2_btree_insert(c, BTREE_ID_inodes, &packed.inode.k_i,
NULL, NULL, 0);
if (ret)
die("error updating inode: %s", strerror(-ret));
@ -329,7 +329,7 @@ static void link_data(struct bch_fs *c, struct bch_inode_unpacked *dst,
bch2_mark_bkey_replicas(c, extent_i_to_s_c(e).s_c);
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &e->k_i,
ret = bch2_btree_insert(c, BTREE_ID_extents, &e->k_i,
&res, NULL, 0);
if (ret)
die("btree insert error %s", strerror(-ret));

View File

@ -255,7 +255,7 @@ struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
m->first_bucket = 0;
m->bucket_size = cpu_to_le16(i->bucket_size);
SET_BCH_MEMBER_REPLACEMENT(m, CACHE_REPLACEMENT_LRU);
SET_BCH_MEMBER_REPLACEMENT(m, BCH_CACHE_REPLACEMENT_lru);
SET_BCH_MEMBER_DISCARD(m, i->discard);
SET_BCH_MEMBER_DATA_ALLOWED(m, i->data_allowed);
SET_BCH_MEMBER_DURABILITY(m, i->durability + 1);
@ -535,14 +535,14 @@ static void bch2_sb_print_members(struct bch_sb *sb, struct bch_sb_field *f,
time_str,
BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
? bch2_dev_state[BCH_MEMBER_STATE(m)]
? bch2_member_states[BCH_MEMBER_STATE(m)]
: "unknown",
group,
data_allowed_str,
data_has_str,
BCH_MEMBER_REPLACEMENT(m) < CACHE_REPLACEMENT_NR
BCH_MEMBER_REPLACEMENT(m) < BCH_CACHE_REPLACEMENT_NR
? bch2_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)]
: "unknown",
@ -778,7 +778,7 @@ void bch2_sb_print(struct bch_sb *sb, bool print_layout,
pr_units(le16_to_cpu(sb->block_size), units),
pr_units(BCH_SB_BTREE_NODE_SIZE(sb), units),
BCH_SB_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS
BCH_SB_ERROR_ACTION(sb) < BCH_ON_ERROR_NR
? bch2_error_actions[BCH_SB_ERROR_ACTION(sb)]
: "unknown",

View File

@ -341,7 +341,7 @@ int bch2_alloc_read(struct bch_fs *c, struct journal_keys *journal_keys)
int ret;
down_read(&c->gc_lock);
ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_ALLOC,
ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_alloc,
NULL, bch2_alloc_read_fn);
up_read(&c->gc_lock);
@ -369,7 +369,7 @@ retry:
bch2_trans_begin(trans);
ret = bch2_btree_key_cache_flush(trans,
BTREE_ID_ALLOC, iter->pos);
BTREE_ID_alloc, iter->pos);
if (ret)
goto err;
@ -411,7 +411,7 @@ int bch2_alloc_write(struct bch_fs *c, unsigned flags)
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC, POS_MIN,
iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
for_each_member_device(ca, c, i) {
@ -448,7 +448,7 @@ int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
u64 *time, now;
int ret = 0;
iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, POS(dev, bucket_nr),
iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, POS(dev, bucket_nr),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
@ -753,13 +753,13 @@ static size_t find_reclaimable_buckets(struct bch_fs *c, struct bch_dev *ca)
ca->inc_gen_needs_gc = 0;
switch (ca->mi.replacement) {
case CACHE_REPLACEMENT_LRU:
case BCH_CACHE_REPLACEMENT_lru:
find_reclaimable_buckets_lru(c, ca);
break;
case CACHE_REPLACEMENT_FIFO:
case BCH_CACHE_REPLACEMENT_fifo:
find_reclaimable_buckets_fifo(c, ca);
break;
case CACHE_REPLACEMENT_RANDOM:
case BCH_CACHE_REPLACEMENT_random:
find_reclaimable_buckets_random(c, ca);
break;
}
@ -960,7 +960,7 @@ static int bch2_invalidate_buckets(struct bch_fs *c, struct bch_dev *ca)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_ALLOC,
iter = bch2_trans_get_iter(&trans, BTREE_ID_alloc,
POS(ca->dev_idx, 0),
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|

View File

@ -369,14 +369,14 @@ enum gc_phase {
GC_PHASE_START,
GC_PHASE_SB,
GC_PHASE_BTREE_EC,
GC_PHASE_BTREE_EXTENTS,
GC_PHASE_BTREE_INODES,
GC_PHASE_BTREE_DIRENTS,
GC_PHASE_BTREE_XATTRS,
GC_PHASE_BTREE_ALLOC,
GC_PHASE_BTREE_QUOTAS,
GC_PHASE_BTREE_REFLINK,
GC_PHASE_BTREE_stripes,
GC_PHASE_BTREE_extents,
GC_PHASE_BTREE_inodes,
GC_PHASE_BTREE_dirents,
GC_PHASE_BTREE_xattrs,
GC_PHASE_BTREE_alloc,
GC_PHASE_BTREE_quotas,
GC_PHASE_BTREE_reflink,
GC_PHASE_PENDING_DELETE,
GC_PHASE_ALLOC,
@ -728,7 +728,7 @@ struct bch_fs {
* Tracks GC's progress - everything in the range [ZERO_KEY..gc_cur_pos]
* has been marked by GC.
*
* gc_cur_phase is a superset of btree_ids (BTREE_ID_EXTENTS etc.)
* gc_cur_phase is a superset of btree_ids (BTREE_ID_extents etc.)
*
* Protected by gc_pos_lock. Only written to by GC thread, so GC thread
* can read without a lock.

View File

@ -326,7 +326,7 @@ static inline void bkey_init(struct bkey *k)
x(discard, 1) \
x(error, 2) \
x(cookie, 3) \
x(whiteout, 4) \
x(hash_whiteout, 4) \
x(btree_ptr, 5) \
x(extent, 6) \
x(reservation, 7) \
@ -351,11 +351,27 @@ enum bch_bkey_type {
KEY_TYPE_MAX,
};
struct bch_deleted {
struct bch_val v;
};
struct bch_discard {
struct bch_val v;
};
struct bch_error {
struct bch_val v;
};
struct bch_cookie {
struct bch_val v;
__le64 cookie;
};
struct bch_hash_whiteout {
struct bch_val v;
};
/* Extents */
/*
@ -971,19 +987,29 @@ LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif
#define BCH_MEMBER_STATES() \
x(rw, 0) \
x(ro, 1) \
x(failed, 2) \
x(spare, 3)
enum bch_member_state {
BCH_MEMBER_STATE_RW = 0,
BCH_MEMBER_STATE_RO = 1,
BCH_MEMBER_STATE_FAILED = 2,
BCH_MEMBER_STATE_SPARE = 3,
BCH_MEMBER_STATE_NR = 4,
#define x(t, n) BCH_MEMBER_STATE_##t = n,
BCH_MEMBER_STATES()
#undef x
BCH_MEMBER_STATE_NR
};
enum cache_replacement {
CACHE_REPLACEMENT_LRU = 0,
CACHE_REPLACEMENT_FIFO = 1,
CACHE_REPLACEMENT_RANDOM = 2,
CACHE_REPLACEMENT_NR = 3,
#define BCH_CACHE_REPLACEMENT_POLICIES() \
x(lru, 0) \
x(fifo, 1) \
x(random, 2)
enum bch_cache_replacement_policies {
#define x(t, n) BCH_CACHE_REPLACEMENT_##t = n,
BCH_CACHE_REPLACEMENT_POLICIES()
#undef x
BCH_CACHE_REPLACEMENT_NR
};
struct bch_sb_field_members {
@ -1377,11 +1403,16 @@ enum bch_sb_compat {
#define BCH_BKEY_PTRS_MAX 16U
#define BCH_ERROR_ACTIONS() \
x(continue, 0) \
x(ro, 1) \
x(panic, 2)
enum bch_error_actions {
BCH_ON_ERROR_CONTINUE = 0,
BCH_ON_ERROR_RO = 1,
BCH_ON_ERROR_PANIC = 2,
BCH_NR_ERROR_ACTIONS = 3,
#define x(t, n) BCH_ON_ERROR_##t = n,
BCH_ERROR_ACTIONS()
#undef x
BCH_ON_ERROR_NR
};
enum bch_str_hash_type {
@ -1392,11 +1423,16 @@ enum bch_str_hash_type {
BCH_STR_HASH_NR = 4,
};
#define BCH_STR_HASH_OPTS() \
x(crc32c, 0) \
x(crc64, 1) \
x(siphash, 2)
enum bch_str_hash_opts {
BCH_STR_HASH_OPT_CRC32C = 0,
BCH_STR_HASH_OPT_CRC64 = 1,
BCH_STR_HASH_OPT_SIPHASH = 2,
BCH_STR_HASH_OPT_NR = 3,
#define x(t, n) BCH_STR_HASH_OPT_##t = n,
BCH_STR_HASH_OPTS()
#undef x
BCH_STR_HASH_OPT_NR
};
enum bch_csum_type {
@ -1431,11 +1467,16 @@ static inline _Bool bch2_csum_type_is_encryption(enum bch_csum_type type)
}
}
#define BCH_CSUM_OPTS() \
x(none, 0) \
x(crc32c, 1) \
x(crc64, 2)
enum bch_csum_opts {
BCH_CSUM_OPT_NONE = 0,
BCH_CSUM_OPT_CRC32C = 1,
BCH_CSUM_OPT_CRC64 = 2,
BCH_CSUM_OPT_NR = 3,
#define x(t, n) BCH_CSUM_OPT_##t = n,
BCH_CSUM_OPTS()
#undef x
BCH_CSUM_OPT_NR
};
#define BCH_COMPRESSION_TYPES() \
@ -1447,7 +1488,7 @@ enum bch_csum_opts {
x(incompressible, 5)
enum bch_compression_type {
#define x(t, n) BCH_COMPRESSION_TYPE_##t,
#define x(t, n) BCH_COMPRESSION_TYPE_##t = n,
BCH_COMPRESSION_TYPES()
#undef x
BCH_COMPRESSION_TYPE_NR
@ -1460,7 +1501,7 @@ enum bch_compression_type {
x(zstd, 3)
enum bch_compression_opts {
#define x(t, n) BCH_COMPRESSION_OPT_##t,
#define x(t, n) BCH_COMPRESSION_OPT_##t = n,
BCH_COMPRESSION_OPTS()
#undef x
BCH_COMPRESSION_OPT_NR
@ -1627,18 +1668,18 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
/* Btree: */
#define BCH_BTREE_IDS() \
x(EXTENTS, 0, "extents") \
x(INODES, 1, "inodes") \
x(DIRENTS, 2, "dirents") \
x(XATTRS, 3, "xattrs") \
x(ALLOC, 4, "alloc") \
x(QUOTAS, 5, "quotas") \
x(EC, 6, "stripes") \
x(REFLINK, 7, "reflink")
#define BCH_BTREE_IDS() \
x(extents, 0) \
x(inodes, 1) \
x(dirents, 2) \
x(xattrs, 3) \
x(alloc, 4) \
x(quotas, 5) \
x(stripes, 6) \
x(reflink, 7)
enum btree_id {
#define x(kwd, val, name) BTREE_ID_##kwd = val,
#define x(kwd, val) BTREE_ID_##kwd = val,
BCH_BTREE_IDS()
#undef x
BTREE_ID_NR
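
Note that the btree id list drops its third argument: the lowercase enum token now doubles as the user-visible name, which is what lets bch2_btree_ids[] be generated by stringification (it moves from btree_cache.c into opts.c later in this diff). Illustrative expansion of the new list:

	enum btree_id {
		BTREE_ID_extents = 0,
		/* ... */
		BTREE_ID_stripes = 6,	/* was BTREE_ID_EC, displayed as "stripes" */
		BTREE_ID_reflink = 7,
		BTREE_ID_NR
	};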

View File

@ -403,7 +403,7 @@ static inline struct bkey_s_c bkey_i_to_s_c(const struct bkey_i *k)
* bkey_i_extent to a bkey_i - since that's always safe, instead of conversion
* functions.
*/
#define BKEY_VAL_ACCESSORS(name) \
#define x(name, ...) \
struct bkey_i_##name { \
union { \
struct bkey k; \
@ -514,23 +514,8 @@ static inline struct bkey_i_##name *bkey_##name##_init(struct bkey_i *_k)\
return k; \
}
BKEY_VAL_ACCESSORS(cookie);
BKEY_VAL_ACCESSORS(btree_ptr);
BKEY_VAL_ACCESSORS(extent);
BKEY_VAL_ACCESSORS(reservation);
BKEY_VAL_ACCESSORS(inode);
BKEY_VAL_ACCESSORS(inode_generation);
BKEY_VAL_ACCESSORS(dirent);
BKEY_VAL_ACCESSORS(xattr);
BKEY_VAL_ACCESSORS(alloc);
BKEY_VAL_ACCESSORS(quota);
BKEY_VAL_ACCESSORS(stripe);
BKEY_VAL_ACCESSORS(reflink_p);
BKEY_VAL_ACCESSORS(reflink_v);
BKEY_VAL_ACCESSORS(inline_data);
BKEY_VAL_ACCESSORS(btree_ptr_v2);
BKEY_VAL_ACCESSORS(indirect_inline_data);
BKEY_VAL_ACCESSORS(alloc_v2);
BCH_BKEY_TYPES();
#undef x
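
The same trick replaces the hand-maintained list of BKEY_VAL_ACCESSORS() invocations above: defining x() as the old macro body and expanding BCH_BKEY_TYPES() stamps out the accessor structs and helpers for every key type in one shot. A compressed, compilable sketch under a hypothetical two-entry list (the real macro body also generates the cast and init helpers shown above, and wraps the members in a union with struct bkey_i):

	struct bkey { unsigned char u64s; };
	struct bch_cookie { unsigned long long cookie; };
	struct bch_extent { unsigned long long _data[1]; };

	/* Hypothetical reduced list; the real BCH_BKEY_TYPES() is much longer. */
	#define BCH_BKEY_TYPES_SKETCH()	\
		x(cookie, 3)		\
		x(extent, 6)

	/* One macro body stamps out a typed wrapper per listed key type: */
	#define x(name, ...)			\
	struct bkey_i_##name {			\
		struct bkey	  k;		\
		struct bch_##name v;		\
	};

	BCH_BKEY_TYPES_SKETCH()
	#undef x

	int main(void)
	{
		struct bkey_i_cookie c = { .k = { .u64s = 2 }, .v = { .cookie = 42 } };
		return c.k.u64s == 2 && c.v.cookie == 42 ? 0 : 1;
	}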
/* byte order helpers */

View File

@ -59,7 +59,7 @@ static const char *key_type_cookie_invalid(const struct bch_fs *c,
.key_invalid = key_type_cookie_invalid, \
}
#define bch2_bkey_ops_whiteout (struct bkey_ops) { \
#define bch2_bkey_ops_hash_whiteout (struct bkey_ops) { \
.key_invalid = empty_val_key_invalid, \
}
@ -104,7 +104,7 @@ const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
if (k.k->u64s < BKEY_U64s)
return "u64s too small";
if (type == BKEY_TYPE_BTREE &&
if (type == BKEY_TYPE_btree &&
bkey_val_u64s(k.k) > BKEY_BTREE_PTR_VAL_U64s_MAX)
return "value too big";
@ -122,7 +122,7 @@ const char *__bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
if (k.k->p.snapshot)
return "nonzero snapshot";
if (type != BKEY_TYPE_BTREE &&
if (type != BKEY_TYPE_btree &&
!bkey_cmp(k.k->p, POS_MAX))
return "POS_MAX key";
@ -263,18 +263,18 @@ static const struct old_bkey_type {
u8 old;
u8 new;
} bkey_renumber_table[] = {
{BKEY_TYPE_BTREE, 128, KEY_TYPE_btree_ptr },
{BKEY_TYPE_EXTENTS, 128, KEY_TYPE_extent },
{BKEY_TYPE_EXTENTS, 129, KEY_TYPE_extent },
{BKEY_TYPE_EXTENTS, 130, KEY_TYPE_reservation },
{BKEY_TYPE_INODES, 128, KEY_TYPE_inode },
{BKEY_TYPE_INODES, 130, KEY_TYPE_inode_generation },
{BKEY_TYPE_DIRENTS, 128, KEY_TYPE_dirent },
{BKEY_TYPE_DIRENTS, 129, KEY_TYPE_whiteout },
{BKEY_TYPE_XATTRS, 128, KEY_TYPE_xattr },
{BKEY_TYPE_XATTRS, 129, KEY_TYPE_whiteout },
{BKEY_TYPE_ALLOC, 128, KEY_TYPE_alloc },
{BKEY_TYPE_QUOTAS, 128, KEY_TYPE_quota },
{BKEY_TYPE_btree, 128, KEY_TYPE_btree_ptr },
{BKEY_TYPE_extents, 128, KEY_TYPE_extent },
{BKEY_TYPE_extents, 129, KEY_TYPE_extent },
{BKEY_TYPE_extents, 130, KEY_TYPE_reservation },
{BKEY_TYPE_inodes, 128, KEY_TYPE_inode },
{BKEY_TYPE_inodes, 130, KEY_TYPE_inode_generation },
{BKEY_TYPE_dirents, 128, KEY_TYPE_dirent },
{BKEY_TYPE_dirents, 129, KEY_TYPE_hash_whiteout },
{BKEY_TYPE_xattrs, 128, KEY_TYPE_xattr },
{BKEY_TYPE_xattrs, 129, KEY_TYPE_hash_whiteout },
{BKEY_TYPE_alloc, 128, KEY_TYPE_alloc },
{BKEY_TYPE_quotas, 128, KEY_TYPE_quota },
};
void bch2_bkey_renumber(enum btree_node_type btree_node_type,
@ -320,7 +320,7 @@ void __bch2_bkey_compat(unsigned level, enum btree_id btree_id,
break;
case 2:
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_id == BTREE_ID_INODES) {
btree_id == BTREE_ID_inodes) {
if (!bkey_packed(k)) {
struct bkey_i *u = packed_to_bkey(k);
swap(u->k.p.inode, u->k.p.offset);

View File

@ -13,13 +13,6 @@
#include <linux/sched/mm.h>
#include <trace/events/bcachefs.h>
const char * const bch2_btree_ids[] = {
#define x(kwd, val, name) name,
BCH_BTREE_IDS()
#undef x
NULL
};
void bch2_recalc_btree_reserve(struct bch_fs *c)
{
unsigned i, reserve = 16;

View File

@ -7,8 +7,6 @@
struct btree_iter;
extern const char * const bch2_btree_ids[];
void bch2_recalc_btree_reserve(struct bch_fs *);
void bch2_btree_node_hash_remove(struct btree_cache *, struct btree *);

View File

@ -57,7 +57,7 @@ static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
switch (id) {
#define x(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
#define x(name, v) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
BCH_BTREE_IDS()
#undef x
default:

View File

@ -949,7 +949,7 @@ int bch2_btree_node_read_done(struct bch_fs *c, struct bch_dev *ca,
bkey_for_each_ptr(bch2_bkey_ptrs(bkey_i_to_s(&b->key)), ptr) {
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
if (ca->mi.state != BCH_MEMBER_STATE_RW)
if (ca->mi.state != BCH_MEMBER_STATE_rw)
set_btree_node_need_rewrite(b);
}
out:
@ -1313,7 +1313,7 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
unsigned whiteout_u64s = 0;
int ret;
if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_BTREE))
if (bch2_bkey_invalid(c, bkey_i_to_s_c(&b->key), BKEY_TYPE_btree))
return -1;
ret = validate_bset(c, NULL, b, i, sectors, WRITE, false) ?:

View File

@ -193,7 +193,7 @@ static inline void compat_bformat(unsigned level, enum btree_id btree_id,
int write, struct bkey_format *f)
{
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_id == BTREE_ID_INODES) {
btree_id == BTREE_ID_inodes) {
swap(f->bits_per_field[BKEY_FIELD_INODE],
f->bits_per_field[BKEY_FIELD_OFFSET]);
swap(f->field_offset[BKEY_FIELD_INODE],
@ -209,7 +209,7 @@ static inline void compat_bpos(unsigned level, enum btree_id btree_id,
bch2_bpos_swab(p);
if (version < bcachefs_metadata_version_inode_btree_change &&
btree_id == BTREE_ID_INODES)
btree_id == BTREE_ID_inodes)
swap(p->inode, p->offset);
}

View File

@ -542,16 +542,16 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
}
enum btree_node_type {
#define x(kwd, val, name) BKEY_TYPE_##kwd = val,
#define x(kwd, val) BKEY_TYPE_##kwd = val,
BCH_BTREE_IDS()
#undef x
BKEY_TYPE_BTREE,
BKEY_TYPE_btree,
};
/* Type of a key in btree @id at level @level: */
static inline enum btree_node_type __btree_node_type(unsigned level, enum btree_id id)
{
return level ? BKEY_TYPE_BTREE : (enum btree_node_type) id;
return level ? BKEY_TYPE_btree : (enum btree_node_type) id;
}
/* Type of keys @b contains: */
@ -563,8 +563,8 @@ static inline enum btree_node_type btree_node_type(struct btree *b)
static inline bool btree_node_type_is_extents(enum btree_node_type type)
{
switch (type) {
case BKEY_TYPE_EXTENTS:
case BKEY_TYPE_REFLINK:
case BKEY_TYPE_extents:
case BKEY_TYPE_reflink:
return true;
default:
return false;
@ -587,18 +587,18 @@ static inline bool btree_iter_is_extents(struct btree_iter *iter)
}
#define BTREE_NODE_TYPE_HAS_TRIGGERS \
((1U << BKEY_TYPE_EXTENTS)| \
(1U << BKEY_TYPE_ALLOC)| \
(1U << BKEY_TYPE_INODES)| \
(1U << BKEY_TYPE_REFLINK)| \
(1U << BKEY_TYPE_EC)| \
(1U << BKEY_TYPE_BTREE))
((1U << BKEY_TYPE_extents)| \
(1U << BKEY_TYPE_alloc)| \
(1U << BKEY_TYPE_inodes)| \
(1U << BKEY_TYPE_reflink)| \
(1U << BKEY_TYPE_stripes)| \
(1U << BKEY_TYPE_btree))
#define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS \
((1U << BKEY_TYPE_EXTENTS)| \
(1U << BKEY_TYPE_INODES)| \
(1U << BKEY_TYPE_EC)| \
(1U << BKEY_TYPE_REFLINK))
((1U << BKEY_TYPE_extents)| \
(1U << BKEY_TYPE_inodes)| \
(1U << BKEY_TYPE_stripes)| \
(1U << BKEY_TYPE_reflink))
enum btree_trigger_flags {
__BTREE_TRIGGER_NORUN, /* Don't run triggers at all */

View File

@ -1195,7 +1195,7 @@ static void btree_split_insert_keys(struct btree_update *as, struct btree *b,
struct bkey_packed *src, *dst, *n;
struct bset *i;
BUG_ON(btree_node_type(b) != BKEY_TYPE_BTREE);
BUG_ON(btree_node_type(b) != BKEY_TYPE_btree);
bch2_btree_node_iter_init(&node_iter, b, &k->k.p);

View File

@ -340,7 +340,7 @@ static inline bool iter_has_nontrans_triggers(struct btree_iter *iter)
{
return (((BTREE_NODE_TYPE_HAS_TRIGGERS &
~BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS)) |
(1U << BTREE_ID_EC)) &
(1U << BTREE_ID_stripes)) &
(1U << iter->btree_id);
}

View File

@ -1490,7 +1490,7 @@ static int trans_get_key(struct btree_trans *trans,
struct btree_iter **iter,
struct bkey_s_c *k)
{
unsigned flags = btree_id != BTREE_ID_ALLOC
unsigned flags = btree_id != BTREE_ID_alloc
? BTREE_ITER_SLOTS
: BTREE_ITER_CACHED;
int ret;
@ -1526,11 +1526,11 @@ bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter **_it
if (IS_ERR(a))
return a;
iter = trans_get_update(trans, BTREE_ID_ALLOC, pos, &k);
iter = trans_get_update(trans, BTREE_ID_alloc, pos, &k);
if (iter) {
*u = bch2_alloc_unpack(k);
} else {
iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, pos,
iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, pos,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
@ -1587,7 +1587,7 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
struct bch_replicas_padded r;
int ret = 0;
ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.ec.idx), &iter, &k);
ret = trans_get_key(trans, BTREE_ID_stripes, POS(0, p.ec.idx), &iter, &k);
if (ret < 0)
return ret;
@ -1811,7 +1811,7 @@ static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
__le64 *refcount;
s64 ret;
ret = trans_get_key(trans, BTREE_ID_REFLINK,
ret = trans_get_key(trans, BTREE_ID_reflink,
POS(0, idx), &iter, &k);
if (ret < 0)
return ret;

View File

@ -77,11 +77,11 @@ static inline enum bch_csum_type bch2_csum_opt_to_type(enum bch_csum_opts type,
bool data)
{
switch (type) {
case BCH_CSUM_OPT_NONE:
case BCH_CSUM_OPT_none:
return BCH_CSUM_NONE;
case BCH_CSUM_OPT_CRC32C:
case BCH_CSUM_OPT_crc32c:
return data ? BCH_CSUM_CRC32C : BCH_CSUM_CRC32C_NONZERO;
case BCH_CSUM_OPT_CRC64:
case BCH_CSUM_OPT_crc64:
return data ? BCH_CSUM_CRC64 : BCH_CSUM_CRC64_NONZERO;
default:
BUG();

View File

@ -64,7 +64,7 @@ static bool dirent_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
}
const struct bch_hash_desc bch2_dirent_hash_desc = {
.btree_id = BTREE_ID_DIRENTS,
.btree_id = BTREE_ID_dirents,
.key_type = KEY_TYPE_dirent,
.hash_key = dirent_hash_key,
.hash_bkey = dirent_hash_bkey,
@ -262,7 +262,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
* overwrite old_dst - just make sure to use a
* whiteout when deleting src:
*/
new_src->k.type = KEY_TYPE_whiteout;
new_src->k.type = KEY_TYPE_hash_whiteout;
}
} else {
/* Check if we need a whiteout to delete src: */
@ -272,7 +272,7 @@ int bch2_dirent_rename(struct btree_trans *trans,
goto out;
if (ret)
new_src->k.type = KEY_TYPE_whiteout;
new_src->k.type = KEY_TYPE_hash_whiteout;
}
}
@ -332,7 +332,7 @@ int bch2_empty_dir_trans(struct btree_trans *trans, u64 dir_inum)
struct bkey_s_c k;
int ret;
for_each_btree_key(trans, iter, BTREE_ID_DIRENTS,
for_each_btree_key(trans, iter, BTREE_ID_dirents,
POS(dir_inum, 0), 0, k, ret) {
if (k.k->p.inode > dir_inum)
break;
@ -357,7 +357,7 @@ int bch2_readdir(struct bch_fs *c, u64 inum, struct dir_context *ctx)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
for_each_btree_key(&trans, iter, BTREE_ID_dirents,
POS(inum, ctx->pos), 0, k, ret) {
if (k.k->p.inode > inum)
break;

View File

@ -433,7 +433,7 @@ static int get_stripe_key(struct bch_fs *c, u64 idx, struct ec_stripe_buf *strip
int ret;
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, idx), BTREE_ITER_SLOTS);
iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS(0, idx), BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
@ -668,7 +668,7 @@ void bch2_stripes_heap_update(struct bch_fs *c,
static int ec_stripe_delete(struct bch_fs *c, size_t idx)
{
return bch2_btree_delete_range(c, BTREE_ID_EC,
return bch2_btree_delete_range(c, BTREE_ID_stripes,
POS(0, idx),
POS(0, idx + 1),
NULL);
@ -713,7 +713,7 @@ static int ec_stripe_bkey_insert(struct bch_fs *c,
retry:
bch2_trans_begin(&trans);
for_each_btree_key(&trans, iter, BTREE_ID_EC, start_pos,
for_each_btree_key(&trans, iter, BTREE_ID_stripes, start_pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
if (start_pos.offset) {
@ -765,7 +765,7 @@ static int ec_stripe_bkey_update(struct btree_trans *trans,
unsigned i;
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_EC,
iter = bch2_trans_get_iter(trans, BTREE_ID_stripes,
new->k.p, BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
@ -831,7 +831,7 @@ static int ec_stripe_update_ptrs(struct bch_fs *c,
/* XXX this doesn't support the reflink btree */
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
bkey_start_pos(pos),
BTREE_ITER_INTENT);
@ -1604,7 +1604,7 @@ int bch2_stripes_write(struct bch_fs *c, unsigned flags)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS_MIN,
iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
genradix_for_each(&c->stripes[0], giter, m) {
@ -1645,7 +1645,7 @@ static int bch2_stripes_read_fn(struct bch_fs *c, enum btree_id id,
int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
{
int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_EC,
int ret = bch2_btree_and_journal_walk(c, journal_keys, BTREE_ID_stripes,
NULL, bch2_stripes_read_fn);
if (ret)
bch_err(c, "error reading stripes: %i", ret);
@ -1663,7 +1663,7 @@ int bch2_ec_mem_alloc(struct bch_fs *c, bool gc)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EC, POS(0, U64_MAX), 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_stripes, POS(0, U64_MAX), 0);
k = bch2_btree_iter_prev(iter);
if (!IS_ERR_OR_NULL(k.k))

View File

@ -11,13 +11,13 @@ bool bch2_inconsistent_error(struct bch_fs *c)
set_bit(BCH_FS_ERROR, &c->flags);
switch (c->opts.errors) {
case BCH_ON_ERROR_CONTINUE:
case BCH_ON_ERROR_continue:
return false;
case BCH_ON_ERROR_RO:
case BCH_ON_ERROR_ro:
if (bch2_fs_emergency_read_only(c))
bch_err(c, "emergency read only");
return true;
case BCH_ON_ERROR_PANIC:
case BCH_ON_ERROR_panic:
panic(bch2_fmt(c, "panic after error"));
return true;
default:
@ -38,10 +38,10 @@ void bch2_io_error_work(struct work_struct *work)
bool dev;
down_write(&c->state_lock);
dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_RO,
dev = bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_ro,
BCH_FORCE_IF_DEGRADED);
if (dev
? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_RO,
? __bch2_dev_set_state(c, ca, BCH_MEMBER_STATE_ro,
BCH_FORCE_IF_DEGRADED)
: bch2_fs_emergency_read_only(c))
bch_err(ca,

View File

@ -62,7 +62,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
struct bkey_s_c r_k;
for_each_btree_key(trans, iter,
BTREE_ID_REFLINK, POS(0, idx + offset),
BTREE_ID_reflink, POS(0, idx + offset),
BTREE_ITER_SLOTS, r_k, ret2) {
if (bkey_cmp(bkey_start_pos(r_k.k),
POS(0, idx + sectors)) >= 0)

View File

@ -677,7 +677,7 @@ bool bch2_check_range_allocated(struct bch_fs *c, struct bpos pos, u64 size,
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, pos,
for_each_btree_key(&trans, iter, BTREE_ID_extents, pos,
BTREE_ITER_SLOTS, k, err) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
@ -725,7 +725,7 @@ static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
ca = bch_dev_bkey_exists(c, p.ptr.dev);
if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
if (ca->mi.state != BCH_MEMBER_STATE_failed)
durability = max_t(unsigned, durability, ca->mi.durability);
if (p.has_ec)

View File

@ -882,7 +882,7 @@ void bch2_readahead(struct readahead_control *ractl)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bch2_pagecache_add_get(&inode->ei_pagecache_lock);
@ -928,7 +928,7 @@ static void __bchfs_readpage(struct bch_fs *c, struct bch_read_bio *rbio,
BUG_ON(!bio_add_page(&rbio->bio, page, PAGE_SIZE, 0));
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS);
bchfs_read(&trans, iter, rbio, inum, NULL);
@ -2132,7 +2132,7 @@ static inline int range_has_data(struct bch_fs *c,
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, start, 0, k, ret) {
for_each_btree_key(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
break;
@ -2506,7 +2506,7 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
goto err;
}
src = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
src = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inode->v.i_ino, src_start >> 9),
BTREE_ITER_INTENT);
dst = bch2_trans_copy_iter(&trans, src);
@ -2661,7 +2661,7 @@ static long bchfs_fallocate(struct bch_inode_info *inode, int mode,
truncate_pagecache_range(&inode->v, offset, end - 1);
}
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inode->v.i_ino, block_start >> 9),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
end_pos = POS(inode->v.i_ino, block_end >> 9);
@ -2994,7 +2994,7 @@ static loff_t bch2_seek_data(struct file *file, u64 offset)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS(inode->v.i_ino, offset >> 9), 0, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {
break;
@ -3089,7 +3089,7 @@ static loff_t bch2_seek_hole(struct file *file, u64 offset)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS(inode->v.i_ino, offset >> 9),
BTREE_ITER_SLOTS, k, ret) {
if (k.k->p.inode != inode->v.i_ino) {

View File

@ -903,7 +903,7 @@ static int bch2_fiemap(struct inode *vinode, struct fiemap_extent_info *info,
bch2_bkey_buf_init(&prev);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(ei->v.i_ino, start >> 9), 0);
retry:
while ((k = bch2_btree_iter_peek(iter)).k &&

View File

@ -24,7 +24,7 @@ static s64 bch2_count_inode_sectors(struct btree_trans *trans, u64 inum)
u64 sectors = 0;
int ret;
for_each_btree_key(trans, iter, BTREE_ID_EXTENTS,
for_each_btree_key(trans, iter, BTREE_ID_extents,
POS(inum, 0), 0, k, ret) {
if (k.k->p.inode != inum)
break;
@ -257,7 +257,7 @@ static void hash_set_chain_start(struct btree_trans *trans,
struct hash_check *h,
struct btree_iter *k_iter, struct bkey_s_c k)
{
bool hole = (k.k->type != KEY_TYPE_whiteout &&
bool hole = (k.k->type != KEY_TYPE_hash_whiteout &&
k.k->type != desc.key_type);
if (hole || k.k->p.offset > h->chain_end + 1)
@ -396,7 +396,7 @@ err_redo:
if (fsck_err(c, "cannot fix dirent by removing trailing garbage %s (%zu)\n"
"hash table key at wrong offset: btree %u, offset %llu, "
"hashed to %llu chain starts at %llu\n%s",
buf, strlen(buf), BTREE_ID_DIRENTS,
buf, strlen(buf), BTREE_ID_dirents,
k->k->p.offset, hash, h->chain->pos.offset,
(bch2_bkey_val_to_text(&PBUF(buf), c,
*k), buf))) {
@ -415,7 +415,7 @@ err_redo:
static int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size)
{
return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
return bch2_btree_delete_range(c, BTREE_ID_extents,
POS(inode_nr, round_up(new_size, block_bytes(c)) >> 9),
POS(inode_nr + 1, 0), NULL);
}
@ -474,7 +474,7 @@ static int check_extents(struct bch_fs *c)
bch_verbose(c, "checking extents");
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(BCACHEFS_ROOT_INO, 0),
BTREE_ITER_INTENT);
retry:
@ -537,7 +537,7 @@ retry:
bch2_inode_pack(c, &p, &w.inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES,
ret = bch2_btree_insert(c, BTREE_ID_inodes,
&p.inode.k_i, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
@ -595,7 +595,7 @@ static int check_dirents(struct bch_fs *c)
hash_check_init(&h);
iter = bch2_trans_get_iter(&trans, BTREE_ID_DIRENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_dirents,
POS(BCACHEFS_ROOT_INO, 0), 0);
retry:
for_each_btree_key_continue(iter, 0, k, ret) {
@ -747,7 +747,7 @@ static int check_xattrs(struct bch_fs *c)
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs,
POS(BCACHEFS_ROOT_INO, 0), 0);
retry:
for_each_btree_key_continue(iter, 0, k, ret) {
@ -808,7 +808,7 @@ create_root:
bch2_inode_pack(c, &packed, root_inode);
return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
return bch2_btree_insert(c, BTREE_ID_inodes, &packed.inode.k_i,
NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
@ -955,7 +955,7 @@ next:
if (e->offset == U64_MAX)
goto up;
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
for_each_btree_key(&trans, iter, BTREE_ID_dirents,
POS(e->inum, e->offset + 1), 0, k, ret) {
if (k.k->p.inode != e->inum)
break;
@ -1008,7 +1008,7 @@ up:
path.nr--;
}
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS_MIN, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS_MIN, 0);
retry:
for_each_btree_key_continue(iter, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
@ -1105,7 +1105,7 @@ static int bch2_gc_walk_dirents(struct bch_fs *c, nlink_table *links,
inc_link(c, links, range_start, range_end, BCACHEFS_ROOT_INO, false);
for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS, POS_MIN, 0, k, ret) {
for_each_btree_key(&trans, iter, BTREE_ID_dirents, POS_MIN, 0, k, ret) {
switch (k.k->type) {
case KEY_TYPE_dirent:
d = bkey_s_c_to_dirent(k);
@ -1346,7 +1346,7 @@ static int bch2_gc_walk_inodes(struct bch_fs *c,
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES,
iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes,
POS(0, range_start), 0);
nlinks_iter = genradix_iter_init(links, 0);
@ -1472,7 +1472,7 @@ int bch2_fsck_walk_inodes_only(struct bch_fs *c)
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN, 0, k, ret) {
for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN, 0, k, ret) {
if (k.k->type != KEY_TYPE_inode)
continue;

View File

@ -300,7 +300,7 @@ struct btree_iter *bch2_inode_peek(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_INODES, POS(0, inum),
iter = bch2_trans_get_iter(trans, BTREE_ID_inodes, POS(0, inum),
BTREE_ITER_CACHED|flags);
k = bch2_btree_iter_peek_cached(iter);
ret = bkey_err(k);
@ -498,7 +498,7 @@ int bch2_inode_create(struct btree_trans *trans,
if (IS_ERR(inode_p))
return PTR_ERR(inode_p);
again:
for_each_btree_key(trans, iter, BTREE_ID_INODES, POS(0, start),
for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, start),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
if (bkey_cmp(iter->pos, POS(0, max)) > 0)
break;
@ -513,7 +513,7 @@ again:
* cache before using a slot:
*/
if (k.k->type != KEY_TYPE_inode &&
!bch2_btree_key_cache_find(c, BTREE_ID_INODES, iter->pos))
!bch2_btree_key_cache_find(c, BTREE_ID_inodes, iter->pos))
goto found_slot;
}
@ -560,11 +560,11 @@ int bch2_inode_rm(struct bch_fs *c, u64 inode_nr, bool cached)
* XXX: the dirent code could ideally delete whiteouts when they're no
* longer needed
*/
ret = bch2_btree_delete_range_trans(&trans, BTREE_ID_EXTENTS,
ret = bch2_btree_delete_range_trans(&trans, BTREE_ID_extents,
start, end, NULL) ?:
bch2_btree_delete_range_trans(&trans, BTREE_ID_XATTRS,
bch2_btree_delete_range_trans(&trans, BTREE_ID_xattrs,
start, end, NULL) ?:
bch2_btree_delete_range_trans(&trans, BTREE_ID_DIRENTS,
bch2_btree_delete_range_trans(&trans, BTREE_ID_dirents,
start, end, NULL);
if (ret)
goto err;
@ -574,11 +574,11 @@ retry:
bi_generation = 0;
if (cached) {
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(0, inode_nr),
iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
BTREE_ITER_CACHED|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_cached(iter);
} else {
iter = bch2_trans_get_iter(&trans, BTREE_ID_INODES, POS(0, inode_nr),
iter = bch2_trans_get_iter(&trans, BTREE_ID_inodes, POS(0, inode_nr),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
}
@ -635,7 +635,7 @@ int bch2_inode_find_by_inum_trans(struct btree_trans *trans, u64 inode_nr,
struct bkey_s_c k;
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_INODES,
iter = bch2_trans_get_iter(trans, BTREE_ID_inodes,
POS(0, inode_nr), BTREE_ITER_CACHED);
k = bch2_btree_iter_peek_cached(iter);
ret = bkey_err(k);

View File

@ -398,7 +398,7 @@ int bch2_fpunch(struct bch_fs *c, u64 inum, u64 start, u64 end,
int ret = 0;
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inum, start),
BTREE_ITER_INTENT);
@ -425,7 +425,7 @@ int bch2_write_index_default(struct bch_write_op *op)
bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
bkey_start_pos(&k->k),
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
@ -1538,8 +1538,8 @@ static struct promote_op *promote_alloc(struct bch_fs *c,
promote = __promote_alloc(c,
k.k->type == KEY_TYPE_reflink_v
? BTREE_ID_REFLINK
: BTREE_ID_EXTENTS,
? BTREE_ID_reflink
: BTREE_ID_extents,
k, pos, pick, opts, sectors, rbio);
if (!promote)
return NULL;
@ -1635,7 +1635,7 @@ static void bch2_read_retry_nodecode(struct bch_fs *c, struct bch_read_bio *rbio
bch2_bkey_buf_init(&sk);
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
rbio->pos, BTREE_ITER_SLOTS);
retry:
rbio->bio.bi_status = 0;
@ -1690,7 +1690,7 @@ static void bch2_read_retry(struct bch_fs *c, struct bch_read_bio *rbio,
retry:
bch2_trans_begin(&trans);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS(inode, bvec_iter.bi_sector),
BTREE_ITER_SLOTS, k, ret) {
unsigned bytes, sectors, offset_into_extent;
@ -1809,7 +1809,7 @@ static int __bch2_rbio_narrow_crcs(struct btree_trans *trans,
if (crc_is_compressed(rbio->pick.crc))
return 0;
iter = bch2_trans_get_iter(trans, BTREE_ID_EXTENTS, rbio->pos,
iter = bch2_trans_get_iter(trans, BTREE_ID_extents, rbio->pos,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);
if ((ret = bkey_err(k)))
@ -2019,7 +2019,7 @@ int __bch2_read_indirect_extent(struct btree_trans *trans,
reflink_offset = le64_to_cpu(bkey_i_to_reflink_p(orig_k->k)->v.idx) +
*offset_into_extent;
iter = bch2_trans_get_iter(trans, BTREE_ID_REFLINK,
iter = bch2_trans_get_iter(trans, BTREE_ID_reflink,
POS(0, reflink_offset),
BTREE_ITER_SLOTS);
k = bch2_btree_iter_peek_slot(iter);
@ -2320,7 +2320,7 @@ void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, u64 inode)
retry:
bch2_trans_begin(&trans);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents,
POS(inode, rbio->bio.bi_iter.bi_sector),
BTREE_ITER_SLOTS);
while (1) {

View File

@ -837,13 +837,15 @@ static void bch2_journal_ptrs_to_text(struct printbuf *out, struct bch_fs *c,
for (i = 0; i < j->nr_ptrs; i++) {
struct bch_dev *ca = c->devs[j->ptrs[i].dev];
u64 offset;
div64_u64_rem(j->ptrs[i].offset, ca->mi.bucket_size, &offset);
if (i)
pr_buf(out, " ");
pr_buf(out, "%u:%llu (offset %llu)",
j->ptrs[i].dev,
(u64) j->ptrs[i].offset,
(u64) j->ptrs[i].offset % ca->mi.bucket_size);
(u64) j->ptrs[i].offset, offset);
}
}
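
The journal-pointer hunk above contains the one change in this diff that is not an enum rename: a 64-bit modulo is replaced with div64_u64_rem(). The usual reason for this change is that on 32-bit kernels, `%` on u64 operands emits a call to a libgcc helper the kernel does not provide, so the math64.h helpers must be used instead. A userspace stand-in to show the semantics (values illustrative):

	#include <stdio.h>
	#include <stdint.h>

	/* Same contract as the kernel's div64_u64_rem() from <linux/math64.h>:
	 * returns the quotient, stores the remainder. */
	static uint64_t div64_u64_rem(uint64_t dividend, uint64_t divisor,
				      uint64_t *remainder)
	{
		*remainder = dividend % divisor;	/* fine in userspace */
		return dividend / divisor;
	}

	int main(void)
	{
		uint64_t offset;

		/* journal pointer at sector 1234567, 2048-sector buckets: */
		div64_u64_rem(1234567, 2048, &offset);
		printf("offset within bucket: %llu\n",
		       (unsigned long long) offset);	/* prints 1671 */
		return 0;
	}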
@ -869,8 +871,8 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list,
!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_journal)))
continue;
if ((ca->mi.state == BCH_MEMBER_STATE_RW ||
ca->mi.state == BCH_MEMBER_STATE_RO) &&
if ((ca->mi.state == BCH_MEMBER_STATE_rw ||
ca->mi.state == BCH_MEMBER_STATE_ro) &&
percpu_ref_tryget(&ca->io_ref))
closure_call(&ca->journal.read,
bch2_journal_read_device,
@ -1063,7 +1065,7 @@ static void __journal_write_alloc(struct journal *j,
* it:
*/
if (!ca->mi.durability ||
ca->mi.state != BCH_MEMBER_STATE_RW ||
ca->mi.state != BCH_MEMBER_STATE_rw ||
!ja->nr ||
bch2_bkey_has_device(bkey_i_to_s_c(&w->key),
ca->dev_idx) ||

View File

@ -99,8 +99,8 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
return __bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_EXTENTS) ?:
__bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_REFLINK);
return __bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_extents) ?:
__bch2_dev_usrdata_drop(c, dev_idx, flags, BTREE_ID_reflink);
}
static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)

View File

@ -579,7 +579,7 @@ peek:
if (!bkey_extent_is_direct_data(k.k))
goto next_nondata;
if (btree_id == BTREE_ID_EXTENTS &&
if (btree_id == BTREE_ID_extents &&
cur_inum != k.k->p.inode) {
struct bch_inode_unpacked inode;
@ -664,9 +664,9 @@ int bch2_move_data(struct bch_fs *c,
stats->data_type = BCH_DATA_user;
ret = __bch2_move_data(c, &ctxt, rate, wp, start, end,
pred, arg, stats, BTREE_ID_EXTENTS) ?:
pred, arg, stats, BTREE_ID_extents) ?:
__bch2_move_data(c, &ctxt, rate, wp, start, end,
pred, arg, stats, BTREE_ID_REFLINK);
pred, arg, stats, BTREE_ID_reflink);
move_ctxt_wait_event(&ctxt, list_empty(&ctxt.reads));
closure_sync(&ctxt.cl);

View File

@ -9,72 +9,54 @@
#include "super-io.h"
#include "util.h"
#define x(t, n) #t,
const char * const bch2_error_actions[] = {
"continue",
"remount-ro",
"panic",
BCH_ERROR_ACTIONS()
NULL
};
const char * const bch2_sb_features[] = {
#define x(f, n) #f,
BCH_SB_FEATURES()
#undef x
NULL
};
const char * const bch2_btree_ids[] = {
BCH_BTREE_IDS()
NULL
};
const char * const bch2_csum_opts[] = {
"none",
"crc32c",
"crc64",
BCH_CSUM_OPTS()
NULL
};
const char * const bch2_compression_opts[] = {
#define x(t, n) #t,
BCH_COMPRESSION_OPTS()
#undef x
NULL
};
const char * const bch2_str_hash_types[] = {
"crc32c",
"crc64",
"siphash",
BCH_STR_HASH_OPTS()
NULL
};
const char * const bch2_data_types[] = {
#define x(t, n) #t,
BCH_DATA_TYPES()
#undef x
NULL
};
const char * const bch2_cache_replacement_policies[] = {
"lru",
"fifo",
"random",
BCH_CACHE_REPLACEMENT_POLICIES()
NULL
};
/* Default is -1; we skip past it for struct cached_dev's cache mode */
const char * const bch2_cache_modes[] = {
"default",
"writethrough",
"writeback",
"writearound",
"none",
const char * const bch2_member_states[] = {
BCH_MEMBER_STATES()
NULL
};
const char * const bch2_dev_state[] = {
"readwrite",
"readonly",
"failed",
"spare",
NULL
};
#undef x
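
With the single `#define x(t, n) #t,` at the top of the file, each of these option tables is now generated from its x-macro list. For example, the new bch2_member_states[] expands to (expansion shown for illustration):

	const char * const bch2_member_states[] = {
		"rw",
		"ro",
		"failed",
		"spare",
		NULL
	};

Note that the user-visible strings change along with the identifiers: the old hand-written bch2_dev_state[] said "readwrite" and "readonly".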
void bch2_opts_apply(struct bch_opts *dst, struct bch_opts src)
{

View File

@ -10,13 +10,13 @@
extern const char * const bch2_error_actions[];
extern const char * const bch2_sb_features[];
extern const char * const bch2_btree_ids[];
extern const char * const bch2_csum_opts[];
extern const char * const bch2_compression_opts[];
extern const char * const bch2_str_hash_types[];
extern const char * const bch2_data_types[];
extern const char * const bch2_cache_replacement_policies[];
extern const char * const bch2_cache_modes[];
extern const char * const bch2_dev_state[];
extern const char * const bch2_member_states[];
/*
* Mount options; we also store defaults in the superblock.
@ -89,7 +89,7 @@ enum opt_type {
x(errors, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_error_actions), \
BCH_SB_ERROR_ACTION, BCH_ON_ERROR_RO, \
BCH_SB_ERROR_ACTION, BCH_ON_ERROR_ro, \
NULL, "Action to take on filesystem error") \
x(metadata_replicas, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
@ -114,12 +114,12 @@ enum opt_type {
x(metadata_checksum, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_csum_opts), \
BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_CRC32C, \
BCH_SB_META_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
NULL, NULL) \
x(data_checksum, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
OPT_STR(bch2_csum_opts), \
BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_CRC32C, \
BCH_SB_DATA_CSUM_TYPE, BCH_CSUM_OPT_crc32c, \
NULL, NULL) \
x(compression, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \
@ -134,7 +134,7 @@ enum opt_type {
x(str_hash, u8, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME, \
OPT_STR(bch2_str_hash_types), \
BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_SIPHASH, \
BCH_SB_STR_HASH_TYPE, BCH_STR_HASH_OPT_siphash, \
NULL, "Hash function for directory entries and xattrs")\
x(metadata_target, u16, \
OPT_FORMAT|OPT_MOUNT|OPT_RUNTIME|OPT_INODE, \

View File

@ -363,7 +363,7 @@ static int bch2_quota_init_type(struct bch_fs *c, enum quota_types type)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_QUOTAS, POS(type, 0),
for_each_btree_key(&trans, iter, BTREE_ID_quotas, POS(type, 0),
BTREE_ITER_PREFETCH, k, ret) {
if (k.k->p.inode != type)
break;
@ -435,7 +435,7 @@ int bch2_fs_quota_read(struct bch_fs *c)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_INODES, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_inodes, POS_MIN,
BTREE_ITER_PREFETCH, k, ret) {
switch (k.k->type) {
case KEY_TYPE_inode:
@ -526,7 +526,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
if (c->opts.usrquota)
return -EINVAL;
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_USR, 0),
POS(QTYP_USR + 1, 0),
NULL);
@ -538,7 +538,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
if (c->opts.grpquota)
return -EINVAL;
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_GRP, 0),
POS(QTYP_GRP + 1, 0),
NULL);
@ -550,7 +550,7 @@ static int bch2_quota_remove(struct super_block *sb, unsigned uflags)
if (c->opts.prjquota)
return -EINVAL;
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
ret = bch2_btree_delete_range(c, BTREE_ID_quotas,
POS(QTYP_PRJ, 0),
POS(QTYP_PRJ + 1, 0),
NULL);
@ -718,7 +718,7 @@ static int bch2_set_quota_trans(struct btree_trans *trans,
struct bkey_s_c k;
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_QUOTAS, new_quota->k.p,
iter = bch2_trans_get_iter(trans, BTREE_ID_quotas, new_quota->k.p,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
k = bch2_btree_iter_peek_slot(iter);

View File

@ -32,7 +32,7 @@ static void drop_alloc_keys(struct journal_keys *keys)
size_t src, dst;
for (src = 0, dst = 0; src < keys->nr; src++)
if (keys->d[src].btree_id != BTREE_ID_ALLOC)
if (keys->d[src].btree_id != BTREE_ID_alloc)
keys->d[dst++] = keys->d[src];
keys->nr = dst;
@ -548,7 +548,7 @@ static int __bch2_alloc_replay_key(struct btree_trans *trans, struct bkey_i *k)
struct btree_iter *iter;
int ret;
iter = bch2_trans_get_iter(trans, BTREE_ID_ALLOC, k->k.p,
iter = bch2_trans_get_iter(trans, BTREE_ID_alloc, k->k.p,
BTREE_ITER_CACHED|
BTREE_ITER_CACHED_NOFILL|
BTREE_ITER_INTENT);
@ -600,7 +600,7 @@ static int bch2_journal_replay(struct bch_fs *c,
for_each_journal_key(keys, i) {
cond_resched();
if (!i->level && i->btree_id == BTREE_ID_ALLOC) {
if (!i->level && i->btree_id == BTREE_ID_alloc) {
j->replay_journal_seq = keys.journal_seq_base + i->journal_seq;
ret = bch2_alloc_replay_key(c, i->k);
if (ret)
@ -639,7 +639,7 @@ static int bch2_journal_replay(struct bch_fs *c,
for_each_journal_key(keys, i) {
cond_resched();
if (i->level || i->btree_id == BTREE_ID_ALLOC)
if (i->level || i->btree_id == BTREE_ID_alloc)
continue;
replay_now_at(j, keys.journal_seq_base + i->journal_seq);
@ -925,28 +925,28 @@ static int read_btree_roots(struct bch_fs *c)
if (!r->alive)
continue;
if (i == BTREE_ID_ALLOC &&
if (i == BTREE_ID_alloc &&
c->opts.reconstruct_alloc) {
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
continue;
}
if (r->error) {
__fsck_err(c, i == BTREE_ID_ALLOC
__fsck_err(c, i == BTREE_ID_alloc
? FSCK_CAN_IGNORE : 0,
"invalid btree root %s",
bch2_btree_ids[i]);
if (i == BTREE_ID_ALLOC)
if (i == BTREE_ID_alloc)
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
}
ret = bch2_btree_root_read(c, i, &r->key, r->level);
if (ret) {
__fsck_err(c, i == BTREE_ID_ALLOC
__fsck_err(c, i == BTREE_ID_alloc
? FSCK_CAN_IGNORE : 0,
"error reading btree root %s",
bch2_btree_ids[i]);
if (i == BTREE_ID_ALLOC)
if (i == BTREE_ID_alloc)
c->sb.compat &= ~(1ULL << BCH_COMPAT_FEAT_ALLOC_INFO);
}
}
@ -1321,7 +1321,7 @@ int bch2_fs_initialize(struct bch_fs *c)
bch2_inode_pack(c, &packed_inode, &root_inode);
err = "error creating root directory";
ret = bch2_btree_insert(c, BTREE_ID_INODES,
ret = bch2_btree_insert(c, BTREE_ID_inodes,
&packed_inode.inode.k_i,
NULL, NULL, 0);
if (ret)

View File

@ -119,7 +119,7 @@ static int bch2_make_extent_indirect(struct btree_trans *trans,
if (orig->k.type == KEY_TYPE_inline_data)
bch2_check_set_feature(c, BCH_FEATURE_reflink_inline_data);
for_each_btree_key(trans, reflink_iter, BTREE_ID_REFLINK,
for_each_btree_key(trans, reflink_iter, BTREE_ID_reflink,
POS(0, c->reflink_hint),
BTREE_ITER_INTENT|BTREE_ITER_SLOTS, k, ret) {
if (reflink_iter->pos.inode) {
@ -219,9 +219,9 @@ s64 bch2_remap_range(struct bch_fs *c,
bch2_bkey_buf_init(&new_src);
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 4096);
src_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, src_start,
src_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, src_start,
BTREE_ITER_INTENT);
dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, dst_start,
dst_iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, dst_start,
BTREE_ITER_INTENT);
while (1) {

View File

@ -18,11 +18,11 @@ static inline enum bch_str_hash_type
bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)
{
switch (opt) {
case BCH_STR_HASH_OPT_CRC32C:
case BCH_STR_HASH_OPT_crc32c:
return BCH_STR_HASH_CRC32C;
case BCH_STR_HASH_OPT_CRC64:
case BCH_STR_HASH_OPT_crc64:
return BCH_STR_HASH_CRC64;
case BCH_STR_HASH_OPT_SIPHASH:
case BCH_STR_HASH_OPT_siphash:
return c->sb.features & (1ULL << BCH_FEATURE_new_siphash)
? BCH_STR_HASH_SIPHASH
: BCH_STR_HASH_SIPHASH_OLD;
@ -156,7 +156,7 @@ bch2_hash_lookup(struct btree_trans *trans,
if (k.k->type == desc.key_type) {
if (!desc.cmp_key(k, key))
return iter;
} else if (k.k->type == KEY_TYPE_whiteout) {
} else if (k.k->type == KEY_TYPE_hash_whiteout) {
;
} else {
/* hole, not found */
@ -210,7 +210,7 @@ int bch2_hash_needs_whiteout(struct btree_trans *trans,
for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, k, ret) {
if (k.k->type != desc.key_type &&
k.k->type != KEY_TYPE_whiteout)
k.k->type != KEY_TYPE_hash_whiteout)
break;
if (k.k->type == desc.key_type &&
@ -254,7 +254,7 @@ int bch2_hash_set(struct btree_trans *trans,
!(flags & BCH_HASH_SET_MUST_REPLACE))
slot = bch2_trans_copy_iter(trans, iter);
if (k.k->type != KEY_TYPE_whiteout)
if (k.k->type != KEY_TYPE_hash_whiteout)
goto not_found;
}
@ -303,7 +303,7 @@ int bch2_hash_delete_at(struct btree_trans *trans,
bkey_init(&delete->k);
delete->k.p = iter->pos;
delete->k.type = ret ? KEY_TYPE_whiteout : KEY_TYPE_deleted;
delete->k.type = ret ? KEY_TYPE_hash_whiteout : KEY_TYPE_deleted;
bch2_trans_update(trans, iter, delete, 0);
return 0;

View File

@ -939,7 +939,7 @@ void bch2_sb_clean_renumber(struct bch_sb_field_clean *clean, int write)
for (entry = clean->start;
entry < (struct jset_entry *) vstruct_end(&clean->field);
entry = vstruct_next(entry))
bch2_bkey_renumber(BKEY_TYPE_BTREE, bkey_to_packed(entry->start), write);
bch2_bkey_renumber(BKEY_TYPE_btree, bkey_to_packed(entry->start), write);
}
int bch2_fs_mark_dirty(struct bch_fs *c)

View File

@ -1270,16 +1270,16 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
lockdep_assert_held(&c->state_lock);
switch (new_state) {
case BCH_MEMBER_STATE_RW:
case BCH_MEMBER_STATE_rw:
return true;
case BCH_MEMBER_STATE_RO:
if (ca->mi.state != BCH_MEMBER_STATE_RW)
case BCH_MEMBER_STATE_ro:
if (ca->mi.state != BCH_MEMBER_STATE_rw)
return true;
/* do we have enough devices to write to? */
for_each_member_device(ca2, c, i)
if (ca2 != ca)
nr_rw += ca2->mi.state == BCH_MEMBER_STATE_RW;
nr_rw += ca2->mi.state == BCH_MEMBER_STATE_rw;
required = max(!(flags & BCH_FORCE_IF_METADATA_DEGRADED)
? c->opts.metadata_replicas
@ -1289,10 +1289,10 @@ bool bch2_dev_state_allowed(struct bch_fs *c, struct bch_dev *ca,
: c->opts.data_replicas_required);
return nr_rw >= required;
case BCH_MEMBER_STATE_FAILED:
case BCH_MEMBER_STATE_SPARE:
if (ca->mi.state != BCH_MEMBER_STATE_RW &&
ca->mi.state != BCH_MEMBER_STATE_RO)
case BCH_MEMBER_STATE_failed:
case BCH_MEMBER_STATE_spare:
if (ca->mi.state != BCH_MEMBER_STATE_rw &&
ca->mi.state != BCH_MEMBER_STATE_ro)
return true;
/* do we have enough devices to read from? */
@ -1329,8 +1329,8 @@ static bool bch2_fs_may_start(struct bch_fs *c)
ca = bch_dev_locked(c, i);
if (!bch2_dev_is_online(ca) &&
(ca->mi.state == BCH_MEMBER_STATE_RW ||
ca->mi.state == BCH_MEMBER_STATE_RO)) {
(ca->mi.state == BCH_MEMBER_STATE_rw ||
ca->mi.state == BCH_MEMBER_STATE_ro)) {
mutex_unlock(&c->sb_lock);
return false;
}
@ -1363,7 +1363,7 @@ static const char *__bch2_dev_read_write(struct bch_fs *c, struct bch_dev *ca)
{
lockdep_assert_held(&c->state_lock);
BUG_ON(ca->mi.state != BCH_MEMBER_STATE_RW);
BUG_ON(ca->mi.state != BCH_MEMBER_STATE_rw);
bch2_dev_allocator_add(c, ca);
bch2_recalc_capacity(c);
@ -1386,10 +1386,10 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
if (!bch2_dev_state_allowed(c, ca, new_state, flags))
return -EINVAL;
if (new_state != BCH_MEMBER_STATE_RW)
if (new_state != BCH_MEMBER_STATE_rw)
__bch2_dev_read_only(c, ca);
bch_notice(ca, "%s", bch2_dev_state[new_state]);
bch_notice(ca, "%s", bch2_member_states[new_state]);
mutex_lock(&c->sb_lock);
mi = bch2_sb_get_members(c->disk_sb.sb);
@ -1397,7 +1397,7 @@ int __bch2_dev_set_state(struct bch_fs *c, struct bch_dev *ca,
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
if (new_state == BCH_MEMBER_STATE_RW &&
if (new_state == BCH_MEMBER_STATE_rw &&
__bch2_dev_read_write(c, ca))
ret = -ENOMEM;
@ -1430,7 +1430,7 @@ int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
for (i = 0; i < ca->mi.nbuckets; i++) {
ret = bch2_btree_key_cache_flush(&trans,
BTREE_ID_ALLOC, POS(ca->dev_idx, i));
BTREE_ID_alloc, POS(ca->dev_idx, i));
if (ret)
break;
}
@ -1439,7 +1439,7 @@ int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
if (ret)
return ret;
return bch2_btree_delete_range(c, BTREE_ID_ALLOC,
return bch2_btree_delete_range(c, BTREE_ID_alloc,
POS(ca->dev_idx, 0),
POS(ca->dev_idx + 1, 0),
NULL);
@ -1459,7 +1459,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
*/
percpu_ref_put(&ca->ref);
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot remove without losing data");
goto err;
}
@ -1543,7 +1543,7 @@ int bch2_dev_remove(struct bch_fs *c, struct bch_dev *ca, int flags)
bch2_dev_usage_journal_reserve(c);
return 0;
err:
if (ca->mi.state == BCH_MEMBER_STATE_RW &&
if (ca->mi.state == BCH_MEMBER_STATE_rw &&
!percpu_ref_is_zero(&ca->io_ref))
__bch2_dev_read_write(c, ca);
up_write(&c->state_lock);
@ -1667,7 +1667,7 @@ have_slot:
if (ret)
goto err_late;
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
if (ca->mi.state == BCH_MEMBER_STATE_rw) {
err = __bch2_dev_read_write(c, ca);
if (err)
goto err_late;
@ -1728,7 +1728,7 @@ int bch2_dev_online(struct bch_fs *c, const char *path)
goto err;
}
if (ca->mi.state == BCH_MEMBER_STATE_RW) {
if (ca->mi.state == BCH_MEMBER_STATE_rw) {
err = __bch2_dev_read_write(c, ca);
if (err)
goto err;
@ -1762,7 +1762,7 @@ int bch2_dev_offline(struct bch_fs *c, struct bch_dev *ca, int flags)
return 0;
}
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_FAILED, flags)) {
if (!bch2_dev_state_allowed(c, ca, BCH_MEMBER_STATE_failed, flags)) {
bch_err(ca, "Cannot offline required disk");
up_write(&c->state_lock);
return -EINVAL;

View File

@ -34,7 +34,7 @@ static inline bool bch2_dev_is_online(struct bch_dev *ca)
static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
return bch2_dev_is_online(ca) &&
ca->mi.state != BCH_MEMBER_STATE_FAILED;
ca->mi.state != BCH_MEMBER_STATE_failed;
}
static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
@ -42,8 +42,8 @@ static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
if (!percpu_ref_tryget(&ca->io_ref))
return false;
if (ca->mi.state == BCH_MEMBER_STATE_RW ||
(ca->mi.state == BCH_MEMBER_STATE_RO && rw == READ))
if (ca->mi.state == BCH_MEMBER_STATE_rw ||
(ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
return true;
percpu_ref_put(&ca->io_ref);
@ -158,11 +158,11 @@ static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
__for_each_online_member(ca, c, iter, ~0)
#define for_each_rw_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_RW)
__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)
#define for_each_readable_member(ca, c, iter) \
__for_each_online_member(ca, c, iter, \
(1 << BCH_MEMBER_STATE_RW)|(1 << BCH_MEMBER_STATE_RO))
(1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
/*
* If a key exists that references a device, the device won't be going away and

View File

@ -261,7 +261,7 @@ static int bch2_compression_stats_to_text(struct printbuf *out, struct bch_fs *c
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN, 0, k, ret)
for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN, 0, k, ret)
if (k.k->type == KEY_TYPE_extent) {
struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
const union bch_extent_entry *entry;
@ -889,7 +889,7 @@ SHOW(bch2_dev)
}
if (attr == &sysfs_state_rw) {
bch2_string_opt_to_text(&out, bch2_dev_state,
bch2_string_opt_to_text(&out, bch2_member_states,
ca->mi.state);
pr_buf(&out, "\n");
return out.pos - buf;

View File

@ -13,12 +13,12 @@ static void delete_test_keys(struct bch_fs *c)
{
int ret;
ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
ret = bch2_btree_delete_range(c, BTREE_ID_extents,
POS(0, 0), POS(0, U64_MAX),
NULL);
BUG_ON(ret);
ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
POS(0, 0), POS(0, U64_MAX),
NULL);
BUG_ON(ret);
@ -37,7 +37,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
@ -82,7 +82,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, k.k.p,
iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, k.k.p,
BTREE_ITER_INTENT);
ret = bch2_btree_iter_traverse(iter);
@ -130,7 +130,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
bkey_cookie_init(&k.k_i);
k.k.p.offset = i;
ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate: %i", ret);
@ -142,7 +142,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
POS_MIN, 0, k, ret) {
if (k.k->p.inode)
break;
@ -184,7 +184,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
k.k.p.offset = i + 8;
k.k.size = 8;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate_extents: %i", ret);
@ -196,7 +196,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS,
for_each_btree_key(&trans, iter, BTREE_ID_extents,
POS_MIN, 0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i);
i = k.k->p.offset;
@ -237,7 +237,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
bkey_cookie_init(&k.k_i);
k.k.p.offset = i * 2;
ret = bch2_btree_insert(c, BTREE_ID_XATTRS, &k.k_i,
ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate_slots: %i", ret);
@ -249,7 +249,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
0, k, ret) {
if (k.k->p.inode)
break;
@ -265,7 +265,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
BTREE_ITER_SLOTS, k, ret) {
BUG_ON(k.k->p.offset != i);
BUG_ON(bkey_deleted(k.k) != (i & 1));
@ -300,7 +300,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
k.k.p.offset = i + 16;
k.k.size = 8;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret) {
bch_err(c, "insert error in test_iterate_slots_extents: %i", ret);
@ -312,7 +312,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
0, k, ret) {
BUG_ON(bkey_start_offset(k.k) != i + 8);
BUG_ON(k.k->size != 8);
@ -326,7 +326,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
i = 0;
for_each_btree_key(&trans, iter, BTREE_ID_EXTENTS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_extents, POS_MIN,
BTREE_ITER_SLOTS, k, ret) {
BUG_ON(bkey_deleted(k.k) != !(i % 16));
@ -354,7 +354,7 @@ static int test_peek_end(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
k = bch2_btree_iter_peek(iter);
BUG_ON(k.k);
@ -374,7 +374,7 @@ static int test_peek_end_extents(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS, POS_MIN, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_extents, POS_MIN, 0);
k = bch2_btree_iter_peek(iter);
BUG_ON(k.k);
@ -403,7 +403,7 @@ static int insert_test_extent(struct bch_fs *c,
k.k_i.k.size = end - start;
k.k_i.k.version.lo = test_version++;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
NULL, NULL, 0);
if (ret)
bch_err(c, "insert error in insert_test_extent: %i", ret);
@ -475,7 +475,7 @@ static int rand_insert(struct bch_fs *c, u64 nr)
k.k.p.offset = test_rand();
ret = __bch2_trans_do(&trans, NULL, NULL, 0,
__bch2_btree_insert(&trans, BTREE_ID_XATTRS, &k.k_i));
__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i));
if (ret) {
bch_err(c, "error in rand_insert: %i", ret);
break;
@ -495,7 +495,7 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
u64 i;
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
for (i = 0; i < nr; i++) {
bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
@ -522,7 +522,7 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
u64 i;
bch2_trans_init(&trans, c, 0, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_XATTRS, POS_MIN, 0);
iter = bch2_trans_get_iter(&trans, BTREE_ID_xattrs, POS_MIN, 0);
for (i = 0; i < nr; i++) {
bch2_btree_iter_set_pos(iter, POS(0, test_rand()));
@ -561,7 +561,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
struct bkey_s_c k;
int ret = 0;
iter = bch2_trans_get_iter(trans, BTREE_ID_XATTRS, pos,
iter = bch2_trans_get_iter(trans, BTREE_ID_xattrs, pos,
BTREE_ITER_INTENT);
k = bch2_btree_iter_peek(iter);
ret = bkey_err(k);
@ -616,7 +616,7 @@ static int seq_insert(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
insert.k.p = iter->pos;
@ -643,7 +643,7 @@ static int seq_lookup(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN, 0, k, ret)
for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN, 0, k, ret)
;
bch2_trans_exit(&trans);
return ret;
@ -658,7 +658,7 @@ static int seq_overwrite(struct bch_fs *c, u64 nr)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS, POS_MIN,
for_each_btree_key(&trans, iter, BTREE_ID_xattrs, POS_MIN,
BTREE_ITER_INTENT, k, ret) {
struct bkey_i_cookie u;
@ -679,7 +679,7 @@ static int seq_delete(struct bch_fs *c, u64 nr)
{
int ret;
ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
POS(0, 0), POS(0, U64_MAX),
NULL);
if (ret)

View File

@ -61,7 +61,7 @@ static bool xattr_cmp_bkey(struct bkey_s_c _l, struct bkey_s_c _r)
}
const struct bch_hash_desc bch2_xattr_hash_desc = {
.btree_id = BTREE_ID_XATTRS,
.btree_id = BTREE_ID_xattrs,
.key_type = KEY_TYPE_xattr,
.hash_key = xattr_hash_key,
.hash_bkey = xattr_hash_bkey,
@ -279,7 +279,7 @@ ssize_t bch2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
bch2_trans_init(&trans, c, 0, 0);
for_each_btree_key(&trans, iter, BTREE_ID_XATTRS,
for_each_btree_key(&trans, iter, BTREE_ID_xattrs,
POS(inum, 0), 0, k, ret) {
BUG_ON(k.k->p.inode < inum);