On disk format updates

Kent Overstreet 2016-01-13 22:18:22 -09:00
parent 009d6db7b0
commit cbde373685
3 changed files with 257 additions and 167 deletions

File 1 of 3:

@@ -283,7 +283,7 @@ void write_backingdev_sb(int fd, unsigned block_size, unsigned mode,
 			 uuid_le set_uuid)
 {
 	char uuid_str[40];
-	struct cache_sb sb;
+	struct backingdev_sb sb;
 
 	memset(&sb, 0, sizeof(struct cache_sb));
@@ -302,7 +302,7 @@ void write_backingdev_sb(int fd, unsigned block_size, unsigned mode,
 	if (data_offset != BDEV_DATA_START_DEFAULT) {
 		sb.version = BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
-		sb.bdev_data_offset = data_offset;
+		sb.data_offset = data_offset;
 	}
 
 	sb.csum = csum_set(&sb, BCH_CSUM_CRC64);
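Note: data_offset is only meaningful once the version field says so. A reader-side sketch of the same rule (not part of this commit; bdev_data_start() is an invented helper):

	static inline __u64 bdev_data_start(const struct backingdev_sb *sb)
	{
		/* data_offset is only valid for the WITH_OFFSET version */
		return __le64_to_cpu(sb->version) == BCACHE_SB_VERSION_BDEV_WITH_OFFSET
			? __le64_to_cpu(sb->data_offset)
			: BDEV_DATA_START_DEFAULT;
	}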

File 2 of 3:

@@ -14,18 +14,44 @@ extern "C" {
 #include <asm/byteorder.h>
 #include <linux/uuid.h>
 
-#define BITMASK(name, type, field, offset, end)				\
+#define LE32_BITMASK(name, type, field, offset, end)			\
 static const unsigned	name##_OFFSET = offset;				\
 static const unsigned	name##_BITS = (end - offset);			\
 static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
 									\
 static inline __u64 name(const type *k)				\
-{ return (k->field >> offset) & ~(~0ULL << (end - offset)); }		\
+{									\
+	return (__le32_to_cpu(k->field) >> offset) &			\
+		~(~0ULL << (end - offset));				\
+}									\
 									\
 static inline void SET_##name(type *k, __u64 v)			\
 {									\
-	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
-	k->field |= (v & ~(~0ULL << (end - offset))) << offset;	\
+	__u64 new = __le32_to_cpu(k->field);				\
+									\
+	new &= ~(~(~0ULL << (end - offset)) << offset);			\
+	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
+	k->field = __cpu_to_le32(new);					\
+}
+
+#define LE64_BITMASK(name, type, field, offset, end)			\
+static const unsigned	name##_OFFSET = offset;				\
+static const unsigned	name##_BITS = (end - offset);			\
+static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
+									\
+static inline __u64 name(const type *k)				\
+{									\
+	return (__le64_to_cpu(k->field) >> offset) &			\
+		~(~0ULL << (end - offset));				\
+}									\
+									\
+static inline void SET_##name(type *k, __u64 v)			\
+{									\
+	__u64 new = __le64_to_cpu(k->field);				\
+									\
+	new &= ~(~(~0ULL << (end - offset)) << offset);			\
+	new |= (v & ~(~0ULL << (end - offset))) << offset;		\
+	k->field = __cpu_to_le64(new);					\
 }
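Each LE*_BITMASK expansion generates a host-endian getter and setter over a little-endian on-disk field, so the accessors behave identically on big- and little-endian hosts. A minimal usage sketch (struct demo and DEMO_STATE are invented names, assuming the LE64_BITMASK macro above is in scope):

	struct demo { __le64 flags; };

	LE64_BITMASK(DEMO_STATE, struct demo, flags, 0, 4)

	void example(void)
	{
		struct demo d = { .flags = 0 };

		SET_DEMO_STATE(&d, 3);	/* packs 3 into bits 0..3, stored LE */
		/* DEMO_STATE(&d) == 3 regardless of host endianness */
		/* DEMO_STATE_MAX == 15, DEMO_STATE_BITS == 4 */
	}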
@@ -33,7 +59,7 @@
 struct bkey_format {
 	__u8		nr_fields;
 	/* One unused slot for now: */
 	__u8		bits_per_field[6];
-	__u64		field_offset[6];
+	__le64		field_offset[6];
 };
 
 /* Btree keys - all units are in sectors */
@@ -76,29 +102,6 @@ struct bch_val {
 	__u64		__nothing[0];
 };
 
-struct bkey_packed {
-	__u64		_data[0];
-
-	/* Size of combined key and value, in u64s */
-	__u8		u64s;
-
-	/* Format of key (0 for format local to btree node */
-	__u8		format;
-
-	/* Type of the value */
-	__u8		type;
-	__u8		key_start[0];
-
-	/*
-	 * We copy bkeys with struct assignment in various places, and while
-	 * that shouldn't be done with packed bkeys we can't disallow it in C,
-	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
-	 * to the same size as struct bkey should hopefully be safest.
-	 */
-	__u8		pad[5];
-	__u64		pad2[4];
-} __attribute__((packed)) __attribute__((aligned(8)));
-
 struct bkey {
 	__u64		_data[0];
@@ -111,8 +114,9 @@ struct bkey {
 	/* Type of the value */
 	__u8		type;
 
-	__u8		pad[1];
-
 #if defined(__LITTLE_ENDIAN)
+	__u8		pad[1];
+
 	__u32		version;
 	__u32		size;		/* extent size, in sectors */
 	struct bpos	p;
@@ -120,9 +124,33 @@ struct bkey {
 	struct bpos	p;
 	__u32		size;		/* extent size, in sectors */
 	__u32		version;
+
+	__u8		pad[1];
 #endif
 } __attribute__((packed)) __attribute__((aligned(8)));
 
+struct bkey_packed {
+	__u64		_data[0];
+
+	/* Size of combined key and value, in u64s */
+	__u8		u64s;
+
+	/* Format of key (0 for format local to btree node) */
+	__u8		format;
+
+	/* Type of the value */
+	__u8		type;
+	__u8		key_start[0];
+
+	/*
+	 * We copy bkeys with struct assignment in various places, and while
+	 * that shouldn't be done with packed bkeys we can't disallow it in C,
+	 * and it's legal to cast a bkey to a bkey_packed - so padding it out
+	 * to the same size as struct bkey should hopefully be safest.
+	 */
+	__u8		pad[sizeof(struct bkey) - 3];
+} __attribute__((packed)) __attribute__((aligned(8)));
+
 #define BKEY_U64s		(sizeof(struct bkey) / sizeof(__u64))
 #define KEY_PACKED_BITS_START	24
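The new pad[] expression makes the equal-size intent explicit: 3 header bytes (u64s, format, type) plus sizeof(struct bkey) - 3 bytes of padding. A compile-time check of that invariant (not in this commit) would be:

	_Static_assert(sizeof(struct bkey_packed) == sizeof(struct bkey),
		       "bkey_packed must stay the same size as struct bkey");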
@@ -237,7 +265,7 @@ struct bkey_i_##name {					\
 
 struct bch_cookie {
 	struct bch_val	v;
-	__u64		cookie;
+	__le64		cookie;
 };
 BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
@@ -364,26 +392,27 @@ struct bch_extent_crc64 {
 
 #define CRC64_EXTENT_SIZE_MAX	(1U << 17)
 
+/*
+ * @reservation - pointer hasn't been written to, just reserved
+ */
 struct bch_extent_ptr {
 #if defined(__LITTLE_ENDIAN_BITFIELD)
 	__u64			type:2,
 				erasure_coded:1,
-				offset:45, /* 16 petabytes */
+				reservation:1,
+				offset:44, /* 8 petabytes */
 				dev:8,
 				gen:8;
 #elif defined (__BIG_ENDIAN_BITFIELD)
 	__u64			gen:8,
 				dev:8,
-				offset:45,
+				offset:44,
+				reservation:1,
 				erasure_coded:1,
 				type:2;
 #endif
 } __attribute__((packed)) __attribute__((aligned(8)));
 
-/* Dummy DEV numbers: */
-
-#define PTR_LOST_DEV	255 /* XXX: kill */
-
 union bch_extent_entry {
 	__u8			type;
 	struct bch_extent_crc32	crc32;
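The bitfields still account for exactly one 64-bit word (2 + 1 + 1 + 44 + 8 + 8 = 64), and a 44-bit offset in 512-byte sectors addresses 2^44 * 2^9 = 2^53 bytes = 8 PB, matching the updated comment. A compile-time sanity check (not in this commit) would be:

	_Static_assert(sizeof(struct bch_extent_ptr) == sizeof(__u64),
		       "extent pointer bitfields must pack into one u64");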
@@ -400,6 +429,11 @@ enum {
 	 * have the same value type:
 	 */
 	BCH_EXTENT_CACHED	= 129,
+
+	/*
+	 * Persistent reservation:
+	 */
+	BCH_RESERVATION		= 130,
 };
 
 struct bch_extent {
@@ -434,23 +468,23 @@ enum {
 
 struct bch_inode {
 	struct bch_val	v;
 
-	__u16		i_mode;
-	__u16		pad;
-	__u32		i_flags;
+	__le16		i_mode;
+	__le16		pad;
+	__le32		i_flags;
 
 	/* Nanoseconds */
-	__s64		i_atime;
-	__s64		i_ctime;
-	__s64		i_mtime;
+	__le64		i_atime;
+	__le64		i_ctime;
+	__le64		i_mtime;
 
-	__u64		i_size;
+	__le64		i_size;
 
-	__u32		i_uid;
-	__u32		i_gid;
-	__u32		i_nlink;
+	__le32		i_uid;
+	__le32		i_gid;
+	__le32		i_nlink;
 
-	__u32		i_dev;
-};
+	__le32		i_dev;
+} __attribute__((packed));
 BKEY_VAL_TYPE(inode, BCH_INODE_FS);
 
 struct bch_inode_blockdev {
@@ -484,7 +518,7 @@ struct bch_dirent {
 	struct bch_val	v;
 
 	/* Target inode number: */
-	__u64		d_inum;
+	__le64		d_inum;
 
 	/*
 	 * Copy of mode bits 12-15 from the target inode - so userspace can get
@@ -513,7 +547,7 @@ struct bch_xattr {
 	struct bch_val	v;
 	__u8		x_type;
 	__u8		x_name_len;
-	__u16		x_val_len;
+	__le16		x_val_len;
 	__u8		x_name[];
 } __attribute__((packed));
 BKEY_VAL_TYPE(xattr, BCH_XATTR);
@@ -545,43 +579,44 @@ BKEY_VAL_TYPE(xattr, BCH_XATTR);
 
 struct cache_member {
 	uuid_le		uuid;
-	__u64		nbuckets;	/* device size */
-	__u16		first_bucket;	/* index of first bucket used */
-	__u16		bucket_size;	/* sectors */
-	__u32		last_mount;	/* time_t */
+	__le64		nbuckets;	/* device size */
+	__le16		first_bucket;	/* index of first bucket used */
+	__le16		bucket_size;	/* sectors */
+	__le32		pad;
+	__le64		last_mount;	/* time_t */
 
-	__u64		f1;
-	__u64		f2;
+	__le64		f1;
+	__le64		f2;
 };
 
-BITMASK(CACHE_STATE,		struct cache_member, f1, 0,  4)
+LE64_BITMASK(CACHE_STATE,	struct cache_member, f1, 0,  4)
 #define CACHE_ACTIVE			0U
 #define CACHE_RO			1U
 #define CACHE_FAILED			2U
 #define CACHE_SPARE			3U
 
-BITMASK(CACHE_TIER,		struct cache_member, f1, 4,  8)
+LE64_BITMASK(CACHE_TIER,	struct cache_member, f1, 4,  8)
 #define CACHE_TIERS			4U
 
-BITMASK(CACHE_REPLICATION_SET,	struct cache_member, f1, 8,  16)
+LE64_BITMASK(CACHE_REPLICATION_SET, struct cache_member, f1, 8,  16)
 
-BITMASK(CACHE_HAS_METADATA,	struct cache_member, f1, 24, 25)
-BITMASK(CACHE_HAS_DATA,		struct cache_member, f1, 25, 26)
+LE64_BITMASK(CACHE_HAS_METADATA, struct cache_member, f1, 24, 25)
+LE64_BITMASK(CACHE_HAS_DATA,	struct cache_member, f1, 25, 26)
 
-BITMASK(CACHE_REPLACEMENT,	struct cache_member, f1, 26, 30)
+LE64_BITMASK(CACHE_REPLACEMENT,	struct cache_member, f1, 26, 30)
 #define CACHE_REPLACEMENT_LRU		0U
 #define CACHE_REPLACEMENT_FIFO		1U
 #define CACHE_REPLACEMENT_RANDOM	2U
 
-BITMASK(CACHE_DISCARD,		struct cache_member, f1, 30, 31);
+LE64_BITMASK(CACHE_DISCARD,	struct cache_member, f1, 30, 31);
 
-BITMASK(CACHE_NR_READ_ERRORS,	struct cache_member, f2, 0,  20);
-BITMASK(CACHE_NR_WRITE_ERRORS,	struct cache_member, f2, 20, 40);
+LE64_BITMASK(CACHE_NR_READ_ERRORS, struct cache_member, f2, 0,  20);
+LE64_BITMASK(CACHE_NR_WRITE_ERRORS, struct cache_member, f2, 20, 40);
 
 struct cache_sb {
-	__u64		csum;
-	__u64		offset;	/* sector where this sb was written */
-	__u64		version; /* of on disk format */
+	__le64		csum;
+	__le64		offset;	/* sector where this sb was written */
+	__le64		version; /* of on disk format */
 
 	uuid_le		magic;	/* bcache superblock UUID */
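Because CACHE_STATE, CACHE_TIER and the rest carve disjoint bit ranges out of the same f1 word, the generated setters can be mixed freely without clobbering each other. A usage sketch (not from this commit; mark_cache_ro() is an invented name):

	void mark_cache_ro(struct cache_member *m)
	{
		SET_CACHE_STATE(m, CACHE_RO);	/* touches only bits 0..3 of f1 */
		SET_CACHE_TIER(m, 1);		/* touches only bits 4..7 of f1 */
	}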
@@ -594,48 +629,37 @@ struct cache_sb {
 	 */
 	union {
 		uuid_le		set_uuid;
-		__u64		set_magic;
+		__le64		set_magic;
 	};
 	__u8		label[SB_LABEL_SIZE];
 
-	__u64		flags;
+	__le64		flags;
 
 	/* Incremented each time superblock is written: */
-	__u64		seq;
+	__le64		seq;
 
 	/*
 	 * User visible UUID for identifying the cache set the user is allowed
 	 * to change:
 	 */
 	uuid_le		user_uuid;
-	__u64		pad[6];
+	__le64		pad1[6];
 
-	union {
-	struct {
-		/* Cache devices */
-
-		/* Number of cache_member entries: */
-		__u8		nr_in_set;
-
-		/*
-		 * Index of this device - for PTR_DEV(), and also this device's
-		 * slot in the cache_member array:
-		 */
-		__u8		nr_this_dev;
-	};
-	struct {
-		/* Backing devices */
-		__u64		bdev_data_offset;
-	};
-	};
+	/* Number of cache_member entries: */
+	__u8		nr_in_set;
 
-	__u16		block_size;	/* sectors */
-	__u16		pad2[3];
+	/*
+	 * Index of this device - for PTR_DEV(), and also this device's
+	 * slot in the cache_member array:
+	 */
+	__u8		nr_this_dev;
+	__le16		pad2[3];
 
-	__u32		bdev_last_mount;	/* time_t */
-	__u16		pad3;
+	__le16		block_size;	/* sectors */
+	__le16		pad3[6];
 
-	__u16		u64s;	/* size of variable length portion */
+	__le16		u64s;	/* size of variable length portion */
 
 	union {
 		struct cache_member	members[0];
@@ -643,34 +667,37 @@ struct cache_sb {
 		 * Journal buckets also in the variable length portion, after
 		 * the member info:
 		 */
-		__u64			_data[0];
+		__le64			_data[0];
 	};
 };
 
-BITMASK(CACHE_SYNC,			struct cache_sb, flags, 0, 1);
+LE64_BITMASK(CACHE_SYNC,		struct cache_sb, flags, 0, 1);
 
-BITMASK(CACHE_ERROR_ACTION,		struct cache_sb, flags, 1, 4);
+LE64_BITMASK(CACHE_ERROR_ACTION,	struct cache_sb, flags, 1, 4);
 #define BCH_ON_ERROR_CONTINUE		0U
 #define BCH_ON_ERROR_RO			1U
 #define BCH_ON_ERROR_PANIC		2U
+#define BCH_NR_ERROR_ACTIONS		3U
 
-BITMASK(CACHE_SET_META_REPLICAS_WANT,	struct cache_sb, flags, 4, 8);
-BITMASK(CACHE_SET_DATA_REPLICAS_WANT,	struct cache_sb, flags, 8, 12);
+LE64_BITMASK(CACHE_SET_META_REPLICAS_WANT,struct cache_sb, flags, 4, 8);
+LE64_BITMASK(CACHE_SET_DATA_REPLICAS_WANT,struct cache_sb, flags, 8, 12);
+#define BCH_REPLICAS_MAX		4U
 
-BITMASK(CACHE_SB_CSUM_TYPE,		struct cache_sb, flags, 12, 16);
-BITMASK(CACHE_META_PREFERRED_CSUM_TYPE,	struct cache_sb, flags, 16, 20);
+LE64_BITMASK(CACHE_SB_CSUM_TYPE,	struct cache_sb, flags, 12, 16);
+LE64_BITMASK(CACHE_META_PREFERRED_CSUM_TYPE,struct cache_sb, flags, 16, 20);
 #define BCH_CSUM_NONE			0U
 #define BCH_CSUM_CRC32C			1U
 #define BCH_CSUM_CRC64			2U
 #define BCH_CSUM_NR			3U
 
-BITMASK(CACHE_BTREE_NODE_SIZE,		struct cache_sb, flags, 20, 36);
-BITMASK(CACHE_SET_META_REPLICAS_HAVE,	struct cache_sb, flags, 36, 40);
-BITMASK(CACHE_SET_DATA_REPLICAS_HAVE,	struct cache_sb, flags, 40, 44);
+LE64_BITMASK(CACHE_BTREE_NODE_SIZE,	struct cache_sb, flags, 20, 36);
+LE64_BITMASK(CACHE_SET_META_REPLICAS_HAVE,struct cache_sb, flags, 36, 40);
+LE64_BITMASK(CACHE_SET_DATA_REPLICAS_HAVE,struct cache_sb, flags, 40, 44);
 
-BITMASK(CACHE_SET_DIRENT_CSUM_TYPE,	struct cache_sb, flags, 44, 48);
+LE64_BITMASK(CACHE_SET_DIRENT_CSUM_TYPE,struct cache_sb, flags, 44, 48);
 enum {
 	BCH_DIRENT_CSUM_CRC32C		= 0,
 	BCH_DIRENT_CSUM_CRC64		= 1,
@@ -678,9 +705,9 @@ enum {
 	BCH_DIRENT_CSUM_SHA1		= 3,
 };
 
-BITMASK(CACHE_DATA_PREFERRED_CSUM_TYPE,	struct cache_sb, flags, 48, 52);
+LE64_BITMASK(CACHE_DATA_PREFERRED_CSUM_TYPE, struct cache_sb, flags, 48, 52);
 
-BITMASK(CACHE_COMPRESSION_TYPE,		struct cache_sb, flags, 52, 56);
+LE64_BITMASK(CACHE_COMPRESSION_TYPE,	struct cache_sb, flags, 52, 56);
 enum {
 	BCH_COMPRESSION_NONE		= 0,
 	BCH_COMPRESSION_LZO1X		= 1,
@@ -690,13 +717,57 @@ enum {
 
 /* backing device specific stuff: */
 
-BITMASK(BDEV_CACHE_MODE,		struct cache_sb, flags, 0, 4);
+struct backingdev_sb {
+	__le64		csum;
+	__le64		offset;	/* sector where this sb was written */
+	__le64		version; /* of on disk format */
+
+	uuid_le		magic;	/* bcache superblock UUID */
+
+	uuid_le		disk_uuid;
+
+	/*
+	 * Internal cache set UUID - xored with various magic numbers and thus
+	 * must never change:
+	 */
+	union {
+		uuid_le		set_uuid;
+		__le64		set_magic;
+	};
+	__u8		label[SB_LABEL_SIZE];
+
+	__le64		flags;
+
+	/* Incremented each time superblock is written: */
+	__le64		seq;
+
+	/*
+	 * User visible UUID for identifying the cache set the user is allowed
+	 * to change:
+	 *
+	 * XXX hooked up?
+	 */
+	uuid_le		user_uuid;
+	__le64		pad1[6];
+
+	__le64		data_offset;
+	__le16		block_size;	/* sectors */
+	__le16		pad2[3];
+
+	__le32		last_mount;	/* time_t */
+	__le16		pad3;
+
+	/* size of variable length portion - always 0 for backingdev superblock */
+	__le16		u64s;
+	__u64		_data[0];
+};
+
+LE64_BITMASK(BDEV_CACHE_MODE,		struct backingdev_sb, flags, 0, 4);
 #define CACHE_MODE_WRITETHROUGH		0U
 #define CACHE_MODE_WRITEBACK		1U
 #define CACHE_MODE_WRITEAROUND		2U
 #define CACHE_MODE_NONE			3U
 
-BITMASK(BDEV_STATE,			struct cache_sb, flags, 61, 63);
+LE64_BITMASK(BDEV_STATE,		struct backingdev_sb, flags, 61, 63);
 #define BDEV_STATE_NONE			0U
 #define BDEV_STATE_CLEAN		1U
 #define BDEV_STATE_DIRTY		2U
@@ -709,7 +780,7 @@ static inline unsigned bch_journal_buckets_offset(struct cache_sb *sb)
 
 static inline unsigned bch_nr_journal_buckets(struct cache_sb *sb)
 {
-	return sb->u64s - bch_journal_buckets_offset(sb);
+	return __le16_to_cpu(sb->u64s) - bch_journal_buckets_offset(sb);
 }
 
 static inline _Bool __SB_IS_BDEV(__u64 version)
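Since u64s covers the whole variable-length tail (the cache_member array first, then journal buckets), a reader can index journal buckets as in this sketch (journal_bucket() is an invented helper, not in the diff):

	static inline __u64 journal_bucket(struct cache_sb *sb, unsigned i)
	{
		/* journal buckets follow the member info in _data[] */
		return __le64_to_cpu(sb->_data[bch_journal_buckets_offset(sb) + i]);
	}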
@@ -744,17 +815,17 @@ static inline _Bool SB_IS_BDEV(const struct cache_sb *sb)
 
 static inline __u64 jset_magic(struct cache_sb *sb)
 {
-	return sb->set_magic ^ JSET_MAGIC;
+	return __le64_to_cpu(sb->set_magic) ^ JSET_MAGIC;
 }
 
 static inline __u64 pset_magic(struct cache_sb *sb)
 {
-	return sb->set_magic ^ PSET_MAGIC;
+	return __le64_to_cpu(sb->set_magic) ^ PSET_MAGIC;
 }
 
 static inline __u64 bset_magic(struct cache_sb *sb)
 {
-	return sb->set_magic ^ BSET_MAGIC;
+	return __le64_to_cpu(sb->set_magic) ^ BSET_MAGIC;
 }
 
 /*
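Deriving each magic by xoring the per-set set_magic means metadata written by a different cache set fails its magic check. A reader-side sketch (jset_valid() is an invented name; struct jset's magic field is __le64 after this commit):

	static inline _Bool jset_valid(struct cache_sb *sb, const struct jset *j)
	{
		/* blocks from another cache set xor to a different value */
		return __le64_to_cpu(j->magic) == jset_magic(sb);
	}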
@@ -775,26 +846,11 @@ static inline __u64 bset_magic(struct cache_sb *sb)
 #define BCACHE_JSET_VERSION_JKEYS	2
 #define BCACHE_JSET_VERSION		2
 
-#define DEFINE_BCH_BTREE_IDS()			\
-	DEF_BTREE_ID(EXTENTS, 0, "extents")	\
-	DEF_BTREE_ID(INODES,  1, "inodes")	\
-	DEF_BTREE_ID(DIRENTS, 2, "dirents")	\
-	DEF_BTREE_ID(XATTRS,  3, "xattrs")
-
-#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,
-
-enum btree_id {
-	DEFINE_BCH_BTREE_IDS()
-	BTREE_ID_NR
-};
-
-#undef DEF_BTREE_ID
-
 struct jset_entry {
-	__u16			u64s;
+	__le16			u64s;
 	__u8			btree_id;
 	__u8			level;
-	__u32			flags; /* designates what this jset holds */
+	__le32			flags; /* designates what this jset holds */
 
 	union {
 		struct bkey_i	start[0];
@@ -805,7 +861,7 @@ struct jset_entry {
 
 #define JSET_KEYS_U64s	(sizeof(struct jset_entry) / sizeof(__u64))
 
-BITMASK(JKEYS_TYPE,	struct jset_entry, flags, 0, 8);
+LE32_BITMASK(JKEYS_TYPE,	struct jset_entry, flags, 0, 8);
 enum {
 	JKEYS_BTREE_KEYS	= 0,
 	JKEYS_BTREE_ROOT	= 1,
@@ -825,18 +881,18 @@ enum {
 };
 
 struct jset {
-	__u64			csum;
-	__u64			magic;
-	__u32			version;
-	__u32			flags;
+	__le64			csum;
+	__le64			magic;
+	__le32			version;
+	__le32			flags;
 
 	/* Sequence number of oldest dirty journal entry */
-	__u64			seq;
-	__u64			last_seq;
+	__le64			seq;
+	__le64			last_seq;
 
-	__u16			read_clock;
-	__u16			write_clock;
+	__le16			read_clock;
+	__le16			write_clock;
 
-	__u32			u64s; /* size of d[] in u64s */
+	__le32			u64s; /* size of d[] in u64s */
 
 	union {
 		struct jset_entry start[0];
@@ -844,26 +900,45 @@ struct jset {
 	};
 };
 
-BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
+LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
 
 /* Bucket prios/gens */
 
 struct prio_set {
-	__u64			csum;
-	__u64			magic;
-	__u32			version;
-	__u32			flags;
+	__le64			csum;
+	__le64			magic;
+	__le32			version;
+	__le32			flags;
 
-	__u64			next_bucket;
+	__le64			next_bucket;
 
 	struct bucket_disk {
-		__u16		read_prio;
-		__u16		write_prio;
+		__le16		read_prio;
+		__le16		write_prio;
 		__u8		gen;
-	} __attribute((packed)) data[];
+	} __attribute__((packed)) data[];
 };
 
-BITMASK(PSET_CSUM_TYPE,	struct prio_set, flags, 0, 4);
+LE32_BITMASK(PSET_CSUM_TYPE,	struct prio_set, flags, 0, 4);
+
+/* Btree: */
+
+#define DEFINE_BCH_BTREE_IDS()			\
+	DEF_BTREE_ID(EXTENTS, 0, "extents")	\
+	DEF_BTREE_ID(INODES,  1, "inodes")	\
+	DEF_BTREE_ID(DIRENTS, 2, "dirents")	\
+	DEF_BTREE_ID(XATTRS,  3, "xattrs")
+
+#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,
+
+enum btree_id {
+	DEFINE_BCH_BTREE_IDS()
+	BTREE_ID_NR
+};
+
+#undef DEF_BTREE_ID
+
+#define BTREE_MAX_DEPTH	4
 
 /* Btree nodes */
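DEFINE_BCH_BTREE_IDS() is an x-macro: redefining DEF_BTREE_ID changes what each row expands to, and the third argument can generate a name table for debugging. An illustrative sketch (this commit only moves the block; btree_id_names is an invented name):

	#define DEF_BTREE_ID(kwd, val, name) [BTREE_ID_##kwd] = name,
	static const char * const btree_id_names[] = {
		DEFINE_BCH_BTREE_IDS()
	};
	#undef DEF_BTREE_ID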
@@ -881,7 +956,7 @@ BITMASK(PSET_CSUM_TYPE,	struct prio_set, flags, 0, 4);
  * sorted
  */
 struct bset {
-	__u64			seq;
+	__le64			seq;
 
 	/*
 	 * Highest journal entry this bset contains keys for.
@@ -890,26 +965,28 @@ struct bset {
 	 * crash, since the journal records a total order of all index updates
 	 * and anything that didn't make it to the journal doesn't get used.
 	 */
-	__u64			journal_seq;
+	__le64			journal_seq;
 
-	__u32			flags;
-	__u16			version;
-	__u16			u64s; /* count of d[] in u64s */
+	__le32			flags;
+	__le16			version;
+	__le16			u64s; /* count of d[] in u64s */
 
 	union {
 		struct bkey_packed start[0];
 		__u64		_data[0];
 	};
-} __attribute((packed));
+} __attribute__((packed));
 
-BITMASK(BSET_CSUM_TYPE,		struct bset, flags, 0, 4);
+LE32_BITMASK(BSET_CSUM_TYPE,	struct bset, flags, 0, 4);
 
 /* Only used in first bset */
-BITMASK(BSET_BTREE_LEVEL,	struct bset, flags, 4, 8);
+LE32_BITMASK(BSET_BTREE_LEVEL,	struct bset, flags, 4, 8);
+
+LE32_BITMASK(BSET_BIG_ENDIAN,	struct bset, flags, 8, 9);
 
 struct btree_node {
-	__u64			csum;
-	__u64			magic;
+	__le64			csum;
+	__le64			magic;
 
 	/* Closed interval: */
 	struct bpos		min_key;
@@ -917,15 +994,29 @@ struct btree_node {
 	struct bkey_format	format;
 
 	struct bset		keys;
-} __attribute((packed));
+} __attribute__((packed));
 
 struct btree_node_entry {
-	__u64			csum;
+	__le64			csum;
 
 	struct bset		keys;
-} __attribute((packed));
+} __attribute__((packed));
 
 /* OBSOLETE */
 
+#define BITMASK(name, type, field, offset, end)				\
+static const unsigned	name##_OFFSET = offset;				\
+static const unsigned	name##_BITS = (end - offset);			\
+static const __u64	name##_MAX = (1ULL << (end - offset)) - 1;	\
+									\
+static inline __u64 name(const type *k)				\
+{ return (k->field >> offset) & ~(~0ULL << (end - offset)); }		\
+									\
+static inline void SET_##name(type *k, __u64 v)			\
+{									\
+	k->field &= ~(~(~0ULL << (end - offset)) << offset);		\
+	k->field |= (v & ~(~0ULL << (end - offset))) << offset;	\
+}
+
 struct bkey_v0 {
 	__u64		high;
 	__u64		low;

File 3 of 3:

@@ -60,9 +60,8 @@ const char * const csum_types[] = {
 
 const char * const compression_types[] = {
 	"none",
-	"lzo1x",
+	"lz4",
 	"gzip",
-	"xz",
 	NULL
 };
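These strings presumably line up with the CACHE_COMPRESSION_TYPE values from the header, so pretty-printing could look like this sketch (compression_name() is an invented helper; range checking omitted):

	const char *compression_name(const struct cache_sb *sb)
	{
		/* table index matches the on-disk compression type value */
		return compression_types[CACHE_COMPRESSION_TYPE(sb)];
	}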