#ifndef _LINUX_BCACHE_H
#define _LINUX_BCACHE_H
/*
* Bcache on disk data structures
*/
#ifdef __cplusplus
typedef bool _Bool;
extern "C" {
#endif
#include <asm/types.h>
#include <asm/byteorder.h>
#include <linux/uuid.h>
#define LE32_BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \
\
static inline __u64 name(const type *k) \
{ \
return (__le32_to_cpu(k->field) >> offset) & \
~(~0ULL << (end - offset)); \
} \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
__u64 new_v = __le32_to_cpu(k->field); \
\
new_v &= ~(~(~0ULL << (end - offset)) << offset); \
new_v |= (v & ~(~0ULL << (end - offset))) << offset; \
k->field = __cpu_to_le32(new_v); \
}
#define LE64_BITMASK(name, type, field, offset, end) \
static const unsigned name##_OFFSET = offset; \
static const unsigned name##_BITS = (end - offset); \
static const __u64 name##_MAX = (1ULL << (end - offset)) - 1; \
\
static inline __u64 name(const type *k) \
{ \
return (__le64_to_cpu(k->field) >> offset) & \
~(~0ULL << (end - offset)); \
} \
\
static inline void SET_##name(type *k, __u64 v) \
{ \
__u64 new_v = __le64_to_cpu(k->field); \
\
new_v &= ~(~(~0ULL << (end - offset)) << offset); \
new_v |= (v & ~(~0ULL << (end - offset))) << offset; \
k->field = __cpu_to_le64(new_v); \
}
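/*
 * Usage sketch (illustrative, not part of the format): the invocation
 * LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2) further down
 * generates BCH_SB_CLEAN()/SET_BCH_SB_CLEAN() accessors for bit 1 of the
 * little endian flags[0] word, plus _OFFSET/_BITS/_MAX constants;
 * run_recovery() below is a hypothetical helper:
 */
#if 0
	if (!BCH_SB_CLEAN(sb))		/* read the bitfield */
		run_recovery(sb);	/* hypothetical helper */
	SET_BCH_SB_CLEAN(sb, 1);	/* update it in place */
#endif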
struct bkey_format {
__u8 key_u64s;
__u8 nr_fields;
/* One unused slot for now: */
__u8 bits_per_field[6];
__le64 field_offset[6];
};
/* Btree keys - all units are in sectors */
struct bpos {
/* Word order matches machine byte order */
#if defined(__LITTLE_ENDIAN)
__u32 snapshot;
__u64 offset;
__u64 inode;
#elif defined(__BIG_ENDIAN)
__u64 inode;
__u64 offset; /* Points to end of extent - sectors */
__u32 snapshot;
#else
#error edit for your odd byteorder.
#endif
} __attribute__((packed, aligned(4)));
#define KEY_INODE_MAX ((__u64)~0ULL)
#define KEY_OFFSET_MAX ((__u64)~0ULL)
#define KEY_SNAPSHOT_MAX ((__u32)~0U)
static inline struct bpos POS(__u64 inode, __u64 offset)
{
struct bpos ret;
ret.inode = inode;
ret.offset = offset;
ret.snapshot = 0;
return ret;
}
#define POS_MIN POS(0, 0)
#define POS_MAX POS(KEY_INODE_MAX, KEY_OFFSET_MAX)
/* Empty placeholder struct, for container_of() */
struct bch_val {
__u64 __nothing[0];
};
struct bversion {
#if defined(__LITTLE_ENDIAN)
__u64 lo;
__u32 hi;
#elif defined(__BIG_ENDIAN)
__u32 hi;
__u64 lo;
#endif
} __attribute__((packed, aligned(4)));
struct bkey {
/* Size of combined key and value, in u64s */
__u8 u64s;
/* Format of key (0 for format local to btree node) */
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 format:7,
needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u8 needs_whiteout:1,
format:7;
#else
#error edit for your odd byteorder.
#endif
/* Type of the value */
__u8 type;
#if defined(__LITTLE_ENDIAN)
__u8 pad[1];
struct bversion version;
__u32 size; /* extent size, in sectors */
struct bpos p;
#elif defined(__BIG_ENDIAN)
struct bpos p;
__u32 size; /* extent size, in sectors */
struct bversion version;
__u8 pad[1];
#endif
} __attribute__((packed, aligned(8)));
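/*
 * Note: since p.offset points at the *end* of an extent (see struct bpos
 * above), the first sector a key covers is p.offset - size. A minimal
 * sketch of that helper (the real one lives with the bkey code):
 */
#if 0
static inline __u64 bkey_start_offset(const struct bkey *k)
{
	return k->p.offset - k->size;
}
#endif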
struct bkey_packed {
__u64 _data[0];
/* Size of combined key and value, in u64s */
__u8 u64s;
/* Format of key (0 for format local to btree node) */
/*
* XXX: next incompat on disk format change, switch format and
* needs_whiteout - bkey_packed() will be cheaper if format is the high
* bits of the bitfield
*/
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 format:7,
needs_whiteout:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u8 needs_whiteout:1,
format:7;
#endif
/* Type of the value */
__u8 type;
__u8 key_start[0];
/*
* We copy bkeys with struct assignment in various places, and while
* that shouldn't be done with packed bkeys we can't disallow it in C,
* and it's legal to cast a bkey to a bkey_packed - so padding it out
* to the same size as struct bkey should hopefully be safest.
*/
__u8 pad[sizeof(struct bkey) - 3];
} __attribute__((packed, aligned(8)));
#define BKEY_U64s (sizeof(struct bkey) / sizeof(__u64))
#define KEY_PACKED_BITS_START 24
#define KEY_SIZE_MAX ((__u32)~0U)
#define KEY_FORMAT_LOCAL_BTREE 0
#define KEY_FORMAT_CURRENT 1
enum bch_bkey_fields {
BKEY_FIELD_INODE,
BKEY_FIELD_OFFSET,
BKEY_FIELD_SNAPSHOT,
BKEY_FIELD_SIZE,
BKEY_FIELD_VERSION_HI,
BKEY_FIELD_VERSION_LO,
BKEY_NR_FIELDS,
};
#define bkey_format_field(name, field) \
[BKEY_FIELD_##name] = (sizeof(((struct bkey *) NULL)->field) * 8)
#define BKEY_FORMAT_CURRENT \
((struct bkey_format) { \
.key_u64s = BKEY_U64s, \
.nr_fields = BKEY_NR_FIELDS, \
.bits_per_field = { \
bkey_format_field(INODE, p.inode), \
bkey_format_field(OFFSET, p.offset), \
bkey_format_field(SNAPSHOT, p.snapshot), \
bkey_format_field(SIZE, size), \
bkey_format_field(VERSION_HI, version.hi), \
bkey_format_field(VERSION_LO, version.lo), \
}, \
})
/* bkey with inline value */
struct bkey_i {
__u64 _data[0];
union {
struct {
/* Size of combined key and value, in u64s */
__u8 u64s;
};
struct {
struct bkey k;
struct bch_val v;
};
};
};
#ifndef __cplusplus
#define KEY(_inode, _offset, _size) \
((struct bkey) { \
.u64s = BKEY_U64s, \
.format = KEY_FORMAT_CURRENT, \
.p = POS(_inode, _offset), \
.size = _size, \
})
#else
static inline struct bkey KEY(__u64 inode, __u64 offset, __u64 size)
{
struct bkey ret;
memset(&ret, 0, sizeof(ret));
ret.u64s = BKEY_U64s;
ret.format = KEY_FORMAT_CURRENT;
ret.p.inode = inode;
ret.p.offset = offset;
ret.size = size;
return ret;
}
#endif
static inline void bkey_init(struct bkey *k)
{
*k = KEY(0, 0, 0);
}
#define bkey_bytes(_k) ((_k)->u64s * sizeof(__u64))
#define __BKEY_PADDED(key, pad) \
struct { struct bkey_i key; __u64 key ## _pad[pad]; }
#define BKEY_VAL_TYPE(name, nr) \
struct bkey_i_##name { \
union { \
struct bkey k; \
struct bkey_i k_i; \
}; \
struct bch_##name v; \
}
/*
* - DELETED keys are used internally to mark keys that should be ignored but
* override keys in composition order. Their version number is ignored.
*
* - DISCARDED keys indicate that the data is all 0s because it has been
* discarded. DISCARDs may have a version; if the version is nonzero the key
* will be persistent, otherwise the key will be dropped whenever the btree
* node is rewritten (like DELETED keys).
*
* - ERROR: any read of the data returns a read error, as the data was lost due
* to a failing device. Like DISCARDED keys, they can be removed (overridden)
* by new writes or cluster-wide GC. Node repair can also overwrite them with
* the same or a more recent version number, but not with an older version
* number.
*/
#define KEY_TYPE_DELETED 0
#define KEY_TYPE_DISCARD 1
#define KEY_TYPE_ERROR 2
#define KEY_TYPE_COOKIE 3
#define KEY_TYPE_PERSISTENT_DISCARD 4
#define KEY_TYPE_GENERIC_NR 128
struct bch_cookie {
struct bch_val v;
__le64 cookie;
};
BKEY_VAL_TYPE(cookie, KEY_TYPE_COOKIE);
/* Extents */
/*
* In extent bkeys, the value is a list of pointers (bch_extent_ptr), optionally
* preceded by checksum/compression information (bch_extent_crc32 or
* bch_extent_crc64).
*
* One major determining factor in the format of extents is how we handle and
* represent extents that have been partially overwritten and thus trimmed:
*
* If an extent is not checksummed or compressed, when the extent is trimmed we
* don't have to remember the extent we originally allocated and wrote: we can
* merely adjust ptr->offset to point to the start of the data that
* is currently live. The size field in struct bkey records the current (live)
* size of the extent, and is also used to mean "size of region on disk that we
* point to" in this case.
*
* Thus an extent that is not checksummed or compressed will consist only of a
* list of bch_extent_ptrs, with none of the fields in
* bch_extent_crc32/bch_extent_crc64.
*
* When an extent is checksummed or compressed, it's not possible to read only
* the data that is currently live: we have to read the entire extent that was
* originally written, and then return only the part of the extent that is
* currently live.
*
* Thus, in addition to the current size of the extent in struct bkey, we need
* to store the size of the originally allocated space - this is the
* compressed_size and uncompressed_size fields in bch_extent_crc32/64. Also,
* when the extent is trimmed, instead of modifying the offset field of the
* pointer, we keep a second smaller offset field - "offset into the original
* extent of the currently live region".
*
* The other major determining factor is replication and data migration:
*
* Each pointer may have its own bch_extent_crc32/64. When doing a replicated
* write, we will initially write all the replicas in the same format, with the
* same checksum type and compression format - however, when copygc runs later (or
* tiering/cache promotion, anything that moves data), it is not in general
* going to rewrite all the pointers at once - one of the replicas may be in a
* bucket on one device that has very little fragmentation while another lives
* in a bucket that has become heavily fragmented, and thus is being rewritten
* sooner than the rest.
*
* Thus it will only move a subset of the pointers (or in the case of
* tiering/cache promotion perhaps add a single pointer without dropping any
* current pointers), and if the extent has been partially overwritten it must
* write only the currently live portion (or copygc would not be able to reduce
* fragmentation!) - which necessitates a different bch_extent_crc format for
* the new pointer.
*
* But in the interests of space efficiency, we don't want to store one
* bch_extent_crc for each pointer if we don't have to.
*
* Thus, a bch_extent consists of bch_extent_crc32s, bch_extent_crc64s,
* bch_extent_crc128s and bch_extent_ptrs appended arbitrarily one after the
* other. We determine the type of a given entry with a scheme similar to utf8
* (except we're encoding a type, not a size), encoding the type in the
* position of the first set bit:
*
* bch_extent_ptr - 0b1
* bch_extent_crc32 - 0b10
* bch_extent_crc64 - 0b100
* bch_extent_crc128 - 0b1000
*
* We do it this way because bch_extent_crc32 is _very_ constrained on bits (and
* bch_extent_crc128 is the least constrained).
*
* Then, each bch_extent_crc32/64/128 applies to the pointers that follow after
* it, until the next crc entry.
*
* If there are no bch_extent_crcs preceding a bch_extent_ptr, then that pointer
* is neither checksummed nor compressed.
*/
/* 128 bits, sufficient for cryptographic MACs: */
struct bch_csum {
__le64 lo;
__le64 hi;
} __attribute__((packed, aligned(8)));
#define BCH_CSUM_NONE 0U
#define BCH_CSUM_CRC32C 1U
#define BCH_CSUM_CRC64 2U
#define BCH_CSUM_CHACHA20_POLY1305_80 3U
#define BCH_CSUM_CHACHA20_POLY1305_128 4U
#define BCH_CSUM_NR 5U
static inline _Bool bch2_csum_type_is_encryption(unsigned type)
{
switch (type) {
case BCH_CSUM_CHACHA20_POLY1305_80:
case BCH_CSUM_CHACHA20_POLY1305_128:
return true;
default:
return false;
}
}
enum bch_extent_entry_type {
BCH_EXTENT_ENTRY_ptr = 0,
BCH_EXTENT_ENTRY_crc32 = 1,
BCH_EXTENT_ENTRY_crc64 = 2,
BCH_EXTENT_ENTRY_crc128 = 3,
};
#define BCH_EXTENT_ENTRY_MAX 4
/* Compressed/uncompressed sizes are stored biased by 1: */
struct bch_extent_crc32 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u32 type:2,
_compressed_size:7,
_uncompressed_size:7,
offset:7,
_unused:1,
csum_type:4,
compression_type:4;
__u32 csum;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u32 csum;
__u32 compression_type:4,
csum_type:4,
_unused:1,
offset:7,
_uncompressed_size:7,
_compressed_size:7,
type:2;
#endif
} __attribute__((packed, aligned(8)));
#define CRC32_SIZE_MAX (1U << 7)
#define CRC32_NONCE_MAX 0
struct bch_extent_crc64 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:3,
_compressed_size:9,
_uncompressed_size:9,
offset:9,
nonce:10,
csum_type:4,
compression_type:4,
csum_hi:16;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 csum_hi:16,
compression_type:4,
csum_type:4,
nonce:10,
offset:9,
_uncompressed_size:9,
_compressed_size:9,
type:3;
#endif
__u64 csum_lo;
} __attribute__((packed, aligned(8)));
#define CRC64_SIZE_MAX (1U << 9)
#define CRC64_NONCE_MAX ((1U << 10) - 1)
struct bch_extent_crc128 {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:4,
_compressed_size:13,
_uncompressed_size:13,
offset:13,
nonce:13,
csum_type:4,
compression_type:4;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 compression_type:4,
csum_type:4,
nonce:13,
offset:13,
_uncompressed_size:13,
_compressed_size:13,
type:4;
#endif
struct bch_csum csum;
} __attribute__((packed, aligned(8)));
#define CRC128_SIZE_MAX (1U << 13)
#define CRC128_NONCE_MAX ((1U << 13) - 1)
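/*
 * Decoding the biased sizes above is just an add - a stored value of 0
 * means 1 sector, so the 7/9/13 bit fields cover exactly
 * CRC32/CRC64/CRC128_SIZE_MAX sectors. A sketch with an assumed helper
 * name (the real accessors live with the extent code):
 */
#if 0
static inline unsigned crc32_compressed_size(const struct bch_extent_crc32 *crc)
{
	return crc->_compressed_size + 1;
}
#endif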
/*
* Max size of an extent that may require bouncing to read or write
* (checksummed, compressed): 64k
*/
#define BCH_ENCODED_EXTENT_MAX 128U
/*
* @reservation - pointer hasn't been written to, just reserved
*/
struct bch_extent_ptr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:1,
cached:1,
erasure_coded:1,
reservation:1,
offset:44, /* 8 petabytes */
dev:8,
gen:8;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 gen:8,
dev:8,
offset:44,
reservation:1,
erasure_coded:1,
cached:1,
type:1;
#endif
} __attribute__((packed, aligned(8)));
struct bch_extent_reservation {
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u64 type:5,
unused:23,
replicas:4,
generation:32;
#elif defined (__BIG_ENDIAN_BITFIELD)
__u64 generation:32,
replicas:4,
unused:23,
type:5;
#endif
};
union bch_extent_entry {
#if defined(__LITTLE_ENDIAN) || __BITS_PER_LONG == 64
unsigned long type;
#elif __BITS_PER_LONG == 32
struct {
unsigned long pad;
unsigned long type;
};
#else
#error edit for your odd byteorder.
#endif
struct bch_extent_crc32 crc32;
struct bch_extent_crc64 crc64;
struct bch_extent_crc128 crc128;
struct bch_extent_ptr ptr;
};
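/*
 * A minimal sketch of decoding the first-set-bit type scheme described
 * above (assumed helper name; the real iteration code lives with the
 * extent code): the position of the lowest set bit in the type bits is
 * the enum bch_extent_entry_type value:
 */
#if 0
static inline enum bch_extent_entry_type
extent_entry_type(const union bch_extent_entry *e)
{
	/* ptr -> bit 0, crc32 -> bit 1, crc64 -> bit 2, crc128 -> bit 3 */
	return __builtin_ctzl(e->type);
}
#endif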
enum {
BCH_EXTENT = 128,
/*
* This is kind of a hack, we're overloading the type for a boolean that
* really should be part of the value - BCH_EXTENT and BCH_EXTENT_CACHED
* have the same value type:
*/
BCH_EXTENT_CACHED = 129,
/*
* Persistent reservation:
*/
BCH_RESERVATION = 130,
};
struct bch_extent {
struct bch_val v;
union bch_extent_entry start[0];
__u64 _data[0];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(extent, BCH_EXTENT);
struct bch_reservation {
struct bch_val v;
__le32 generation;
__u8 nr_replicas;
__u8 pad[3];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(reservation, BCH_RESERVATION);
/* Maximum size (in u64s) a single pointer could be: */
#define BKEY_EXTENT_PTR_U64s_MAX \
((sizeof(struct bch_extent_crc128) + \
sizeof(struct bch_extent_ptr)) / sizeof(u64))
/* Maximum possible size of an entire extent value: */
/* There's a hack in the keylist code that needs to be fixed.. */
#define BKEY_EXTENT_VAL_U64s_MAX \
(BKEY_EXTENT_PTR_U64s_MAX * BCH_REPLICAS_MAX)
/* Maximum possible size of an entire extent, key + value: */
#define BKEY_EXTENT_U64s_MAX (BKEY_U64s + BKEY_EXTENT_VAL_U64s_MAX)
/* Btree pointers don't carry around checksums: */
#define BKEY_BTREE_PTR_VAL_U64s_MAX \
((sizeof(struct bch_extent_ptr)) / sizeof(u64) * BCH_REPLICAS_MAX)
#define BKEY_BTREE_PTR_U64s_MAX \
(BKEY_U64s + BKEY_BTREE_PTR_VAL_U64s_MAX)
/* Inodes */
#define BLOCKDEV_INODE_MAX 4096
#define BCACHE_ROOT_INO 4096
enum bch_inode_types {
BCH_INODE_FS = 128,
BCH_INODE_BLOCKDEV = 129,
};
struct bch_inode {
struct bch_val v;
__le64 i_hash_seed;
__le32 i_flags;
__le16 i_mode;
__u8 fields[0];
} __attribute__((packed));
BKEY_VAL_TYPE(inode, BCH_INODE_FS);
#define BCH_INODE_FIELDS() \
BCH_INODE_FIELD(i_atime, 64) \
BCH_INODE_FIELD(i_ctime, 64) \
BCH_INODE_FIELD(i_mtime, 64) \
BCH_INODE_FIELD(i_otime, 64) \
BCH_INODE_FIELD(i_size, 64) \
BCH_INODE_FIELD(i_sectors, 64) \
BCH_INODE_FIELD(i_uid, 32) \
BCH_INODE_FIELD(i_gid, 32) \
BCH_INODE_FIELD(i_nlink, 32) \
BCH_INODE_FIELD(i_generation, 32) \
BCH_INODE_FIELD(i_dev, 32)
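/*
 * BCH_INODE_FIELDS() is an x-macro: callers define BCH_INODE_FIELD() to
 * stamp out one entry per variable length on disk field. An illustrative
 * expansion (assumed usage; the real consumers live with the inode code):
 */
#if 0
enum {
#define BCH_INODE_FIELD(_name, _bits)	BCH_INODE_FIELD_##_name,
	BCH_INODE_FIELDS()
#undef BCH_INODE_FIELD
	BCH_INODE_NR_FIELDS,
};
#endif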
enum {
/*
* User flags (get/settable with FS_IOC_*FLAGS, correspond to FS_*_FL
* flags)
*/
__BCH_INODE_SYNC = 0,
__BCH_INODE_IMMUTABLE = 1,
__BCH_INODE_APPEND = 2,
__BCH_INODE_NODUMP = 3,
__BCH_INODE_NOATIME = 4,
__BCH_INODE_I_SIZE_DIRTY = 5,
__BCH_INODE_I_SECTORS_DIRTY = 6,
/* not implemented yet: */
__BCH_INODE_HAS_XATTRS = 7, /* has xattrs in xattr btree */
/* bits 20+ reserved for packed fields below: */
};
#define BCH_INODE_SYNC (1 << __BCH_INODE_SYNC)
#define BCH_INODE_IMMUTABLE (1 << __BCH_INODE_IMMUTABLE)
#define BCH_INODE_APPEND (1 << __BCH_INODE_APPEND)
#define BCH_INODE_NODUMP (1 << __BCH_INODE_NODUMP)
#define BCH_INODE_NOATIME (1 << __BCH_INODE_NOATIME)
#define BCH_INODE_I_SIZE_DIRTY (1 << __BCH_INODE_I_SIZE_DIRTY)
#define BCH_INODE_I_SECTORS_DIRTY (1 << __BCH_INODE_I_SECTORS_DIRTY)
#define BCH_INODE_HAS_XATTRS (1 << __BCH_INODE_HAS_XATTRS)
LE32_BITMASK(INODE_STR_HASH, struct bch_inode, i_flags, 20, 24);
LE32_BITMASK(INODE_NR_FIELDS, struct bch_inode, i_flags, 24, 32);
struct bch_inode_blockdev {
struct bch_val v;
__le64 i_size;
__le64 i_flags;
/* Seconds: */
__le64 i_ctime;
__le64 i_mtime;
uuid_le i_uuid;
__u8 i_label[32];
} __attribute__((packed, aligned(8)));
BKEY_VAL_TYPE(inode_blockdev, BCH_INODE_BLOCKDEV);
/* Thin provisioned volume, or cache for another block device? */
LE64_BITMASK(CACHED_DEV, struct bch_inode_blockdev, i_flags, 0, 1)
/* Dirents */
/*
* Dirents (and xattrs) have to implement string lookups; since our b-tree
* doesn't support arbitrary length strings for the key, we instead index by a
* 64 bit hash (crc32c, crc64 or siphash, as selected by the INODE_STR_HASH
* field) of the string, stored in the offset field of the key - using linear
* probing to resolve hash collisions. This also provides us with the readdir
* cookie POSIX requires.
*
* Linear probing requires us to use whiteouts for deletions, in the event of a
* collision:
*/
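/*
 * Purely illustrative: a lookup of "name" in directory dir_inum starts at
 * the position below and probes forward on collision (bch2_dirent_hash()
 * is a hypothetical helper applying the hash selected by INODE_STR_HASH):
 */
#if 0
	struct bpos pos = POS(dir_inum, bch2_dirent_hash(hash_info, name));
#endif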
enum {
BCH_DIRENT = 128,
BCH_DIRENT_WHITEOUT = 129,
};
struct bch_dirent {
struct bch_val v;
/* Target inode number: */
__le64 d_inum;
/*
* Copy of mode bits 12-15 from the target inode - so userspace can get
* the filetype without having to do a stat()
*/
__u8 d_type;
__u8 d_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(dirent, BCH_DIRENT);
/* Xattrs */
enum {
BCH_XATTR = 128,
BCH_XATTR_WHITEOUT = 129,
};
#define BCH_XATTR_INDEX_USER 0
#define BCH_XATTR_INDEX_POSIX_ACL_ACCESS 1
#define BCH_XATTR_INDEX_POSIX_ACL_DEFAULT 2
#define BCH_XATTR_INDEX_TRUSTED 3
#define BCH_XATTR_INDEX_SECURITY 4
struct bch_xattr {
struct bch_val v;
__u8 x_type;
__u8 x_name_len;
__le16 x_val_len;
__u8 x_name[];
} __attribute__((packed));
BKEY_VAL_TYPE(xattr, BCH_XATTR);
/* Superblock */
/* Version 0: Cache device
* Version 1: Backing device
* Version 2: Seed pointer into btree node checksum
* Version 3: Cache device with new UUID format
* Version 4: Backing device with data offset
* Version 5: All the incompat changes
* Version 6: Cache device UUIDs all in superblock, another incompat bset change
* Version 7: Encryption (expanded checksum fields), other random things
*/
#define BCACHE_SB_VERSION_CDEV_V0 0
#define BCACHE_SB_VERSION_BDEV 1
#define BCACHE_SB_VERSION_CDEV_WITH_UUID 3
#define BCACHE_SB_VERSION_BDEV_WITH_OFFSET 4
#define BCACHE_SB_VERSION_CDEV_V2 5
#define BCACHE_SB_VERSION_CDEV_V3 6
#define BCACHE_SB_VERSION_CDEV_V4 7
#define BCACHE_SB_VERSION_CDEV 7
#define BCACHE_SB_MAX_VERSION 7
#define BCH_SB_SECTOR 8
#define BCH_SB_LABEL_SIZE 32
#define BCH_SB_MEMBERS_MAX 64 /* XXX kill */
struct bch_member {
uuid_le uuid;
__le64 nbuckets; /* device size */
__le16 first_bucket; /* index of first bucket used */
__le16 bucket_size; /* sectors */
__le32 pad;
__le64 last_mount; /* time_t */
__le64 flags[2];
};
LE64_BITMASK(BCH_MEMBER_STATE, struct bch_member, flags[0], 0, 4)
LE64_BITMASK(BCH_MEMBER_TIER, struct bch_member, flags[0], 4, 8)
LE64_BITMASK(BCH_MEMBER_HAS_METADATA, struct bch_member, flags[0], 8, 9)
LE64_BITMASK(BCH_MEMBER_HAS_DATA, struct bch_member, flags[0], 9, 10)
LE64_BITMASK(BCH_MEMBER_REPLACEMENT, struct bch_member, flags[0], 10, 14)
LE64_BITMASK(BCH_MEMBER_DISCARD, struct bch_member, flags[0], 14, 15);
#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);
LE64_BITMASK(BCH_MEMBER_NR_WRITE_ERRORS,struct bch_member, flags[1], 20, 40);
#endif
enum bch_member_state {
BCH_MEMBER_STATE_RW = 0,
BCH_MEMBER_STATE_RO = 1,
BCH_MEMBER_STATE_FAILED = 2,
BCH_MEMBER_STATE_SPARE = 3,
BCH_MEMBER_STATE_NR = 4,
};
#define BCH_TIER_MAX 4U
enum cache_replacement {
CACHE_REPLACEMENT_LRU = 0,
CACHE_REPLACEMENT_FIFO = 1,
CACHE_REPLACEMENT_RANDOM = 2,
CACHE_REPLACEMENT_NR = 3,
};
struct bch_sb_layout {
uuid_le magic; /* bcachefs superblock UUID */
__u8 layout_type;
__u8 sb_max_size_bits; /* log2 of max sb size, in 512 byte sectors */
__u8 nr_superblocks;
__u8 pad[5];
__le64 sb_offset[61];
} __attribute__((packed));
#define BCH_SB_LAYOUT_SECTOR 7
struct bch_sb_field {
__u64 _data[0];
__le32 u64s;
__le32 type;
};
enum bch_sb_field_type {
BCH_SB_FIELD_journal = 0,
BCH_SB_FIELD_members = 1,
BCH_SB_FIELD_crypt = 2,
BCH_SB_FIELD_NR = 3,
};
struct bch_sb_field_journal {
struct bch_sb_field field;
__le64 buckets[0];
};
struct bch_sb_field_members {
struct bch_sb_field field;
struct bch_member members[0];
};
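/*
 * Superblock fields are variable length and stored back to back; u64s
 * counts the entire field, header included (which is why _data[0] sits at
 * the very start of struct bch_sb_field). A sketch of stepping to the
 * next field, assuming that layout:
 */
#if 0
static inline struct bch_sb_field *bch_sb_field_next(struct bch_sb_field *f)
{
	return (void *) f + __le32_to_cpu(f->u64s) * sizeof(__u64);
}
#endif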
/* Crypto: */
struct nonce {
__le32 d[4];
};
struct bch_key {
__le64 key[4];
};
#define BCH_KEY_MAGIC \
(((u64) 'b' << 0)|((u64) 'c' << 8)| \
((u64) 'h' << 16)|((u64) '*' << 24)| \
((u64) '*' << 32)|((u64) 'k' << 40)| \
((u64) 'e' << 48)|((u64) 'y' << 56))
struct bch_encrypted_key {
__le64 magic;
struct bch_key key;
};
/*
* If this field is present in the superblock, it stores an encryption key which
* is used to encrypt all other data/metadata. The key will normally be encrypted
* with the key userspace provides, but if encryption has been turned off we'll
* just store the master key unencrypted in the superblock so we can access the
* previously encrypted data.
*/
struct bch_sb_field_crypt {
struct bch_sb_field field;
__le64 flags;
__le64 kdf_flags;
struct bch_encrypted_key key;
};
LE64_BITMASK(BCH_CRYPT_KDF_TYPE, struct bch_sb_field_crypt, flags, 0, 4);
enum bch_kdf_types {
BCH_KDF_SCRYPT = 0,
BCH_KDF_NR = 1,
};
/* stored as base 2 log of scrypt params: */
LE64_BITMASK(BCH_KDF_SCRYPT_N, struct bch_sb_field_crypt, kdf_flags, 0, 16);
LE64_BITMASK(BCH_KDF_SCRYPT_R, struct bch_sb_field_crypt, kdf_flags, 16, 32);
LE64_BITMASK(BCH_KDF_SCRYPT_P, struct bch_sb_field_crypt, kdf_flags, 32, 48);
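/*
 * Whether the stored key is still wrapped can be told from the magic: if
 * encryption was turned off, the magic reads back as BCH_KEY_MAGIC in the
 * clear. A sketch of that check (assumed helper name):
 */
#if 0
static inline _Bool bch2_key_is_encrypted(struct bch_encrypted_key *key)
{
	return __le64_to_cpu(key->magic) != BCH_KEY_MAGIC;
}
#endif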
struct bch_sb_field_replication {
struct bch_sb_field field;
};
/*
* @offset - sector where this sb was written
* @version - on disk format version
* @magic - identifies as a bcachefs superblock (BCACHE_MAGIC)
* @uuid - used for generating various magic numbers and identifying
* member devices, never changes
* @user_uuid - user visible UUID, may be changed
* @label - filesystem label
* @seq - identifies most recent superblock, incremented each time
* superblock is written
* @features - enabled incompatible features
*/
struct bch_sb {
struct bch_csum csum;
__le64 version;
uuid_le magic;
uuid_le uuid;
uuid_le user_uuid;
__u8 label[BCH_SB_LABEL_SIZE];
__le64 offset;
__le64 seq;
__le16 block_size;
__u8 dev_idx;
__u8 nr_devices;
__le32 u64s;
__le64 time_base_lo;
__le32 time_base_hi;
__le32 time_precision;
__le64 flags[8];
__le64 features[2];
__le64 compat[2];
struct bch_sb_layout layout;
union {
struct bch_sb_field start[0];
__le64 _data[0];
};
} __attribute__((packed, aligned(8)));
/*
* Flags:
* BCH_SB_INITIALIZED - set on first mount
* BCH_SB_CLEAN - did we shut down cleanly? Just a hint, doesn't affect
* behaviour of mount/recovery path:
* BCH_SB_INODE_32BIT - limit inode numbers to 32 bits
* BCH_SB_128_BIT_MACS - 128 bit MACs instead of 80
* BCH_SB_ENCRYPTION_TYPE - if nonzero encryption is enabled; overrides
* DATA/META_CSUM_TYPE. Also indicates encryption
* algorithm in use, if/when we get more than one
*/
LE64_BITMASK(BCH_SB_INITIALIZED, struct bch_sb, flags[0], 0, 1);
LE64_BITMASK(BCH_SB_CLEAN, struct bch_sb, flags[0], 1, 2);
LE64_BITMASK(BCH_SB_CSUM_TYPE, struct bch_sb, flags[0], 2, 8);
LE64_BITMASK(BCH_SB_ERROR_ACTION, struct bch_sb, flags[0], 8, 12);
LE64_BITMASK(BCH_SB_BTREE_NODE_SIZE, struct bch_sb, flags[0], 12, 28);
LE64_BITMASK(BCH_SB_GC_RESERVE, struct bch_sb, flags[0], 28, 33);
LE64_BITMASK(BCH_SB_ROOT_RESERVE, struct bch_sb, flags[0], 33, 40);
LE64_BITMASK(BCH_SB_META_CSUM_TYPE, struct bch_sb, flags[0], 40, 44);
LE64_BITMASK(BCH_SB_DATA_CSUM_TYPE, struct bch_sb, flags[0], 44, 48);
LE64_BITMASK(BCH_SB_META_REPLICAS_WANT, struct bch_sb, flags[0], 48, 52);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_WANT, struct bch_sb, flags[0], 52, 56);
LE64_BITMASK(BCH_SB_META_REPLICAS_HAVE, struct bch_sb, flags[0], 56, 60);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_HAVE, struct bch_sb, flags[0], 60, 64);
LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE, struct bch_sb, flags[1], 4, 8);
LE64_BITMASK(BCH_SB_INODE_32BIT, struct bch_sb, flags[1], 8, 9);
LE64_BITMASK(BCH_SB_128_BIT_MACS, struct bch_sb, flags[1], 9, 10);
LE64_BITMASK(BCH_SB_ENCRYPTION_TYPE, struct bch_sb, flags[1], 10, 14);
LE64_BITMASK(BCH_SB_JOURNAL_ENTRY_SIZE, struct bch_sb, flags[1], 14, 20);
LE64_BITMASK(BCH_SB_META_REPLICAS_REQ, struct bch_sb, flags[1], 20, 24);
LE64_BITMASK(BCH_SB_DATA_REPLICAS_REQ, struct bch_sb, flags[1], 24, 28);
/* Features: */
enum bch_sb_features {
BCH_FEATURE_LZ4 = 0,
BCH_FEATURE_GZIP = 1,
};
/* options: */
#define BCH_REPLICAS_MAX 4U
#if 0
#define BCH_ERROR_ACTIONS() \
x(BCH_ON_ERROR_CONTINUE, 0, "continue") \
x(BCH_ON_ERROR_RO, 1, "remount-ro") \
x(BCH_ON_ERROR_PANIC, 2, "panic") \
x(BCH_NR_ERROR_ACTIONS, 3, NULL)
enum bch_error_actions {
#define x(_opt, _nr, _str) _opt = _nr,
BCH_ERROR_ACTIONS()
#undef x
};
#endif
enum bch_error_actions {
BCH_ON_ERROR_CONTINUE = 0,
BCH_ON_ERROR_RO = 1,
BCH_ON_ERROR_PANIC = 2,
BCH_NR_ERROR_ACTIONS = 3,
};
enum bch_csum_opts {
BCH_CSUM_OPT_NONE = 0,
BCH_CSUM_OPT_CRC32C = 1,
BCH_CSUM_OPT_CRC64 = 2,
BCH_CSUM_OPT_NR = 3,
};
enum bch_str_hash_opts {
BCH_STR_HASH_CRC32C = 0,
BCH_STR_HASH_CRC64 = 1,
BCH_STR_HASH_SIPHASH = 2,
BCH_STR_HASH_NR = 3,
};
enum bch_compression_opts {
BCH_COMPRESSION_NONE = 0,
BCH_COMPRESSION_LZ4 = 1,
BCH_COMPRESSION_GZIP = 2,
BCH_COMPRESSION_NR = 3,
};
/* backing device specific stuff: */
struct backingdev_sb {
__le64 csum;
__le64 offset; /* sector where this sb was written */
__le64 version; /* of on disk format */
uuid_le magic; /* bcachefs superblock UUID */
uuid_le disk_uuid;
/*
* Internal cache set UUID - xored with various magic numbers and thus
* must never change:
*/
union {
uuid_le set_uuid;
__le64 set_magic;
};
__u8 label[BCH_SB_LABEL_SIZE];
__le64 flags;
/* Incremented each time superblock is written: */
__le64 seq;
/*
* User visible UUID for identifying the cache set the user is allowed
* to change:
*
* XXX hooked up?
*/
uuid_le user_uuid;
__le64 pad1[6];
__le64 data_offset;
__le16 block_size; /* sectors */
__le16 pad2[3];
__le32 last_mount; /* time_t */
__le16 pad3;
/* size of variable length portion - always 0 for backingdev superblock */
__le16 u64s;
__u64 _data[0];
};
LE64_BITMASK(BDEV_CACHE_MODE, struct backingdev_sb, flags, 0, 4);
#define CACHE_MODE_WRITETHROUGH 0U
#define CACHE_MODE_WRITEBACK 1U
#define CACHE_MODE_WRITEAROUND 2U
#define CACHE_MODE_NONE 3U
LE64_BITMASK(BDEV_STATE, struct backingdev_sb, flags, 61, 63);
#define BDEV_STATE_NONE 0U
#define BDEV_STATE_CLEAN 1U
#define BDEV_STATE_DIRTY 2U
#define BDEV_STATE_STALE 3U
#define BDEV_DATA_START_DEFAULT 16 /* sectors */
static inline _Bool __SB_IS_BDEV(__u64 version)
{
return version == BCACHE_SB_VERSION_BDEV
|| version == BCACHE_SB_VERSION_BDEV_WITH_OFFSET;
}
static inline _Bool SB_IS_BDEV(const struct bch_sb *sb)
{
return __SB_IS_BDEV(sb->version);
}
/*
* Magic numbers
*
* The various other data structures have their own magic numbers, which are
* xored with the first part of the cache set's UUID
*/
#define BCACHE_MAGIC \
UUID_LE(0xf67385c6, 0x1a4e, 0xca45, \
0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
#define BCACHE_STATFS_MAGIC 0xca451a4e
#define JSET_MAGIC __cpu_to_le64(0x245235c1a3625032ULL)
#define PSET_MAGIC __cpu_to_le64(0x6750e15f87337f91ULL)
#define BSET_MAGIC __cpu_to_le64(0x90135c78b99e07f5ULL)
static inline __le64 __bch2_sb_magic(struct bch_sb *sb)
{
__le64 ret;
memcpy(&ret, &sb->uuid, sizeof(ret));
return ret;
}
static inline __u64 __jset_magic(struct bch_sb *sb)
{
return __le64_to_cpu(__bch2_sb_magic(sb) ^ JSET_MAGIC);
}
static inline __u64 __pset_magic(struct bch_sb *sb)
{
return __le64_to_cpu(__bch2_sb_magic(sb) ^ PSET_MAGIC);
}
static inline __u64 __bset_magic(struct bch_sb *sb)
{
return __le64_to_cpu(__bch2_sb_magic(sb) ^ BSET_MAGIC);
}
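/*
 * Usage sketch: metadata read from disk is validated against the
 * per filesystem magic, e.g. for a journal entry j (illustrative only,
 * error handling elided):
 */
#if 0
	if (__le64_to_cpu(j->magic) != __jset_magic(sb))
		return -EINVAL;		/* not ours, or corrupt */
#endif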
/* Journal */
#define BCACHE_JSET_VERSION_UUIDv1 1
#define BCACHE_JSET_VERSION_UUID 1 /* Always latest UUID format */
#define BCACHE_JSET_VERSION_JKEYS 2
#define BCACHE_JSET_VERSION 2
struct jset_entry {
__le16 u64s;
__u8 btree_id;
__u8 level;
__le32 flags; /* designates what this jset holds */
union {
struct bkey_i start[0];
__u64 _data[0];
};
};
#define JSET_KEYS_U64s (sizeof(struct jset_entry) / sizeof(__u64))
LE32_BITMASK(JOURNAL_ENTRY_TYPE, struct jset_entry, flags, 0, 8);
enum {
JOURNAL_ENTRY_BTREE_KEYS = 0,
JOURNAL_ENTRY_BTREE_ROOT = 1,
JOURNAL_ENTRY_PRIO_PTRS = 2,
/*
* Journal sequence numbers can be blacklisted: bsets record the max
* sequence number of all the journal entries they contain updates for,
* so that on recovery we can ignore those bsets that contain index
* updates newer than what made it into the journal.
*
* This means that we can't reuse that journal_seq - we have to skip it,
* and then record that we skipped it so that the next time we crash and
* recover we don't think there was a missing journal entry.
*/
JOURNAL_ENTRY_JOURNAL_SEQ_BLACKLISTED = 3,
};
/*
* On disk format for a journal entry:
* seq is monotonically increasing; every journal entry has its own unique
* sequence number.
*
* last_seq is the oldest journal entry that still has keys the btree hasn't
* flushed to disk yet.
*
* version is for on disk format changes.
*/
struct jset {
struct bch_csum csum;
__le64 magic;
__le64 seq;
__le32 version;
__le32 flags;
__le32 u64s; /* size of d[] in u64s */
__u8 encrypted_start[0];
__le16 read_clock;
__le16 write_clock;
/* Sequence number of oldest dirty journal entry */
__le64 last_seq;
union {
struct jset_entry start[0];
__u64 _data[0];
};
} __attribute__((packed));
LE32_BITMASK(JSET_CSUM_TYPE, struct jset, flags, 0, 4);
LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
#define BCH_JOURNAL_BUCKETS_MIN 20
/* Bucket prios/gens */
struct prio_set {
struct bch_csum csum;
__le64 magic;
__le32 nonce[3];
__le16 version;
__le16 flags;
__u8 encrypted_start[0];
__le64 next_bucket;
struct bucket_disk {
__le16 read_prio;
__le16 write_prio;
__u8 gen;
} __attribute__((packed)) data[];
} __attribute__((packed));
LE32_BITMASK(PSET_CSUM_TYPE, struct prio_set, flags, 0, 4);
/* Btree: */
#define DEFINE_BCH_BTREE_IDS() \
DEF_BTREE_ID(EXTENTS, 0, "extents") \
DEF_BTREE_ID(INODES, 1, "inodes") \
DEF_BTREE_ID(DIRENTS, 2, "dirents") \
DEF_BTREE_ID(XATTRS, 3, "xattrs")
#define DEF_BTREE_ID(kwd, val, name) BTREE_ID_##kwd = val,
enum btree_id {
DEFINE_BCH_BTREE_IDS()
BTREE_ID_NR
};
#undef DEF_BTREE_ID
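/*
 * The same x-macro can stamp out a name table matching the enum
 * (illustrative; a table like this would live with the btree code):
 */
#if 0
#define DEF_BTREE_ID(kwd, val, name)	[BTREE_ID_##kwd] = name,
static const char * const btree_id_names[] = {
	DEFINE_BCH_BTREE_IDS()
};
#undef DEF_BTREE_ID
#endif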
#define BTREE_MAX_DEPTH 4U
/* Btree nodes */
/* Version 1: Seed pointer into btree node checksum */
#define BCACHE_BSET_CSUM 1
#define BCACHE_BSET_KEY_v1 2
#define BCACHE_BSET_JOURNAL_SEQ 3
#define BCACHE_BSET_VERSION 3
/*
* Btree nodes
*
* On disk a btree node is a list/log of these; within each set the keys are
* sorted
*/
struct bset {
__le64 seq;
/*
* Highest journal entry this bset contains keys for.
* If on recovery we don't see that journal entry, this bset is ignored:
* this allows us to preserve the order of all index updates after a
* crash, since the journal records a total order of all index updates
* and anything that didn't make it to the journal doesn't get used.
*/
__le64 journal_seq;
__le32 flags;
__le16 version;
__le16 u64s; /* count of d[] in u64s */
union {
struct bkey_packed start[0];
__u64 _data[0];
};
} __attribute__((packed));
LE32_BITMASK(BSET_CSUM_TYPE, struct bset, flags, 0, 4);
LE32_BITMASK(BSET_BIG_ENDIAN, struct bset, flags, 4, 5);
LE32_BITMASK(BSET_SEPARATE_WHITEOUTS, struct bset, flags, 5, 6);
struct btree_node {
struct bch_csum csum;
__le64 magic;
/* this flags field is encrypted, unlike bset->flags: */
__le64 flags;
/* Closed interval: */
struct bpos min_key;
struct bpos max_key;
struct bch_extent_ptr ptr;
struct bkey_format format;
union {
struct bset keys;
struct {
__u8 pad[22];
__le16 u64s;
__u64 _data[0];
};
};
} __attribute__((packed));
LE64_BITMASK(BTREE_NODE_ID, struct btree_node, flags, 0, 4);
LE64_BITMASK(BTREE_NODE_LEVEL, struct btree_node, flags, 4, 8);
struct btree_node_entry {
struct bch_csum csum;
union {
struct bset keys;
struct {
__u8 pad[22];
__le16 u64s;
__u64 _data[0];
};
};
} __attribute__((packed));
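/*
 * The pad[22]/u64s overlay in btree_node and btree_node_entry depends on
 * u64s living at byte 22 of struct bset (8 byte seq + 8 byte journal_seq
 * + 4 byte flags + 2 byte version). An illustrative compile time check
 * (requires <stddef.h> for offsetof()):
 */
#if 0
_Static_assert(offsetof(struct bset, u64s) == 22, "bset layout changed");
#endif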
#ifdef __cplusplus
}
#endif
#endif /* _LINUX_BCACHE_H */
/* vim: set foldnestmax=2: */