bcachefs-tools/libbcache.c


#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <uuid/uuid.h>

#include "linux/bcache.h"
#include "libbcache.h"
#include "checksum.h"
#include "crypto.h"
#include "opts.h"
#include "super-io.h"

#define NSEC_PER_SEC 1000000000L

#define BCH_MIN_NR_NBUCKETS (1 << 10)

/* minimum size filesystem we can create, given a bucket size: */
static u64 min_size(unsigned bucket_size)
{
	return BCH_MIN_NR_NBUCKETS * bucket_size;
}
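
/*
 * For example: with the 256 sector (128k) bucket floor used in
 * bcache_format() below, min_size() comes to (1 << 10) * 256 sectors = 128M.
 */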

static void init_layout(struct bch_sb_layout *l, unsigned block_size,
			u64 start, u64 end)
{
	unsigned sb_size;
	u64 backup; /* offset of 2nd sb */

	memset(l, 0, sizeof(*l));

	if (start != BCH_SB_SECTOR)
		start = round_up(start, block_size);

	end = round_down(end, block_size);

	if (start >= end)
		die("insufficient space for superblocks");

	/*
	 * Create two superblocks in the allowed range: reserve a maximum of 64k
	 */
	sb_size = min_t(u64, 128, (end - start) / 2);

	backup = start + sb_size;
	backup = round_up(backup, block_size);

	backup = min(backup, end);

	sb_size = min(end - backup, backup - start);
	sb_size = rounddown_pow_of_two(sb_size);

	if (sb_size < 8)
		die("insufficient space for superblocks");

	l->magic		= BCACHE_MAGIC;
	l->layout_type		= 0;
	l->nr_superblocks	= 2;
	l->sb_max_size_bits	= ilog2(sb_size);
	l->sb_offset[0]		= cpu_to_le64(start);
	l->sb_offset[1]		= cpu_to_le64(backup);
}
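
/*
 * Worked example for the default window passed in from bcache_format() below
 * (sb window [BCH_SB_SECTOR, BCH_SB_SECTOR + 256), 4k blocks; assumes
 * BCH_SB_SECTOR is sector 8): sb_size = min(128, (264 - 8) / 2) = 128,
 * backup rounds up to sector 136, and the final sb_size is
 * rounddown_pow_of_two(min(264 - 136, 136 - 8)) = 128 sectors (64k) -
 * i.e. superblocks at sectors 8 and 136 with sb_max_size_bits = 7.
 */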

struct bch_sb *bcache_format(struct format_opts opts,
			     struct dev_opts *devs, size_t nr_devs)
{
	struct bch_sb *sb;
	struct dev_opts *i;
	struct bch_sb_field_members *mi;
	unsigned u64s;

	/* calculate block size: */
	if (!opts.block_size)
		for (i = devs; i < devs + nr_devs; i++)
			opts.block_size = max(opts.block_size,
					      get_blocksize(i->path, i->fd));

	/* calculate bucket sizes: */
	for (i = devs; i < devs + nr_devs; i++) {
		if (!i->sb_offset) {
			i->sb_offset	= BCH_SB_SECTOR;
			i->sb_end	= BCH_SB_SECTOR + 256;
		}

		if (!i->size)
			i->size = get_size(i->path, i->fd) >> 9;

		if (!i->bucket_size) {
			if (i->size < min_size(opts.block_size))
				die("cannot format %s, too small (%llu sectors, min %llu)",
				    i->path, i->size, min_size(opts.block_size));

			/* Bucket size must be >= block size: */
			i->bucket_size = opts.block_size;

			/* Bucket size must be >= btree node size: */
			i->bucket_size = max(i->bucket_size, opts.btree_node_size);

			/* Want a bucket size of at least 128k, if possible: */
			i->bucket_size = max(i->bucket_size, 256U);

			if (i->size >= min_size(i->bucket_size)) {
				unsigned scale = max(1,
					ilog2(i->size / min_size(i->bucket_size)) / 4);

				scale = rounddown_pow_of_two(scale);

				/* max bucket size 1 mb */
				i->bucket_size = min(i->bucket_size * scale, 1U << 11);
			} else {
				do {
					i->bucket_size /= 2;
				} while (i->size < min_size(i->bucket_size));
			}
		}

		i->nbuckets = i->size / i->bucket_size;

		if (i->bucket_size < opts.block_size)
			die("Bucket size cannot be smaller than block size");

		if (i->bucket_size < opts.btree_node_size)
			die("Bucket size cannot be smaller than btree node size");

		if (i->nbuckets < BCH_MIN_NR_NBUCKETS)
			die("Not enough buckets: %llu, need %u (bucket size %u)",
			    i->nbuckets, BCH_MIN_NR_NBUCKETS, i->bucket_size);
	}
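
	/*
	 * Example of the bucket sizing heuristic above: a 2^31 sector (1T)
	 * device starts at the 256 sector (128k) floor; size / min_size(256)
	 * = 2^31 / 2^18 = 2^13, so scale = rounddown_pow_of_two(max(1, 13 / 4))
	 * = 2 and we end up with 512 sector (256k) buckets, well under the
	 * 1 mb cap.
	 */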

	/* calculate btree node size: */
	if (!opts.btree_node_size) {
		/* 256k default btree node size */
		opts.btree_node_size = 512;

		for (i = devs; i < devs + nr_devs; i++)
			opts.btree_node_size =
				min(opts.btree_node_size, i->bucket_size);
	}

	if (!opts.max_journal_entry_size) {
		/* 2 MB default: */
		opts.max_journal_entry_size = 4096;
	}

	opts.max_journal_entry_size =
		roundup_pow_of_two(opts.max_journal_entry_size);

	if (uuid_is_null(opts.uuid.b))
		uuid_generate(opts.uuid.b);

	sb = calloc(1, sizeof(*sb) +
		    sizeof(struct bch_sb_field_members) +
		    sizeof(struct bch_member) * nr_devs +
		    sizeof(struct bch_sb_field_crypt));

	sb->version	= cpu_to_le64(BCACHE_SB_VERSION_CDEV_V4);
	sb->magic	= BCACHE_MAGIC;
	sb->block_size	= cpu_to_le16(opts.block_size);
	sb->user_uuid	= opts.uuid;
	sb->nr_devices	= nr_devs;

	uuid_generate(sb->uuid.b);

	if (opts.label)
		strncpy((char *) sb->label, opts.label, sizeof(sb->label));

	SET_BCH_SB_CSUM_TYPE(sb, opts.meta_csum_type);
	SET_BCH_SB_META_CSUM_TYPE(sb, opts.meta_csum_type);
	SET_BCH_SB_DATA_CSUM_TYPE(sb, opts.data_csum_type);
	SET_BCH_SB_COMPRESSION_TYPE(sb, opts.compression_type);

	SET_BCH_SB_BTREE_NODE_SIZE(sb, opts.btree_node_size);
	SET_BCH_SB_GC_RESERVE(sb, 8);

	SET_BCH_SB_META_REPLICAS_WANT(sb, opts.meta_replicas);
	SET_BCH_SB_META_REPLICAS_HAVE(sb, opts.meta_replicas);
	SET_BCH_SB_META_REPLICAS_REQ(sb, opts.meta_replicas_required);

	SET_BCH_SB_DATA_REPLICAS_WANT(sb, opts.data_replicas);
	SET_BCH_SB_DATA_REPLICAS_HAVE(sb, opts.data_replicas);
	SET_BCH_SB_DATA_REPLICAS_REQ(sb, opts.data_replicas_required);

	SET_BCH_SB_ERROR_ACTION(sb, opts.on_error_action);
	SET_BCH_SB_STR_HASH_TYPE(sb, BCH_STR_HASH_SIPHASH);
	SET_BCH_SB_JOURNAL_ENTRY_SIZE(sb, ilog2(opts.max_journal_entry_size));

	struct timespec now;
	if (clock_gettime(CLOCK_REALTIME, &now))
		die("error getting current time: %s", strerror(errno));

	sb->time_base_lo	= cpu_to_le64(now.tv_sec * NSEC_PER_SEC + now.tv_nsec);
	sb->time_precision	= cpu_to_le32(1);

	if (opts.encrypted) {
		struct bch_sb_field_crypt *crypt = vstruct_end(sb);

		u64s = sizeof(struct bch_sb_field_crypt) / sizeof(u64);

		le32_add_cpu(&sb->u64s, u64s);
		crypt->field.u64s = cpu_to_le32(u64s);
		crypt->field.type = BCH_SB_FIELD_crypt;

		bch_sb_crypt_init(sb, crypt, opts.passphrase);
		SET_BCH_SB_ENCRYPTION_TYPE(sb, 1);
	}

	mi = vstruct_end(sb);
	u64s = (sizeof(struct bch_sb_field_members) +
		sizeof(struct bch_member) * nr_devs) / sizeof(u64);

	le32_add_cpu(&sb->u64s, u64s);
	mi->field.u64s = cpu_to_le32(u64s);
	mi->field.type = BCH_SB_FIELD_members;

	for (i = devs; i < devs + nr_devs; i++) {
		struct bch_member *m = mi->members + (i - devs);

		uuid_generate(m->uuid.b);
		m->nbuckets	= cpu_to_le64(i->nbuckets);
		m->first_bucket	= 0;
		m->bucket_size	= cpu_to_le16(i->bucket_size);

		SET_BCH_MEMBER_TIER(m, i->tier);
		SET_BCH_MEMBER_REPLACEMENT(m, CACHE_REPLACEMENT_LRU);
		SET_BCH_MEMBER_DISCARD(m, i->discard);
	}

	for (i = devs; i < devs + nr_devs; i++) {
		sb->dev_idx = i - devs;

		init_layout(&sb->layout, opts.block_size,
			    i->sb_offset, i->sb_end);

		if (i->sb_offset == BCH_SB_SECTOR) {
			/* Zero start of disk */
			static const char zeroes[BCH_SB_SECTOR << 9];

			xpwrite(i->fd, zeroes, BCH_SB_SECTOR << 9, 0);
		}

		bcache_super_write(i->fd, sb);
		close(i->fd);
	}

	return sb;
}
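
/*
 * Hypothetical caller sketch (not from this file; defaults beyond the fields
 * handled above are assumptions): open each device, fill in a dev_opts, and
 * let bcache_format() pick the block, bucket and btree node sizes:
 *
 *	struct format_opts opts = {
 *		.meta_replicas	= 1,
 *		.data_replicas	= 1,
 *	};
 *	struct dev_opts dev = { .path = "/dev/sdb" };
 *
 *	dev.fd = xopen(dev.path, O_RDWR|O_EXCL);
 *	free(bcache_format(opts, &dev, 1));
 *
 * bcache_format() closes the device fds itself and returns the superblock it
 * wrote, which the caller owns.
 */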

void bcache_super_write(int fd, struct bch_sb *sb)
{
	struct nonce nonce = { 0 };

	for (unsigned i = 0; i < sb->layout.nr_superblocks; i++) {
		sb->offset = sb->layout.sb_offset[i];

		if (sb->offset == BCH_SB_SECTOR) {
			/* Write backup layout */
			xpwrite(fd, &sb->layout, sizeof(sb->layout),
				BCH_SB_LAYOUT_SECTOR << 9);
		}

		sb->csum = csum_vstruct(NULL, BCH_SB_CSUM_TYPE(sb), nonce, sb);
		xpwrite(fd, sb, vstruct_bytes(sb),
			le64_to_cpu(sb->offset) << 9);
	}

	fsync(fd);
}

struct bch_sb *__bcache_super_read(int fd, u64 sector)
{
	struct bch_sb sb, *ret;

	xpread(fd, &sb, sizeof(sb), sector << 9);

	if (memcmp(&sb.magic, &BCACHE_MAGIC, sizeof(sb.magic)))
		die("not a bcache superblock");

	size_t bytes = vstruct_bytes(&sb);

	ret = malloc(bytes);

	xpread(fd, ret, bytes, sector << 9);

	return ret;
}

struct bch_sb *bcache_super_read(const char *path)
{
	int fd = xopen(path, O_RDONLY);
	struct bch_sb *sb = __bcache_super_read(fd, BCH_SB_SECTOR);

	close(fd);
	return sb;
}
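
/*
 * Example (hypothetical caller): read and dump the superblock of a formatted
 * device.
 *
 *	struct bch_sb *sb = bcache_super_read("/dev/sdb");
 *
 *	bcache_super_print(sb, 0);
 *	free(sb);
 *
 * The units argument (0 here) is illustrative; see pr_units() for the values
 * it actually accepts.
 */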

void bcache_super_print(struct bch_sb *sb, int units)
{
	struct bch_sb_field_members *mi;
	char user_uuid_str[40], internal_uuid_str[40], member_uuid_str[40];
	char label[BCH_SB_LABEL_SIZE + 1];
	unsigned i;

	memset(label, 0, sizeof(label));
	memcpy(label, sb->label, sizeof(sb->label));

	uuid_unparse(sb->user_uuid.b, user_uuid_str);
	uuid_unparse(sb->uuid.b, internal_uuid_str);
printf("External UUID: %s\n"
"Internal UUID: %s\n"
"Label: %s\n"
"Version: %llu\n"
"Block_size: %s\n"
"Btree node size: %s\n"
"Max journal entry size: %s\n"
"Error action: %s\n"
"Clean: %llu\n"
"Metadata replicas: have %llu, want %llu\n"
"Data replicas: have %llu, want %llu\n"
"Metadata checksum type: %s\n"
"Data checksum type: %s\n"
"Compression type: %s\n"
"String hash type: %s\n"
"32 bit inodes: %llu\n"
"GC reserve percentage: %llu%%\n"
"Root reserve percentage: %llu%%\n"
"Devices: %u\n",
user_uuid_str,
internal_uuid_str,
label,
le64_to_cpu(sb->version),
2017-02-02 06:16:42 +03:00
pr_units(le16_to_cpu(sb->block_size), units),
2016-10-04 06:22:17 +03:00
pr_units(BCH_SB_BTREE_NODE_SIZE(sb), units),
pr_units(1U << BCH_SB_JOURNAL_ENTRY_SIZE(sb), units),
2016-10-04 06:22:17 +03:00
BCH_SB_ERROR_ACTION(sb) < BCH_NR_ERROR_ACTIONS
? bch_error_actions[BCH_SB_ERROR_ACTION(sb)]
: "unknown",
2016-10-04 06:22:17 +03:00
BCH_SB_CLEAN(sb),
2016-10-04 06:22:17 +03:00
BCH_SB_META_REPLICAS_HAVE(sb),
BCH_SB_META_REPLICAS_WANT(sb),
BCH_SB_DATA_REPLICAS_HAVE(sb),
BCH_SB_DATA_REPLICAS_WANT(sb),
2016-10-04 06:22:17 +03:00
BCH_SB_META_CSUM_TYPE(sb) < BCH_CSUM_NR
? bch_csum_types[BCH_SB_META_CSUM_TYPE(sb)]
: "unknown",
2016-10-04 06:22:17 +03:00
BCH_SB_DATA_CSUM_TYPE(sb) < BCH_CSUM_NR
? bch_csum_types[BCH_SB_DATA_CSUM_TYPE(sb)]
: "unknown",
2016-10-04 06:22:17 +03:00
BCH_SB_COMPRESSION_TYPE(sb) < BCH_COMPRESSION_NR
? bch_compression_types[BCH_SB_COMPRESSION_TYPE(sb)]
: "unknown",
2016-10-04 06:22:17 +03:00
BCH_SB_STR_HASH_TYPE(sb) < BCH_STR_HASH_NR
? bch_str_hash_types[BCH_SB_STR_HASH_TYPE(sb)]
: "unknown",
2016-10-04 06:22:17 +03:00
BCH_SB_INODE_32BIT(sb),
BCH_SB_GC_RESERVE(sb),
BCH_SB_ROOT_RESERVE(sb),
2016-10-04 06:22:17 +03:00
sb->nr_devices);
2016-10-04 06:22:17 +03:00
mi = bch_sb_get_members(sb);
if (!mi) {
printf("Member info section missing\n");
return;
}

	for (i = 0; i < sb->nr_devices; i++) {
		struct bch_member *m = mi->members + i;
		time_t last_mount = le64_to_cpu(m->last_mount);

		uuid_unparse(m->uuid.b, member_uuid_str);

		printf("\n"
		       "Device %u:\n"
		       " UUID: %s\n"
		       " Size: %s\n"
		       " Bucket size: %s\n"
		       " First bucket: %u\n"
		       " Buckets: %llu\n"
		       " Last mount: %s\n"
		       " State: %s\n"
		       " Tier: %llu\n"
		       " Has metadata: %llu\n"
		       " Has data: %llu\n"
		       " Replacement policy: %s\n"
		       " Discard: %llu\n",
		       i, member_uuid_str,
		       pr_units(le16_to_cpu(m->bucket_size) *
				le64_to_cpu(m->nbuckets), units),
		       pr_units(le16_to_cpu(m->bucket_size), units),
		       le16_to_cpu(m->first_bucket),
		       le64_to_cpu(m->nbuckets),
		       last_mount ? ctime(&last_mount) : "(never)",
		       BCH_MEMBER_STATE(m) < BCH_MEMBER_STATE_NR
		       ? bch_dev_state[BCH_MEMBER_STATE(m)]
		       : "unknown",
		       BCH_MEMBER_TIER(m),
		       BCH_MEMBER_HAS_METADATA(m),
		       BCH_MEMBER_HAS_DATA(m),
		       BCH_MEMBER_REPLACEMENT(m) < CACHE_REPLACEMENT_NR
		       ? bch_cache_replacement_policies[BCH_MEMBER_REPLACEMENT(m)]
		       : "unknown",
		       BCH_MEMBER_DISCARD(m));
	}
}