Update bcachefs sources to 2afdc642c2 bcachefs: kill bucket_data_type, improve disk usage stats

Kent Overstreet 2017-12-22 20:37:52 -05:00
parent da37a5f204
commit 14117dcdfb
18 changed files with 418 additions and 219 deletions

View File

@@ -1 +1 @@
b2f5acc6709a25f6134714d763e3f6ace1e2cc55
2afdc642c2ab4d629993c7f064765ecf25ee483f

View File

@@ -109,7 +109,7 @@ static void pd_controllers_update(struct work_struct *work)
u64 size = bucket_to_sector(ca, ca->mi.nbuckets -
ca->mi.first_bucket) << 9;
u64 dirty = bucket_to_sector(ca,
stats.buckets[S_DIRTY]) << 9;
stats.buckets[BCH_DATA_USER]) << 9;
u64 free = bucket_to_sector(ca,
__dev_buckets_free(ca, stats)) << 9;
/*
@@ -117,10 +117,10 @@ static void pd_controllers_update(struct work_struct *work)
* reclaimed by copy GC
*/
s64 fragmented = (bucket_to_sector(ca,
stats.buckets[S_DIRTY] +
stats.buckets_cached) -
(stats.sectors[S_DIRTY] +
stats.sectors_cached)) << 9;
stats.buckets[BCH_DATA_USER] +
stats.buckets[BCH_DATA_CACHED]) -
(stats.sectors[BCH_DATA_USER] +
stats.sectors[BCH_DATA_CACHED])) << 9;
fragmented = max(0LL, fragmented);
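For reference, a worked instance of the fragmented calculation above, with assumed numbers rather than anything taken from this diff:

/*
 * Illustration only: 128-sector buckets; BCH_DATA_USER plus BCH_DATA_CACHED
 * own 100 buckets but hold only 10000 live sectors:
 *
 *   sectors owned by those buckets: 100 * 128 = 12800
 *   fragmented = (12800 - 10000) << 9 = 2800 sectors = 1433600 bytes
 *
 * The max(0LL, ...) clamp covers transiently inconsistent usage reads,
 * where the live sector count can momentarily exceed the bucket footprint.
 */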

View File

@@ -883,7 +883,8 @@ enum bch_data_type {
BCH_DATA_JOURNAL = 2,
BCH_DATA_BTREE = 3,
BCH_DATA_USER = 4,
BCH_DATA_NR = 5,
BCH_DATA_CACHED = 5,
BCH_DATA_NR = 6,
};
struct bch_replicas_entry {

View File

@@ -45,6 +45,7 @@ struct bch_ioctl_incremental {
#define BCH_IOCTL_DISK_SET_STATE _IOW(0xbc, 8, struct bch_ioctl_disk_set_state)
#define BCH_IOCTL_DISK_EVACUATE _IOW(0xbc, 9, struct bch_ioctl_disk)
#define BCH_IOCTL_DATA _IOW(0xbc, 10, struct bch_ioctl_data)
#define BCH_IOCTL_USAGE _IOWR(0xbc, 11, struct bch_ioctl_usage)
struct bch_ioctl_query_uuid {
uuid_le uuid;
@@ -93,4 +94,33 @@ struct bch_ioctl_data {
__u64 end_offset;
};
struct bch_ioctl_dev_usage {
__u8 state;
__u8 alive;
__u8 pad[6];
__u32 dev;
__u32 bucket_size;
__u64 nr_buckets;
__u64 buckets[BCH_DATA_NR];
__u64 sectors[BCH_DATA_NR];
};
struct bch_ioctl_fs_usage {
__u64 capacity;
__u64 used;
__u64 online_reserved;
__u64 persistent_reserved[BCH_REPLICAS_MAX];
__u64 sectors[BCH_DATA_NR][BCH_REPLICAS_MAX];
};
struct bch_ioctl_usage {
__u16 nr_devices;
__u16 pad[3];
struct bch_ioctl_fs_usage fs;
struct bch_ioctl_dev_usage devs[0];
};
#endif /* _BCACHEFS_IOCTL_H */
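A minimal userspace sketch of driving the new ioctl (not part of this commit; the control-fd setup and error handling are assumptions, and BCH_DATA_USER comes from the on-disk format header). The caller sizes the flexible devs[] array itself and reports its capacity in nr_devices; the handler added below (bch2_ioctl_usage()) returns -ENOSPC if that capacity is too small:

#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "bcachefs_ioctl.h"

/* fs_fd: an already-open bcachefs control file descriptor (assumed) */
static int print_fs_usage(int fs_fd, unsigned nr_devices)
{
	struct bch_ioctl_usage *u;
	unsigned i;

	u = calloc(1, sizeof(*u) + nr_devices * sizeof(u->devs[0]));
	if (!u)
		return -1;

	u->nr_devices = nr_devices;	/* capacity of devs[], checked by the kernel */

	if (ioctl(fs_fd, BCH_IOCTL_USAGE, u)) {
		free(u);		/* on -ENOSPC, retry with a larger array */
		return -1;
	}

	printf("capacity %llu sectors, used %llu sectors\n",
	       (unsigned long long) u->fs.capacity,
	       (unsigned long long) u->fs.used);

	for (i = 0; i < nr_devices; i++)
		if (u->devs[i].alive)
			printf("dev %u: %llu user buckets of %llu\n", i,
			       (unsigned long long) u->devs[i].buckets[BCH_DATA_USER],
			       (unsigned long long) u->devs[i].nr_buckets);

	free(u);
	return 0;
}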

View File

@@ -157,10 +157,9 @@ int bch2_btree_mark_key_initial(struct bch_fs *c, enum bkey_type type,
const struct bch_extent_ptr *ptr;
if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
(!c->opts.nofsck &&
fsck_err_on(!bch2_sb_has_replicas(c, e, data_type), c,
"superblock not marked as containing replicas (type %u)",
data_type))) {
fsck_err_on(!bch2_sb_has_replicas(c, e, data_type), c,
"superblock not marked as containing replicas (type %u)",
data_type)) {
ret = bch2_check_mark_super(c, e, data_type);
if (ret)
return ret;
@@ -294,16 +293,21 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id)
static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
u64 start, u64 end,
enum bucket_data_type type,
enum bch_data_type type,
unsigned flags)
{
u64 b = sector_to_bucket(ca, start);
do {
bch2_mark_metadata_bucket(c, ca, ca->buckets + b, type,
unsigned sectors =
min_t(u64, bucket_to_sector(ca, b + 1), end) - start;
bch2_mark_metadata_bucket(c, ca, ca->buckets + b,
type, sectors,
gc_phase(GC_PHASE_SB), flags);
b++;
} while (b < sector_to_bucket(ca, end));
start += sectors;
} while (start < end);
}
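The rewritten loop charges each bucket only for the sectors that fall inside [start, end), where the old version implicitly charged a full bucket_size per iteration. A trace with assumed numbers:

/*
 * Illustration only: bucket_size = 8 sectors, start = 3, end = 17:
 *   b = 0: sectors = min(8, 17)  - 3  = 5;  start -> 8
 *   b = 1: sectors = min(16, 17) - 8  = 8;  start -> 16
 *   b = 2: sectors = min(24, 17) - 16 = 1;  start -> 17, loop ends
 */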
void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
@@ -320,11 +324,11 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
if (offset == BCH_SB_SECTOR)
mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
BUCKET_SB, flags);
BCH_DATA_SB, flags);
mark_metadata_sectors(c, ca, offset,
offset + (1 << layout->sb_max_size_bits),
BUCKET_SB, flags);
BCH_DATA_SB, flags);
}
spin_lock(&c->journal.lock);
@@ -332,7 +336,8 @@ void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
for (i = 0; i < ca->journal.nr; i++) {
b = ca->journal.buckets[i];
bch2_mark_metadata_bucket(c, ca, ca->buckets + b,
BUCKET_JOURNAL,
BCH_DATA_JOURNAL,
ca->mi.bucket_size,
gc_phase(GC_PHASE_SB), flags);
}

View File

@@ -82,7 +82,7 @@ static void bch2_fs_stats_verify(struct bch_fs *c)
__bch2_fs_usage_read(c);
unsigned i;
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
if ((s64) stats.s[i].data[S_META] < 0)
panic("replicas %u meta underflow: %lli\n",
i + 1, stats.s[i].data[S_META]);
@@ -106,10 +106,10 @@ static void bch2_dev_stats_verify(struct bch_dev *ca)
struct bch_dev_usage stats =
__bch2_dev_usage_read(ca);
u64 n = ca->mi.nbuckets - ca->mi.first_bucket;
unsigned i;
BUG_ON(stats.buckets[S_META] > n);
BUG_ON(stats.buckets[S_DIRTY] > n);
BUG_ON(stats.buckets_cached > n);
for (i = 0; i < ARRAY_SIZE(stats.buckets); i++)
BUG_ON(stats.buckets[i] > n);
BUG_ON(stats.buckets_alloc > n);
BUG_ON(stats.buckets_unavailable > n);
}
@@ -224,20 +224,43 @@ bch2_fs_usage_read(struct bch_fs *c)
c->usage_percpu);
}
static inline int is_meta_bucket(struct bucket_mark m)
struct fs_usage_sum {
u64 data;
u64 reserved;
};
static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
{
return m.data_type != BUCKET_DATA;
struct fs_usage_sum sum = { 0 };
unsigned i;
for (i = 0; i < ARRAY_SIZE(stats.s); i++) {
sum.data += (stats.s[i].data[S_META] +
stats.s[i].data[S_DIRTY]) * (i + 1);
sum.reserved += stats.s[i].persistent_reserved * (i + 1);
}
sum.reserved += stats.online_reserved;
return sum;
}
static inline int is_dirty_bucket(struct bucket_mark m)
#define RESERVE_FACTOR 6
static u64 reserve_factor(u64 r)
{
return m.data_type == BUCKET_DATA && !!m.dirty_sectors;
return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}
static inline int is_cached_bucket(struct bucket_mark m)
u64 __bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
{
return m.data_type == BUCKET_DATA &&
!m.dirty_sectors && !!m.cached_sectors;
struct fs_usage_sum sum = __fs_usage_sum(stats);
return sum.data + reserve_factor(sum.reserved);
}
u64 bch2_fs_sectors_used(struct bch_fs *c, struct bch_fs_usage stats)
{
return min(c->capacity, __bch2_fs_sectors_used(c, stats));
}
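Numerically, reserve_factor() pads reservations by roughly one sector per 2^RESERVE_FACTOR = 64 reserved, and __fs_usage_sum() weights each entry by its replica count. With assumed values:

/*
 * Illustration only:
 *   reserve_factor(1000) = 1000 + (round_up(1000, 64) >> 6)
 *                        = 1000 + (1024 >> 6) = 1016     (~1.6% headroom)
 *
 *   s[0] (1x replicated): 500 sectors -> contributes 500 * 1
 *   s[1] (2x replicated): 300 sectors -> contributes 300 * 2
 *   sum.data = 1100 raw sectors of disk space
 */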
static inline int is_unavailable_bucket(struct bucket_mark m)
@@ -245,9 +268,11 @@ static inline int is_unavailable_bucket(struct bucket_mark m)
return !is_available_bucket(m);
}
static inline enum s_alloc bucket_type(struct bucket_mark m)
static inline enum bch_data_type bucket_type(struct bucket_mark m)
{
return is_meta_bucket(m) ? S_META : S_DIRTY;
return m.cached_sectors && !m.dirty_sectors
? BCH_DATA_CACHED
: m.data_type;
}
static bool bucket_became_unavailable(struct bch_fs *c,
@@ -304,26 +329,23 @@ static void bch2_dev_usage_update(struct bch_fs *c, struct bch_dev *ca,
bch2_fs_inconsistent_on(old.data_type && new.data_type &&
old.data_type != new.data_type, c,
"different types of metadata in same bucket: %u, %u",
"different types of data in same bucket: %u, %u",
old.data_type, new.data_type);
preempt_disable();
dev_usage = this_cpu_ptr(ca->usage_percpu);
dev_usage->buckets[S_META] +=
is_meta_bucket(new) - is_meta_bucket(old);
dev_usage->buckets[S_DIRTY] +=
is_dirty_bucket(new) - is_dirty_bucket(old);
dev_usage->buckets_cached +=
is_cached_bucket(new) - is_cached_bucket(old);
dev_usage->buckets[bucket_type(old)]--;
dev_usage->buckets[bucket_type(new)]++;
dev_usage->buckets_alloc +=
(int) new.owned_by_allocator - (int) old.owned_by_allocator;
dev_usage->buckets_unavailable +=
is_unavailable_bucket(new) - is_unavailable_bucket(old);
dev_usage->sectors[bucket_type(old)] -= old.dirty_sectors;
dev_usage->sectors[bucket_type(new)] += new.dirty_sectors;
dev_usage->sectors_cached +=
dev_usage->sectors[old.data_type] -= old.dirty_sectors;
dev_usage->sectors[new.data_type] += new.dirty_sectors;
dev_usage->sectors[BCH_DATA_CACHED] +=
(int) new.cached_sectors - (int) old.cached_sectors;
preempt_enable();
@@ -348,8 +370,10 @@ bool bch2_invalidate_bucket(struct bch_fs *c, struct bch_dev *ca,
lg_local_lock(&c->usage_lock);
*old = bucket_data_cmpxchg(c, ca, g, new, ({
if (!is_available_bucket(new))
if (!is_available_bucket(new)) {
lg_local_unlock(&c->usage_lock);
return false;
}
new.owned_by_allocator = 1;
new.touched_this_mount = 1;
@@ -374,8 +398,10 @@ bool bch2_mark_alloc_bucket_startup(struct bch_fs *c, struct bch_dev *ca,
lg_local_lock(&c->usage_lock);
old = bucket_data_cmpxchg(c, ca, g, new, ({
if (new.touched_this_mount ||
!is_available_bucket(new))
!is_available_bucket(new)) {
lg_local_unlock(&c->usage_lock);
return false;
}
new.owned_by_allocator = 1;
new.touched_this_mount = 1;
@@ -422,8 +448,9 @@ do { \
} while (0)
void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
struct bucket *g, enum bucket_data_type type,
struct gc_pos pos, unsigned flags)
struct bucket *g, enum bch_data_type type,
unsigned sectors, struct gc_pos pos,
unsigned flags)
{
struct bucket_mark old, new;
@@ -437,20 +464,13 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
}
old = bucket_data_cmpxchg(c, ca, g, new, ({
saturated_add(ca, new.dirty_sectors, ca->mi.bucket_size,
saturated_add(ca, new.dirty_sectors, sectors,
GC_MAX_SECTORS_USED);
new.data_type = type;
new.touched_this_mount = 1;
}));
lg_local_unlock(&c->usage_lock);
if (old.data_type != type &&
(old.data_type ||
old.cached_sectors ||
old.dirty_sectors))
bch_err(c, "bucket %zu has multiple types of data (%u, %u)",
g - ca->buckets, old.data_type, new.data_type);
BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
bucket_became_unavailable(c, old, new));
}
@@ -483,8 +503,8 @@ static void bch2_mark_pointer(struct bch_fs *c,
unsigned saturated;
struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
struct bucket *g = ca->buckets + PTR_BUCKET_NR(ca, ptr);
unsigned data_type = type == S_META
? BUCKET_BTREE : BUCKET_DATA;
enum bch_data_type data_type = type == S_META
? BCH_DATA_BTREE : BCH_DATA_USER;
u64 v;
if (crc.compression_type) {
@@ -566,13 +586,6 @@ static void bch2_mark_pointer(struct bch_fs *c,
bch2_dev_usage_update(c, ca, g, old, new);
if (old.data_type != data_type &&
(old.data_type ||
old.cached_sectors ||
old.dirty_sectors))
bch_err(c, "bucket %zu has multiple types of data (%u, %u)",
g - ca->buckets, old.data_type, new.data_type);
BUG_ON(!(flags & BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE) &&
bucket_became_unavailable(c, old, new));
@@ -644,17 +657,19 @@ void bch2_mark_key(struct bch_fs *c, struct bkey_s_c k,
replicas += !ptr->cached;
}
BUG_ON(replicas >= BCH_REPLICAS_MAX);
if (replicas)
if (replicas) {
BUG_ON(replicas - 1 > ARRAY_SIZE(stats->s));
stats->s[replicas - 1].data[type] += sectors;
}
break;
}
case BCH_RESERVATION: {
struct bkey_s_c_reservation r = bkey_s_c_to_reservation(k);
if (r.v->nr_replicas)
if (r.v->nr_replicas) {
BUG_ON(r.v->nr_replicas - 1 > ARRAY_SIZE(stats->s));
stats->s[r.v->nr_replicas - 1].persistent_reserved += sectors;
}
break;
}
}
@@ -671,7 +686,7 @@ static u64 __recalc_sectors_available(struct bch_fs *c)
for_each_possible_cpu(cpu)
per_cpu_ptr(c->usage_percpu, cpu)->available_cache = 0;
avail = c->capacity - bch2_fs_sectors_used(c);
avail = c->capacity - bch2_fs_sectors_used(c, bch2_fs_usage_read(c));
avail <<= RESERVE_FACTOR;
avail /= (1 << RESERVE_FACTOR) + 1;
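The shift-then-divide here is the inverse of reserve_factor()'s padding, i.e. avail * 64/65, shown with assumed numbers:

/*
 * Illustration only: avail = 6500 free sectors:
 *   6500 << 6 = 416000;  416000 / 65 = 6400
 * so reservations handed out against this figure remain covered once
 * reserve_factor() pads them back up by ~1/64.
 */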

View File

@@ -92,7 +92,7 @@ static inline bool bucket_unused(struct bucket_mark mark)
!bucket_sectors_used(mark);
}
/* Per device stats: */
/* Device usage: */
struct bch_dev_usage __bch2_dev_usage_read(struct bch_dev *);
struct bch_dev_usage bch2_dev_usage_read(struct bch_fs *, struct bch_dev *);
@@ -130,56 +130,31 @@ static inline u64 dev_buckets_free(struct bch_fs *c, struct bch_dev *ca)
return __dev_buckets_free(ca, bch2_dev_usage_read(c, ca));
}
/* Cache set stats: */
/* Filesystem usage: */
static inline enum bch_data_type s_alloc_to_data_type(enum s_alloc s)
{
switch (s) {
case S_META:
return BCH_DATA_BTREE;
case S_DIRTY:
return BCH_DATA_USER;
default:
BUG();
}
}
struct bch_fs_usage __bch2_fs_usage_read(struct bch_fs *);
struct bch_fs_usage bch2_fs_usage_read(struct bch_fs *);
void bch2_fs_usage_apply(struct bch_fs *, struct bch_fs_usage *,
struct disk_reservation *, struct gc_pos);
struct fs_usage_sum {
u64 data;
u64 reserved;
};
static inline struct fs_usage_sum __fs_usage_sum(struct bch_fs_usage stats)
{
struct fs_usage_sum sum = { 0 };
unsigned i;
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
sum.data += (stats.s[i].data[S_META] +
stats.s[i].data[S_DIRTY]) * (i + 1);
sum.reserved += stats.s[i].persistent_reserved * (i + 1);
}
sum.reserved += stats.online_reserved;
return sum;
}
#define RESERVE_FACTOR 6
static u64 reserve_factor(u64 r)
{
return r + (round_up(r, (1 << RESERVE_FACTOR)) >> RESERVE_FACTOR);
}
static inline u64 __bch2_fs_sectors_used(struct bch_fs *c)
{
struct fs_usage_sum sum = __fs_usage_sum(__bch2_fs_usage_read(c));
return sum.data + reserve_factor(sum.reserved);
}
static inline u64 bch2_fs_sectors_used(struct bch_fs *c)
{
return min(c->capacity, __bch2_fs_sectors_used(c));
}
u64 __bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
u64 bch2_fs_sectors_used(struct bch_fs *, struct bch_fs_usage);
static inline bool is_available_bucket(struct bucket_mark mark)
{
return (!mark.owned_by_allocator &&
mark.data_type == BUCKET_DATA &&
!mark.dirty_sectors &&
!mark.nouse);
}
@@ -201,8 +176,8 @@ void bch2_mark_alloc_bucket(struct bch_fs *, struct bch_dev *,
struct bucket *, bool,
struct gc_pos, unsigned);
void bch2_mark_metadata_bucket(struct bch_fs *, struct bch_dev *,
struct bucket *, enum bucket_data_type,
struct gc_pos, unsigned);
struct bucket *, enum bch_data_type,
unsigned, struct gc_pos, unsigned);
#define BCH_BUCKET_MARK_NOATOMIC (1 << 0)
#define BCH_BUCKET_MARK_MAY_MAKE_UNAVAILABLE (1 << 1)

View File

@@ -3,14 +3,6 @@
#include "util.h"
/* kill, switch to bch_data_type */
enum bucket_data_type {
BUCKET_DATA = 0,
BUCKET_BTREE,
BUCKET_JOURNAL,
BUCKET_SB,
};
struct bucket_mark {
union {
struct {
@@ -48,22 +40,20 @@ struct bucket {
};
};
/* kill, switch to bucket_data_type */
enum s_alloc {
S_META,
S_DIRTY,
S_ALLOC_NR,
};
struct bch_dev_usage {
u64 buckets[S_ALLOC_NR];
u64 buckets_cached;
u64 buckets[BCH_DATA_NR];
u64 buckets_alloc;
u64 buckets_unavailable;
/* _compressed_ sectors: */
u64 sectors[S_ALLOC_NR];
u64 sectors_cached;
u64 sectors[BCH_DATA_NR];
};
/* kill, switch to bch_data_type? */
enum s_alloc {
S_META,
S_DIRTY,
S_ALLOC_NR,
};
struct bch_fs_usage {

View File

@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "bcachefs_ioctl.h"
#include "buckets.h"
#include "chardev.h"
#include "super.h"
#include "super-io.h"
@@ -289,6 +290,79 @@ static long bch2_ioctl_disk_evacuate(struct bch_fs *c,
return ret;
}
static long bch2_ioctl_usage(struct bch_fs *c,
struct bch_ioctl_usage __user *user_arg)
{
struct bch_ioctl_usage arg;
struct bch_dev *ca;
unsigned i, j;
int ret;
if (copy_from_user(&arg, user_arg, sizeof(arg)))
return -EFAULT;
for (i = 0; i < arg.nr_devices; i++) {
struct bch_ioctl_dev_usage dst = { .alive = 0 };
ret = copy_to_user(&user_arg->devs[i], &dst, sizeof(dst));
if (ret)
return ret;
}
{
struct bch_fs_usage src = bch2_fs_usage_read(c);
struct bch_ioctl_fs_usage dst = {
.capacity = c->capacity,
.used = bch2_fs_sectors_used(c, src),
.online_reserved = src.online_reserved,
};
for (i = 0; i < BCH_REPLICAS_MAX; i++) {
dst.persistent_reserved[i] =
src.s[i].persistent_reserved;
for (j = 0; j < S_ALLOC_NR; j++)
dst.sectors[s_alloc_to_data_type(j)][i] =
src.s[i].data[j];
}
ret = copy_to_user(&user_arg->fs, &dst, sizeof(dst));
if (ret)
return ret;
}
for_each_member_device(ca, c, i) {
struct bch_dev_usage src = bch2_dev_usage_read(c, ca);
struct bch_ioctl_dev_usage dst = {
.alive = 1,
.state = ca->mi.state,
.bucket_size = ca->mi.bucket_size,
.nr_buckets = ca->mi.nbuckets - ca->mi.first_bucket,
};
if (ca->dev_idx >= arg.nr_devices) {
percpu_ref_put(&ca->ref);
return -ENOSPC;
}
if (percpu_ref_tryget(&ca->io_ref)) {
dst.dev = huge_encode_dev(ca->disk_sb.bdev->bd_dev);
percpu_ref_put(&ca->io_ref);
}
for (j = 0; j < BCH_DATA_NR; j++) {
dst.buckets[j] = src.buckets[j];
dst.sectors[j] = src.sectors[j];
}
ret = copy_to_user(&user_arg->devs[i], &dst, sizeof(dst));
if (ret)
return ret;
}
return 0;
}
#define BCH_IOCTL(_name, _argtype) \
do { \
_argtype i; \
@@ -304,6 +378,8 @@ long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
switch (cmd) {
case BCH_IOCTL_QUERY_UUID:
return bch2_ioctl_query_uuid(c, arg);
case BCH_IOCTL_USAGE:
return bch2_ioctl_usage(c, arg);
}
if (!capable(CAP_SYS_ADMIN))

View File

@@ -654,7 +654,7 @@ static void btree_ptr_debugcheck(struct bch_fs *c, struct btree *b,
do {
seq = read_seqcount_begin(&c->gc_pos_lock);
bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
(g->mark.data_type != BUCKET_BTREE ||
(g->mark.data_type != BCH_DATA_BTREE ||
g->mark.dirty_sectors < c->opts.btree_node_size);
} while (read_seqcount_retry(&c->gc_pos_lock, seq));
@@ -1731,6 +1731,7 @@ static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
const struct bch_extent_ptr *ptr;
struct bch_dev *ca;
struct bucket *g;
struct bucket_mark mark;
unsigned seq, stale;
char buf[160];
bool bad;
@@ -1764,8 +1765,6 @@ static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
stale = 0;
do {
struct bucket_mark mark;
seq = read_seqcount_begin(&c->gc_pos_lock);
mark = READ_ONCE(g->mark);
@@ -1784,12 +1783,11 @@ static void bch2_extent_debugcheck_extent(struct bch_fs *c, struct btree *b,
if (stale)
break;
bad = (mark.data_type != BUCKET_DATA ||
(gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
!mark.owned_by_allocator &&
!(ptr->cached
? mark.cached_sectors
: mark.dirty_sectors)));
bad = gc_pos_cmp(c->gc_pos, gc_pos_btree_node(b)) > 0 &&
(mark.data_type != BCH_DATA_USER ||
!(ptr->cached
? mark.cached_sectors
: mark.dirty_sectors));
} while (read_seqcount_retry(&c->gc_pos_lock, seq));
if (bad)
@@ -1821,10 +1819,10 @@ bad_ptr:
bch2_bkey_val_to_text(c, btree_node_type(b), buf,
sizeof(buf), e.s_c);
bch2_fs_bug(c, "extent pointer bad gc mark: %s:\nbucket %zu "
"gen %i last_gc %i mark 0x%08x",
buf, PTR_BUCKET_NR(ca, ptr), PTR_BUCKET(ca, ptr)->mark.gen,
"gen %i last_gc %i type %u",
buf, PTR_BUCKET_NR(ca, ptr), mark.gen,
ca->oldest_gens[PTR_BUCKET_NR(ca, ptr)],
(unsigned) g->mark.counter);
mark.data_type);
return;
}

View File

@@ -413,6 +413,18 @@ static inline struct bch_devs_list bch2_extent_devs(struct bkey_s_c_extent e)
return ret;
}
static inline struct bch_devs_list bch2_extent_dirty_devs(struct bkey_s_c_extent e)
{
struct bch_devs_list ret = (struct bch_devs_list) { 0 };
const struct bch_extent_ptr *ptr;
extent_for_each_ptr(e, ptr)
if (!ptr->cached)
ret.devs[ret.nr++] = ptr->dev;
return ret;
}
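(Illustrative, not from the diff: an extent with dirty pointers on devices 2 and 5 plus a cached pointer on device 7 yields { .nr = 2, .devs = { 2, 5 } }; only non-cached pointers count toward replication, which is what the journal and replicas checks below validate against the superblock.)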
bool bch2_can_narrow_extent_crcs(struct bkey_s_c_extent,
struct bch_extent_crc_unpacked);
bool bch2_extent_narrow_crcs(struct bkey_i_extent *, struct bch_extent_crc_unpacked);

View File

@@ -1009,7 +1009,9 @@ static int bch2_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_type = BCACHEFS_STATFS_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = c->capacity >> PAGE_SECTOR_SHIFT;
buf->f_bfree = (c->capacity - bch2_fs_sectors_used(c)) >> PAGE_SECTOR_SHIFT;
buf->f_bfree = (c->capacity -
bch2_fs_sectors_used(c, bch2_fs_usage_read(c))) >>
PAGE_SECTOR_SHIFT;
buf->f_bavail = buf->f_bfree;
buf->f_files = atomic_long_read(&c->nr_inodes);
buf->f_ffree = U64_MAX;

View File

@@ -992,6 +992,17 @@ int bch2_journal_read(struct bch_fs *c, struct list_head *list)
ret = journal_entry_validate_entries(c, &i->j, READ);
if (ret)
goto fsck_err;
if (test_bit(BCH_FS_REBUILD_REPLICAS, &c->flags) ||
fsck_err_on(!bch2_sb_has_replicas_devlist(c, &i->devs,
BCH_DATA_JOURNAL), c,
"superblock not marked as containing replicas (type %u)",
BCH_DATA_JOURNAL)) {
ret = bch2_check_mark_super_devlist(c, &i->devs,
BCH_DATA_JOURNAL);
if (ret)
return ret;
}
}
i = list_last_entry(list, struct journal_replay, list);
@@ -1619,7 +1630,8 @@ static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
spin_unlock(&j->lock);
bch2_mark_metadata_bucket(c, ca, &ca->buckets[bucket],
BUCKET_JOURNAL,
BCH_DATA_JOURNAL,
ca->mi.bucket_size,
gc_phase(GC_PHASE_SB), 0);
bch2_open_bucket_put(c, ob);

View File

@@ -125,7 +125,7 @@ static void bch2_copygc(struct bch_fs *c, struct bch_dev *ca)
struct copygc_heap_entry e;
if (m.owned_by_allocator ||
m.data_type != BUCKET_DATA ||
m.data_type != BCH_DATA_USER ||
!bucket_sectors_used(m) ||
bucket_sectors_used(m) >= ca->mi.bucket_size)
continue;

View File

@@ -38,6 +38,7 @@ const char * const bch2_data_types[] = {
"journal",
"btree",
"data",
"cached",
NULL
};

View File

@@ -877,10 +877,40 @@ static inline unsigned replicas_dev_slots(struct bch_replicas_cpu *r)
offsetof(struct bch_replicas_cpu_entry, devs)) * 8;
}
static unsigned bkey_to_replicas(struct bkey_s_c_extent e,
enum bch_data_type data_type,
struct bch_replicas_cpu_entry *r,
unsigned *max_dev)
int bch2_cpu_replicas_to_text(struct bch_replicas_cpu *r,
char *buf, size_t size)
{
char *out = buf, *end = out + size;
struct bch_replicas_cpu_entry *e;
bool first = true;
unsigned i;
for_each_cpu_replicas_entry(r, e) {
bool first_e = true;
if (!first)
out += scnprintf(out, end - out, " ");
first = false;
out += scnprintf(out, end - out, "%u: [", e->data_type);
for (i = 0; i < replicas_dev_slots(r); i++)
if (replicas_test_dev(e, i)) {
if (!first_e)
out += scnprintf(out, end - out, " ");
first_e = false;
out += scnprintf(out, end - out, "%u", i);
}
out += scnprintf(out, end - out, "]");
}
return out - buf;
}
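Sample output from the new formatter, with assumed contents: a filesystem whose btree (type 3) is replicated across devices 0 and 1 and whose user data (type 4) spans devices 0 through 2 would print:

3: [0 1] 4: [0 1 2]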
static inline unsigned bkey_to_replicas(struct bkey_s_c_extent e,
enum bch_data_type data_type,
struct bch_replicas_cpu_entry *r,
unsigned *max_dev)
{
const struct bch_extent_ptr *ptr;
unsigned nr = 0;
@@ -903,6 +933,28 @@ static unsigned bkey_to_replicas(struct bkey_s_c_extent e,
return nr;
}
static inline void devlist_to_replicas(struct bch_devs_list devs,
enum bch_data_type data_type,
struct bch_replicas_cpu_entry *r,
unsigned *max_dev)
{
unsigned i;
BUG_ON(!data_type ||
data_type == BCH_DATA_SB ||
data_type >= BCH_DATA_NR);
memset(r, 0, sizeof(*r));
r->data_type = data_type;
*max_dev = 0;
for (i = 0; i < devs.nr; i++) {
*max_dev = max_t(unsigned, *max_dev, devs.devs[i]);
replicas_set_dev(r, devs.devs[i]);
}
}
static struct bch_replicas_cpu *
cpu_replicas_add_entry(struct bch_replicas_cpu *old,
struct bch_replicas_cpu_entry new_entry,
@@ -952,7 +1004,7 @@ static int bch2_check_mark_super_slowpath(struct bch_fs *c,
struct bch_replicas_cpu_entry new_entry,
unsigned max_dev)
{
struct bch_replicas_cpu *old_gc, *new_gc = NULL, *old_r, *new_r;
struct bch_replicas_cpu *old_gc, *new_gc = NULL, *old_r, *new_r = NULL;
int ret = -ENOMEM;
mutex_lock(&c->sb_lock);
@@ -967,31 +1019,37 @@ static int bch2_check_mark_super_slowpath(struct bch_fs *c,
old_r = rcu_dereference_protected(c->replicas,
lockdep_is_held(&c->sb_lock));
/* recheck, might have raced */
if (replicas_has_entry(old_r, new_entry, max_dev))
goto out;
if (!replicas_has_entry(old_r, new_entry, max_dev)) {
new_r = cpu_replicas_add_entry(old_r, new_entry, max_dev);
if (!new_r)
goto err;
new_r = cpu_replicas_add_entry(old_r, new_entry, max_dev);
if (!new_r)
goto err;
ret = bch2_cpu_replicas_to_sb_replicas(c, new_r);
if (ret)
goto err;
}
ret = bch2_cpu_replicas_to_sb_replicas(c, new_r);
if (ret)
goto err;
/* allocations done, now commit: */
if (new_gc) {
rcu_assign_pointer(c->replicas_gc, new_gc);
kfree_rcu(old_gc, rcu);
}
rcu_assign_pointer(c->replicas, new_r);
kfree_rcu(old_r, rcu);
if (new_r) {
rcu_assign_pointer(c->replicas, new_r);
kfree_rcu(old_r, rcu);
bch2_write_super(c);
}
bch2_write_super(c);
out:
ret = 0;
mutex_unlock(&c->sb_lock);
return 0;
err:
mutex_unlock(&c->sb_lock);
if (new_gc)
kfree(new_gc);
if (new_r)
kfree(new_r);
return ret;
}
@@ -1029,17 +1087,13 @@ int bch2_check_mark_super_devlist(struct bch_fs *c,
struct bch_devs_list *devs,
enum bch_data_type data_type)
{
struct bch_replicas_cpu_entry search = { .data_type = data_type };
unsigned i, max_dev = 0;
struct bch_replicas_cpu_entry search;
unsigned max_dev;
if (!devs->nr)
return 0;
for (i = 0; i < devs->nr; i++) {
max_dev = max_t(unsigned, max_dev, devs->devs[i]);
replicas_set_dev(&search, devs->devs[i]);
}
devlist_to_replicas(*devs, data_type, &search, &max_dev);
return __bch2_check_mark_super(c, search, max_dev);
}
@@ -1300,18 +1354,42 @@ err:
return err;
}
int bch2_sb_replicas_to_text(struct bch_sb_field_replicas *r, char *buf, size_t size)
{
char *out = buf, *end = out + size;
struct bch_replicas_entry *e;
bool first = true;
unsigned i;
if (!r) {
out += scnprintf(out, end - out, "(no replicas section found)");
return out - buf;
}
for_each_replicas_entry(r, e) {
if (!first)
out += scnprintf(out, end - out, " ");
first = false;
out += scnprintf(out, end - out, "%u: [", e->data_type);
for (i = 0; i < e->nr; i++)
out += scnprintf(out, end - out,
i ? " %u" : "%u", e->devs[i]);
out += scnprintf(out, end - out, "]");
}
return out - buf;
}
/* Query replicas: */
bool bch2_sb_has_replicas(struct bch_fs *c, struct bkey_s_c_extent e,
enum bch_data_type data_type)
static bool __bch2_sb_has_replicas(struct bch_fs *c,
struct bch_replicas_cpu_entry search,
unsigned max_dev)
{
struct bch_replicas_cpu_entry search;
unsigned max_dev;
bool ret;
if (!bkey_to_replicas(e, data_type, &search, &max_dev))
return true;
rcu_read_lock();
ret = replicas_has_entry(rcu_dereference(c->replicas),
search, max_dev);
@@ -1320,6 +1398,31 @@ bool bch2_sb_has_replicas(struct bch_fs *c, struct bkey_s_c_extent e,
return ret;
}
bool bch2_sb_has_replicas(struct bch_fs *c, struct bkey_s_c_extent e,
enum bch_data_type data_type)
{
struct bch_replicas_cpu_entry search;
unsigned max_dev;
if (!bkey_to_replicas(e, data_type, &search, &max_dev))
return true;
return __bch2_sb_has_replicas(c, search, max_dev);
}
bool bch2_sb_has_replicas_devlist(struct bch_fs *c, struct bch_devs_list *devs,
enum bch_data_type data_type)
{
struct bch_replicas_cpu_entry search;
unsigned max_dev;
if (!devs->nr)
return true;
devlist_to_replicas(*devs, data_type, &search, &max_dev);
return __bch2_sb_has_replicas(c, search, max_dev);
}
struct replicas_status __bch2_replicas_status(struct bch_fs *c,
struct bch_devs_mask online_devs)
{

View File

@@ -127,11 +127,16 @@ void bch2_write_super(struct bch_fs *);
bool bch2_sb_has_replicas(struct bch_fs *, struct bkey_s_c_extent,
enum bch_data_type);
bool bch2_sb_has_replicas_devlist(struct bch_fs *, struct bch_devs_list *,
enum bch_data_type);
int bch2_check_mark_super(struct bch_fs *, struct bkey_s_c_extent,
enum bch_data_type);
int bch2_check_mark_super_devlist(struct bch_fs *, struct bch_devs_list *,
enum bch_data_type);
int bch2_cpu_replicas_to_text(struct bch_replicas_cpu *, char *, size_t);
int bch2_sb_replicas_to_text(struct bch_sb_field_replicas *, char *, size_t);
struct replicas_status {
struct {
unsigned nr_online;

View File

@@ -149,16 +149,6 @@ read_attribute(journal_pins);
read_attribute(internal_uuid);
read_attribute(available_buckets);
read_attribute(free_buckets);
read_attribute(dirty_data);
read_attribute(dirty_bytes);
read_attribute(dirty_buckets);
read_attribute(cached_data);
read_attribute(cached_bytes);
read_attribute(cached_buckets);
read_attribute(meta_buckets);
read_attribute(alloc_buckets);
read_attribute(has_data);
read_attribute(alloc_debug);
write_attribute(wake_allocator);
@@ -712,12 +702,17 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
"buckets:\n"
" capacity: %llu\n"
" alloc: %llu\n"
" sb: %llu\n"
" journal: %llu\n"
" meta: %llu\n"
" dirty: %llu\n"
" user: %llu\n"
" cached: %llu\n"
" available: %llu\n"
"sectors:\n"
" sb: %llu\n"
" journal: %llu\n"
" meta: %llu\n"
" dirty: %llu\n"
" user: %llu\n"
" cached: %llu\n"
"freelist_wait: %s\n"
"open buckets: %u/%u (reserved %u)\n"
@@ -728,12 +723,17 @@ static ssize_t show_dev_alloc_debug(struct bch_dev *ca, char *buf)
fifo_used(&ca->free[RESERVE_NONE]), ca->free[RESERVE_NONE].size,
ca->mi.nbuckets - ca->mi.first_bucket,
stats.buckets_alloc,
stats.buckets[S_META],
stats.buckets[S_DIRTY],
stats.buckets[BCH_DATA_SB],
stats.buckets[BCH_DATA_JOURNAL],
stats.buckets[BCH_DATA_BTREE],
stats.buckets[BCH_DATA_USER],
stats.buckets[BCH_DATA_CACHED],
__dev_buckets_available(ca, stats),
stats.sectors[S_META],
stats.sectors[S_DIRTY],
stats.sectors_cached,
stats.sectors[BCH_DATA_SB],
stats.sectors[BCH_DATA_JOURNAL],
stats.sectors[BCH_DATA_BTREE],
stats.sectors[BCH_DATA_USER],
stats.sectors[BCH_DATA_CACHED],
c->freelist_wait.list.first ? "waiting" : "empty",
c->open_buckets_nr_free, OPEN_BUCKETS_COUNT, BTREE_NODE_RESERVE,
c->open_buckets_wait.list.first ? "waiting" : "empty");
@@ -771,7 +771,6 @@ SHOW(bch2_dev)
{
struct bch_dev *ca = container_of(kobj, struct bch_dev, kobj);
struct bch_fs *c = ca->fs;
struct bch_dev_usage stats = bch2_dev_usage_read(c, ca);
char *out = buf, *end = buf + PAGE_SIZE;
sysfs_printf(uuid, "%pU\n", ca->uuid.b);
@@ -782,17 +781,6 @@ SHOW(bch2_dev)
sysfs_print(nbuckets, ca->mi.nbuckets);
sysfs_print(discard, ca->mi.discard);
sysfs_hprint(dirty_data, stats.sectors[S_DIRTY] << 9);
sysfs_print(dirty_bytes, stats.sectors[S_DIRTY] << 9);
sysfs_print(dirty_buckets, stats.buckets[S_DIRTY]);
sysfs_hprint(cached_data, stats.sectors_cached << 9);
sysfs_print(cached_bytes, stats.sectors_cached << 9);
sysfs_print(cached_buckets, stats.buckets_cached);
sysfs_print(meta_buckets, stats.buckets[S_META]);
sysfs_print(alloc_buckets, stats.buckets_alloc);
sysfs_print(available_buckets, __dev_buckets_available(ca, stats));
sysfs_print(free_buckets, __dev_buckets_free(ca, stats));
if (attr == &sysfs_has_data) {
out += bch2_scnprint_flag_list(out, end - out,
bch2_data_types,
@@ -924,20 +912,6 @@ struct attribute *bch2_dev_files[] = {
&sysfs_has_data,
&sysfs_iostats,
/* alloc info - data: */
&sysfs_dirty_data,
&sysfs_dirty_bytes,
&sysfs_cached_data,
&sysfs_cached_bytes,
/* alloc info - buckets: */
&sysfs_available_buckets,
&sysfs_free_buckets,
&sysfs_dirty_buckets,
&sysfs_cached_buckets,
&sysfs_meta_buckets,
&sysfs_alloc_buckets,
/* alloc info - other stats: */
&sysfs_read_priority_stats,
&sysfs_write_priority_stats,