Update bcachefs sources to d800fc8b69ff bcachefs: BTREE_ID_reconcile_(work|hipri)_phys
Some checks failed
Nix Flake actions / nix-matrix (push) Has been cancelled
Nix Flake actions / ${{ matrix.name }} (${{ matrix.system }}) (push) Has been cancelled
build / bcachefs-tools-msrv (push) Has been cancelled
.deb build orchestrator / source-only (push) Has been cancelled
.deb build orchestrator / obs (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:forky], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / publish (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:forky], map[build-arch:amd64 host-arch:ppc64el machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:forky], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:trixie], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:trixie], map[build-arch:amd64 host-arch:ppc64el machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:trixie], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:unstable], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:unstable], map[build-arch:amd64 host-arch:ppc64el machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:debian version:unstable], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:plucky], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:plucky], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:questing], map[build-arch:amd64 host-arch:amd64 machine-arch:amd64 runs-on:ubuntu-24.04]) (push) Has been cancelled
.deb build orchestrator / buildd (map[name:ubuntu version:questing], map[build-arch:arm64 host-arch:arm64 machine-arch:arm64 runs-on:ubuntu-24.04-arm]) (push) Has been cancelled
.deb build orchestrator / reprotest (push) Has been cancelled

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
Kent Overstreet 2025-11-26 19:44:45 -05:00
parent eeaf00a23b
commit 5155014de4
19 changed files with 511 additions and 205 deletions

View File

@ -1 +1 @@
2e9d7e867ec236de11ebf725da352cd6bf7aaa86
d800fc8b69ff90543270cdad1ef95c18371d4168

View File

@ -62,6 +62,7 @@ int blkdev_issue_zeroout(struct block_device *, sector_t, sector_t, gfp_t, unsig
#define blk_queue_nonrot(q) ((void) (q), 0)
unsigned bdev_logical_block_size(struct block_device *bdev);
bool bdev_nonrot(struct block_device *);
sector_t get_capacity(struct gendisk *disk);
struct blk_holder_ops {

View File

@ -79,6 +79,7 @@ void bch2_backpointer_swab(const struct bch_fs *c, struct bkey_s k)
{
struct bkey_s_backpointer bp = bkey_s_to_backpointer(k);
bp.v->flags = swab32(bp.v->flags);
bp.v->bucket_len = swab32(bp.v->bucket_len);
bch2_bpos_swab(&bp.v->pos);
}
@ -865,7 +866,7 @@ static int check_bucket_backpointer_mismatch(struct btree_trans *trans, struct b
if (c->sb.version_upgrade_complete < bcachefs_metadata_version_backpointer_bucket_gen &&
(bp.v->bucket_gen != a->gen ||
bp.v->pad)) {
bp.v->flags)) {
try(bch2_backpointer_del(trans, bp_k.k->p));
nr_deletes++;

View File

@ -3,9 +3,13 @@
#define _BCACHEFS_BACKPOINTERS_H
#include "alloc/buckets.h"
#include "btree/cache.h"
#include "btree/iter.h"
#include "btree/update.h"
#include "data/reconcile.h"
#include "init/error.h"
static inline u64 swab40(u64 x)
@ -100,6 +104,11 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
struct bkey_i_backpointer *bp,
bool insert)
{
if (BACKPOINTER_RECONCILE_PHYS(&bp->v))
try(bch2_btree_bit_mod_buffered(trans,
reconcile_work_phys_btree[BACKPOINTER_RECONCILE_PHYS(&bp->v)],
bp->k.p, insert));
if (static_branch_unlikely(&bch2_backpointers_no_use_write_buffer))
return bch2_bucket_backpointer_mod_nowritebuffer(trans, orig_k, bp, insert);
@ -143,6 +152,23 @@ static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
}
}
/*
 * Compute the backpointer btree position for one pointer within key @k:
 * .inode is the device index, .offset is the device offset scaled by
 * MAX_EXTENT_COMPRESS_RATIO_SHIFT (plus the crc offset, so distinct
 * pointers into the same compressed extent get distinct positions).
 */
static inline struct bpos bch2_extent_ptr_to_bp_pos(struct bkey_s_c k, struct extent_ptr_decoded p)
{
if (k.k->type != KEY_TYPE_stripe)
return POS(p.ptr.dev,
((u64) p.ptr.offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + p.crc.offset);
else {
/*
 * Put stripe backpointers where they won't collide with the
 * extent backpointers within the stripe:
 */
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
return POS(p.ptr.dev,
((u64) (p.ptr.offset + le16_to_cpu(s.v->sectors)) <<
MAX_EXTENT_COMPRESS_RATIO_SHIFT) - 1);
}
}
static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
enum btree_id btree_id, unsigned level,
struct bkey_s_c k, struct extent_ptr_decoded p,
@ -150,20 +176,7 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
struct bkey_i_backpointer *bp)
{
bkey_backpointer_init(&bp->k_i);
bp->k.p.inode = p.ptr.dev;
if (k.k->type != KEY_TYPE_stripe)
bp->k.p.offset = ((u64) p.ptr.offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) + p.crc.offset;
else {
/*
* Put stripe backpointers where they won't collide with the
* extent backpointers within the stripe:
*/
struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
bp->k.p.offset = ((u64) (p.ptr.offset + le16_to_cpu(s.v->sectors)) <<
MAX_EXTENT_COMPRESS_RATIO_SHIFT) - 1;
}
bp->k.p = bch2_extent_ptr_to_bp_pos(k, p);
bp->v = (struct bch_backpointer) {
.btree_id = btree_id,
.level = level,
@ -172,6 +185,10 @@ static inline void bch2_extent_ptr_to_bp(struct bch_fs *c,
.bucket_len = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p),
.pos = k.k->p,
};
if (!level && bch2_dev_rotational(c, p.ptr.dev))
SET_BACKPOINTER_RECONCILE_PHYS(&bp->v,
rb_work_id_phys(bch2_bkey_reconcile_opts(c, k)));
}
struct wb_maybe_flush;

View File

@ -818,6 +818,7 @@ struct bch_fs {
struct bch_dev __rcu *devs[BCH_SB_MEMBERS_MAX];
struct bch_devs_mask devs_removed;
struct bch_devs_mask devs_rotational;
u8 extent_type_u64s[31];
u8 extent_types_known;
@ -1318,4 +1319,9 @@ static inline const char *bch2_dev_name(const struct bch_dev *ca)
return ca->name;
}
/*
 * True if @dev is a valid member index whose bit is set in the
 * filesystem's rotational-device bitmap (c->devs_rotational).
 */
static inline bool bch2_dev_rotational(struct bch_fs *c, unsigned dev)
{
return dev != BCH_SB_MEMBER_INVALID && test_bit(dev, c->devs_rotational.d);
}
#endif /* _BCACHEFS_H */

View File

@ -475,11 +475,13 @@ struct bch_backpointer {
__u8 level;
__u8 data_type;
__u8 bucket_gen;
__u32 pad;
__u32 flags;
__u32 bucket_len;
struct bpos pos;
} __packed __aligned(8);
BITMASK(BACKPOINTER_RECONCILE_PHYS, struct bch_backpointer, flags, 0, 2);
/* Optional/variable size superblock sections: */
struct bch_sb_field {
@ -507,6 +509,123 @@ struct bch_sb_field {
x(recovery_passes, 15) \
x(extent_type_u64s, 16)
enum btree_id_flags {
BTREE_IS_extents = BIT(0),
BTREE_IS_snapshots = BIT(1),
BTREE_IS_snapshot_field = BIT(2),
BTREE_IS_data = BIT(3),
BTREE_IS_write_buffer = BIT(4),
};
#define BCH_BTREE_IDS() \
x(extents, 0, \
BTREE_IS_extents| \
BTREE_IS_snapshots| \
BTREE_IS_data, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_extent_whiteout)| \
BIT_ULL(KEY_TYPE_error)| \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_extent)| \
BIT_ULL(KEY_TYPE_reservation)| \
BIT_ULL(KEY_TYPE_reflink_p)| \
BIT_ULL(KEY_TYPE_inline_data)) \
x(inodes, 1, \
BTREE_IS_snapshots, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_inode)| \
BIT_ULL(KEY_TYPE_inode_v2)| \
BIT_ULL(KEY_TYPE_inode_v3)| \
BIT_ULL(KEY_TYPE_inode_generation)) \
x(dirents, 2, \
BTREE_IS_snapshots, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_hash_whiteout)| \
BIT_ULL(KEY_TYPE_dirent)) \
x(xattrs, 3, \
BTREE_IS_snapshots, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_hash_whiteout)| \
BIT_ULL(KEY_TYPE_xattr)) \
x(alloc, 4, 0, \
BIT_ULL(KEY_TYPE_alloc)| \
BIT_ULL(KEY_TYPE_alloc_v2)| \
BIT_ULL(KEY_TYPE_alloc_v3)| \
BIT_ULL(KEY_TYPE_alloc_v4)) \
x(quotas, 5, 0, \
BIT_ULL(KEY_TYPE_quota)) \
x(stripes, 6, \
BTREE_IS_data, \
BIT_ULL(KEY_TYPE_stripe)) \
x(reflink, 7, \
BTREE_IS_extents| \
BTREE_IS_data, \
BIT_ULL(KEY_TYPE_reflink_v)| \
BIT_ULL(KEY_TYPE_indirect_inline_data)| \
BIT_ULL(KEY_TYPE_error)) \
x(subvolumes, 8, 0, \
BIT_ULL(KEY_TYPE_subvolume)) \
x(snapshots, 9, 0, \
BIT_ULL(KEY_TYPE_snapshot)) \
x(lru, 10, \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(freespace, 11, \
BTREE_IS_extents, \
BIT_ULL(KEY_TYPE_set)) \
x(need_discard, 12, 0, \
BIT_ULL(KEY_TYPE_set)) \
x(backpointers, 13, \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_backpointer)) \
x(bucket_gens, 14, 0, \
BIT_ULL(KEY_TYPE_bucket_gens)) \
x(snapshot_trees, 15, 0, \
BIT_ULL(KEY_TYPE_snapshot_tree)) \
x(deleted_inodes, 16, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(logged_ops, 17, 0, \
BIT_ULL(KEY_TYPE_logged_op_truncate)| \
BIT_ULL(KEY_TYPE_logged_op_finsert)| \
BIT_ULL(KEY_TYPE_inode_alloc_cursor)) \
x(reconcile_work, 18, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie)) \
x(subvolume_children, 19, 0, \
BIT_ULL(KEY_TYPE_set)) \
x(accounting, 20, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_accounting)) \
x(reconcile_hipri, 21, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(reconcile_pending, 22, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(reconcile_scan, 23, 0, \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_backpointer)) \
x(reconcile_work_phys, 24, \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(reconcile_hipri_phys, 25, \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set))
enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
BCH_BTREE_IDS()
#undef x
BTREE_ID_NR
};
#include "alloc/accounting_format.h"
#include "alloc/disk_groups_format.h"
#include "alloc/lru_format.h"
@ -1339,117 +1458,6 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
/* Btree: */
enum btree_id_flags {
BTREE_IS_extents = BIT(0),
BTREE_IS_snapshots = BIT(1),
BTREE_IS_snapshot_field = BIT(2),
BTREE_IS_data = BIT(3),
BTREE_IS_write_buffer = BIT(4),
};
#define BCH_BTREE_IDS() \
x(extents, 0, \
BTREE_IS_extents| \
BTREE_IS_snapshots| \
BTREE_IS_data, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_extent_whiteout)| \
BIT_ULL(KEY_TYPE_error)| \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_extent)| \
BIT_ULL(KEY_TYPE_reservation)| \
BIT_ULL(KEY_TYPE_reflink_p)| \
BIT_ULL(KEY_TYPE_inline_data)) \
x(inodes, 1, \
BTREE_IS_snapshots, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_inode)| \
BIT_ULL(KEY_TYPE_inode_v2)| \
BIT_ULL(KEY_TYPE_inode_v3)| \
BIT_ULL(KEY_TYPE_inode_generation)) \
x(dirents, 2, \
BTREE_IS_snapshots, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_hash_whiteout)| \
BIT_ULL(KEY_TYPE_dirent)) \
x(xattrs, 3, \
BTREE_IS_snapshots, \
BIT_ULL(KEY_TYPE_whiteout)| \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_hash_whiteout)| \
BIT_ULL(KEY_TYPE_xattr)) \
x(alloc, 4, 0, \
BIT_ULL(KEY_TYPE_alloc)| \
BIT_ULL(KEY_TYPE_alloc_v2)| \
BIT_ULL(KEY_TYPE_alloc_v3)| \
BIT_ULL(KEY_TYPE_alloc_v4)) \
x(quotas, 5, 0, \
BIT_ULL(KEY_TYPE_quota)) \
x(stripes, 6, \
BTREE_IS_data, \
BIT_ULL(KEY_TYPE_stripe)) \
x(reflink, 7, \
BTREE_IS_extents| \
BTREE_IS_data, \
BIT_ULL(KEY_TYPE_reflink_v)| \
BIT_ULL(KEY_TYPE_indirect_inline_data)| \
BIT_ULL(KEY_TYPE_error)) \
x(subvolumes, 8, 0, \
BIT_ULL(KEY_TYPE_subvolume)) \
x(snapshots, 9, 0, \
BIT_ULL(KEY_TYPE_snapshot)) \
x(lru, 10, \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(freespace, 11, \
BTREE_IS_extents, \
BIT_ULL(KEY_TYPE_set)) \
x(need_discard, 12, 0, \
BIT_ULL(KEY_TYPE_set)) \
x(backpointers, 13, \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_backpointer)) \
x(bucket_gens, 14, 0, \
BIT_ULL(KEY_TYPE_bucket_gens)) \
x(snapshot_trees, 15, 0, \
BIT_ULL(KEY_TYPE_snapshot_tree)) \
x(deleted_inodes, 16, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(logged_ops, 17, 0, \
BIT_ULL(KEY_TYPE_logged_op_truncate)| \
BIT_ULL(KEY_TYPE_logged_op_finsert)| \
BIT_ULL(KEY_TYPE_inode_alloc_cursor)) \
x(reconcile_work, 18, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)|BIT_ULL(KEY_TYPE_cookie)) \
x(subvolume_children, 19, 0, \
BIT_ULL(KEY_TYPE_set)) \
x(accounting, 20, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_accounting)) \
x(reconcile_hipri, 21, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(reconcile_pending, 22, \
BTREE_IS_snapshot_field| \
BTREE_IS_write_buffer, \
BIT_ULL(KEY_TYPE_set)) \
x(reconcile_scan, 23, 0, \
BIT_ULL(KEY_TYPE_cookie)| \
BIT_ULL(KEY_TYPE_backpointer))
enum btree_id {
#define x(name, nr, ...) BTREE_ID_##name = nr,
BCH_BTREE_IDS()
#undef x
BTREE_ID_NR
};
/*
* Maximum number of btrees that we will _ever_ have under the current scheme,
* where we refer to them with 64 bit bitfields - and we also need a bit for

View File

@ -34,18 +34,6 @@
#include <linux/kthread.h>
#include <linux/sched/cputime.h>
#define RECONCILE_WORK_IDS() \
x(none) \
x(hipri) \
x(normal) \
x(pending)
enum reconcile_work_id {
#define x(t) RECONCILE_WORK_##t,
RECONCILE_WORK_IDS()
#undef x
};
#define x(n) #n,
static const char * const reconcile_opts[] = {
@ -69,11 +57,11 @@ static const char * const rebalance_scan_strs[] = {
#define RECONCILE_SCAN_COOKIE_metadata 1
#define RECONCILE_SCAN_COOKIE_fs 0
static const enum btree_id reconcile_work_btree[] = {
[RECONCILE_WORK_hipri] = BTREE_ID_reconcile_hipri,
[RECONCILE_WORK_normal] = BTREE_ID_reconcile_work,
[RECONCILE_WORK_pending] = BTREE_ID_reconcile_pending,
};
static bool btree_is_reconcile_phys(enum btree_id btree)
{
return btree == BTREE_ID_reconcile_hipri_phys ||
btree == BTREE_ID_reconcile_work_phys;
}
static enum reconcile_work_id btree_to_reconcile_work_id(enum btree_id btree)
{
@ -241,17 +229,6 @@ void bch2_extent_reconcile_to_text(struct printbuf *out, struct bch_fs *c,
}
}
static enum reconcile_work_id rb_work_id(const struct bch_extent_reconcile *r)
{
if (!r || !r->need_rb)
return RECONCILE_WORK_none;
if (r->pending)
return RECONCILE_WORK_pending;
if (r->hipri)
return RECONCILE_WORK_hipri;
return RECONCILE_WORK_normal;
}
static inline unsigned rb_accounting_counters(const struct bch_extent_reconcile *r)
{
if (!r)
@ -476,6 +453,22 @@ static inline struct bbpos rb_work_to_data_pos(struct bpos pos)
return BBPOS(BTREE_ID_extents, pos);
}
/*
 * True if any pointer in extent @k targets a device marked rotational
 * (per bch2_dev_rotational()).
 */
static inline bool extent_has_rotational(struct bch_fs *c, struct bkey_s_c k)
{
struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr)
if (bch2_dev_rotational(c, ptr->dev))
return true;
return false;
}
/*
 * Set or clear the bit at @pos in the reconcile work btree for work class
 * @w; no-op when @w is RECONCILE_WORK_none (0).
 *
 * NOTE(review): parameter @k is unused in this body — presumably kept for
 * signature symmetry with callers; confirm before removing.
 */
static int reconcile_work_mod(struct btree_trans *trans, struct bkey_s_c k,
enum reconcile_work_id w, struct bpos pos, bool set)
{
return w ? bch2_btree_bit_mod_buffered(trans, reconcile_work_btree[w], pos, set) : 0;
}
int __bch2_trigger_extent_reconcile(struct btree_trans *trans,
enum btree_id btree, unsigned level,
struct bkey_s_c old, struct bkey_s new,
@ -488,14 +481,13 @@ int __bch2_trigger_extent_reconcile(struct btree_trans *trans,
enum reconcile_work_id new_work = rb_work_id(new_r);
if (!level) {
/* adjust reflink pos */
struct bpos pos = data_to_rb_work_pos(btree, new.k->p);
if (old_work != new_work) {
/* adjust reflink pos */
struct bpos pos = data_to_rb_work_pos(btree, new.k->p);
if (old_work && old_work != new_work)
try(bch2_btree_bit_mod_buffered(trans, reconcile_work_btree[old_work], pos, false));
if (new_work && old_work != new_work)
try(bch2_btree_bit_mod_buffered(trans, reconcile_work_btree[new_work], pos, true));
try(reconcile_work_mod(trans, old, old_work, pos, false));
try(reconcile_work_mod(trans, new.s_c, new_work, pos, true));
}
} else {
struct bch_fs *c = trans->c;
struct bpos bp = POS(old_work, bch2_bkey_get_reconcile_bp(c, old));
@ -1355,25 +1347,21 @@ static int reconcile_set_data_opts(struct btree_trans *trans,
return ret;
}
static void bkey_set_rb_pending(struct bch_fs *c, struct bkey_i *k)
static void bkey_reconcile_pending_mod(struct bch_fs *c, struct bkey_i *k, bool set)
{
struct bch_extent_reconcile *r = (struct bch_extent_reconcile *)
bch2_bkey_reconcile_opts(c, bkey_i_to_s_c(k));
BUG_ON(!r);
r->pending = true;
r->pending = set;
}
static int bch2_extent_set_rb_pending(struct btree_trans *trans,
struct btree_iter *iter,
struct bkey_s_c k)
static int bch2_extent_reconcile_pending_mod(struct btree_trans *trans, struct btree_iter *iter,
struct bkey_s_c k, bool set)
{
struct bch_fs *c = trans->c;
event_add_trace(c, reconcile_set_pending, k.k->size, buf,
bch2_bkey_val_to_text(&buf, c, k));
if (rb_work_id(bch2_bkey_reconcile_opts(c, k)) == RECONCILE_WORK_pending)
if ((rb_work_id(bch2_bkey_reconcile_opts(c, k)) == RECONCILE_WORK_pending) == set)
return 0;
try(bch2_trans_relock(trans));
@ -1382,7 +1370,7 @@ static int bch2_extent_set_rb_pending(struct btree_trans *trans,
bkey_reassemble(n, k);
if (!iter->min_depth) {
bkey_set_rb_pending(c, n);
bkey_reconcile_pending_mod(c, n, set);
return bch2_trans_update(trans, iter, n, 0) ?:
bch2_trans_commit(trans, NULL, NULL,
@ -1400,12 +1388,25 @@ static int bch2_extent_set_rb_pending(struct btree_trans *trans,
panic("\n%s\n", buf.buf);
}
bkey_set_rb_pending(c, n);
bkey_reconcile_pending_mod(c, n, set);
return bch2_btree_node_update_key(trans, &iter2, b, n, BCH_TRANS_COMMIT_no_enospc, false);
}
}
/*
 * Did a data move fail for a reason that should park the extent as
 * pending (no writable devices, insufficient devices, or ENOSPC)?
 * Emits a reconcile_set_pending trace event with the error string and
 * the key when so.
 */
static bool is_reconcile_pending_err(struct bch_fs *c, struct bkey_s_c k, int err)
{
bool ret = (bch2_err_matches(err, BCH_ERR_data_update_fail_no_rw_devs) ||
bch2_err_matches(err, BCH_ERR_insufficient_devices) ||
bch2_err_matches(err, ENOSPC));
if (ret)
event_add_trace(c, reconcile_set_pending, k.k->size, buf, ({
prt_printf(&buf, "%s\n", bch2_err_str(err));
bch2_bkey_val_to_text(&buf, c, k);
}));
return ret;
}
static int __do_reconcile_extent(struct moving_context *ctxt,
struct per_snapshot_io_opts *snapshot_io_opts,
struct btree_iter *iter, struct bkey_s_c k)
@ -1419,14 +1420,11 @@ static int __do_reconcile_extent(struct moving_context *ctxt,
int ret = bch2_move_extent(ctxt, NULL, snapshot_io_opts,
reconcile_set_data_opts, NULL,
iter, iter->min_depth, k);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
bch2_err_matches(ret, EROFS))
return ret;
if (bch2_err_matches(ret, EROFS))
return ret;
if (bch2_err_matches(ret, BCH_ERR_data_update_fail_no_rw_devs) ||
bch2_err_matches(ret, BCH_ERR_insufficient_devices) ||
bch2_err_matches(ret, ENOSPC))
return bch2_extent_set_rb_pending(trans, iter, k);
if (is_reconcile_pending_err(c, k, ret))
return bch2_extent_reconcile_pending_mod(trans, iter, k, true);
if (ret) {
WARN_ONCE(ret != -BCH_ERR_data_update_fail_no_snapshot,
"unhandled error from move_extent: %s", bch2_err_str(ret));
@ -1443,23 +1441,66 @@ static int __do_reconcile_extent(struct moving_context *ctxt,
static int do_reconcile_extent(struct moving_context *ctxt,
struct per_snapshot_io_opts *snapshot_io_opts,
struct bpos work_pos)
struct bbpos work)
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct bbpos data_pos = rb_work_to_data_pos(work_pos);
struct bbpos data_pos = rb_work_to_data_pos(work.pos);
CLASS(btree_iter, iter)(trans, data_pos.btree, data_pos.pos, BTREE_ITER_all_snapshots);
struct bkey_s_c k = bkey_try(bch2_btree_iter_peek_slot(&iter));
if (!k.k)
return 0;
if (work.btree == BTREE_ID_reconcile_pending) {
struct bch_inode_opts opts;
try(bch2_bkey_get_io_opts(trans, snapshot_io_opts, k, &opts));
struct data_update_opts data_opts = { .read_dev = -1 };
reconcile_set_data_opts(trans, NULL, data_pos.btree, k, &opts, &data_opts);
struct bch_devs_list devs_have = bch2_data_update_devs_keeping(c, &data_opts, k);
int ret = bch2_can_do_write(c, &data_opts, &devs_have);
if (ret) {
if (is_reconcile_pending_err(c, k, ret))
return 0;
return ret;
}
if (extent_has_rotational(c, k))
return bch2_extent_reconcile_pending_mod(trans, &iter, k, false);
}
event_add_trace(c, reconcile_data, k.k->size, buf,
bch2_bkey_val_to_text(&buf, c, k));
return __do_reconcile_extent(ctxt, snapshot_io_opts, &iter, k);
}
/*
 * Process one entry from a phys reconcile btree: look up the backpointer
 * at @bp_pos, resolve it to the key it points at, and run the common
 * extent-reconcile path on that key.  Positions whose backpointer is
 * missing or not a backpointer key (write buffer races) are skipped.
 */
static int do_reconcile_phys(struct moving_context *ctxt,
struct per_snapshot_io_opts *snapshot_io_opts,
struct bpos bp_pos, struct wb_maybe_flush *last_flushed)
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
CLASS(btree_iter, bp_iter)(trans, BTREE_ID_backpointers, bp_pos, 0);
struct bkey_s_c bp_k = bkey_try(bch2_btree_iter_peek_slot(&bp_iter));
if (!bp_k.k || bp_k.k->type != KEY_TYPE_backpointer) /* write buffer race */
return 0;
CLASS(btree_iter_uninit, iter)(trans);
struct bkey_s_c k = bkey_try(bch2_backpointer_get_key(trans, bkey_s_c_to_backpointer(bp_k),
&iter, 0, last_flushed));
if (!k.k)
return 0;
event_add_trace(c, reconcile_phys, k.k->size, buf,
bch2_bkey_val_to_text(&buf, c, k));
return __do_reconcile_extent(ctxt, snapshot_io_opts, &iter, k);
}
noinline_for_stack
static int do_reconcile_btree(struct moving_context *ctxt,
struct per_snapshot_io_opts *snapshot_io_opts,
@ -1657,7 +1698,8 @@ static int do_reconcile_scan_fs(struct moving_context *ctxt, struct reconcile_sc
noinline_for_stack
static int do_reconcile_scan(struct moving_context *ctxt,
struct per_snapshot_io_opts *snapshot_io_opts,
struct bpos cookie_pos, u64 cookie, u64 *sectors_scanned)
struct bpos cookie_pos, u64 cookie, u64 *sectors_scanned,
struct wb_maybe_flush *last_flushed)
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
@ -1675,9 +1717,6 @@ static int do_reconcile_scan(struct moving_context *ctxt,
r->scan_start = BBPOS(BTREE_ID_backpointers, POS(s.dev, 0));
r->scan_end = BBPOS(BTREE_ID_backpointers, POS(s.dev, U64_MAX));
struct wb_maybe_flush last_flushed __cleanup(wb_maybe_flush_exit);
wb_maybe_flush_init(&last_flushed);
bch2_btree_write_buffer_flush_sync(trans);
CLASS(disk_reservation, res)(c);
@ -1692,7 +1731,7 @@ static int do_reconcile_scan(struct moving_context *ctxt,
continue;
bch2_disk_reservation_put(c, &res.r);
do_reconcile_scan_bp(trans, s, bkey_s_c_to_backpointer(k), &last_flushed);
do_reconcile_scan_bp(trans, s, bkey_s_c_to_backpointer(k), last_flushed);
})));
} else if (s.type == RECONCILE_SCAN_inum) {
r->scan_start = BBPOS(BTREE_ID_extents, POS(s.inum, 0));
@ -1762,7 +1801,9 @@ static int do_reconcile(struct moving_context *ctxt)
static enum btree_id scan_btrees[] = {
BTREE_ID_reconcile_scan,
BTREE_ID_reconcile_hipri_phys,
BTREE_ID_reconcile_hipri,
BTREE_ID_reconcile_work_phys,
BTREE_ID_reconcile_work,
BTREE_ID_reconcile_pending,
};
@ -1776,6 +1817,9 @@ static int do_reconcile(struct moving_context *ctxt)
bch2_moving_ctxt_flush_all(ctxt);
bch2_btree_write_buffer_flush_sync(trans);
struct wb_maybe_flush last_flushed __cleanup(wb_maybe_flush_exit);
wb_maybe_flush_init(&last_flushed);
while (!bch2_move_ratelimit(ctxt)) {
if (!bch2_reconcile_enabled(c)) {
bch2_moving_ctxt_flush_all(ctxt);
@ -1808,6 +1852,10 @@ static int do_reconcile(struct moving_context *ctxt)
if (r->work_pos.btree == BTREE_ID_reconcile_pending &&
bkey_deleted(&pending_cookie.k))
break;
/* Avoid conflicts when switching between phys/normal */
bch2_moving_ctxt_flush_all(ctxt);
bch2_btree_write_buffer_flush_sync(trans);
continue;
}
@ -1822,7 +1870,7 @@ static int do_reconcile(struct moving_context *ctxt)
ret = do_reconcile_scan(ctxt, &snapshot_io_opts,
k.k->p,
le64_to_cpu(bkey_s_c_to_cookie(k).v->cookie),
&sectors_scanned);
&sectors_scanned, &last_flushed);
} else if (k.k->type == KEY_TYPE_backpointer) {
if (k.k->p.inode == RECONCILE_WORK_pending &&
bkey_deleted(&pending_cookie.k)) {
@ -1832,9 +1880,12 @@ static int do_reconcile(struct moving_context *ctxt)
ret = do_reconcile_btree(ctxt, &snapshot_io_opts,
bkey_s_c_to_backpointer(k));
} else if (btree_is_reconcile_phys(r->work_pos.btree)) {
ret = lockrestart_do(trans,
do_reconcile_phys(ctxt, &snapshot_io_opts, k.k->p, &last_flushed));
} else {
ret = lockrestart_do(trans,
do_reconcile_extent(ctxt, &snapshot_io_opts, k.k->p));
do_reconcile_extent(ctxt, &snapshot_io_opts, r->work_pos));
}
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
@ -1953,10 +2004,16 @@ void bch2_reconcile_status_to_text(struct printbuf *out, struct bch_fs *c)
work_pos.pos.offset);
} else {
prt_printf(out, "processing data: %s ",
reconcile_work_ids[btree_to_reconcile_work_id(work_pos.btree)]);
prt_printf(out, "processing data: ");
bch2_bbpos_to_text(out, rb_work_to_data_pos(work_pos.pos));
if (btree_is_reconcile_phys(work_pos.btree)) {
bch2_bbpos_to_text(out, work_pos);
} else {
prt_printf(out, " %s ",
reconcile_work_ids[btree_to_reconcile_work_id(work_pos.btree)]);
bch2_bbpos_to_text(out, rb_work_to_data_pos(work_pos.pos));
}
prt_newline(out);
}
}
@ -2201,6 +2258,103 @@ static int check_reconcile_work_data_btree(struct btree_trans *trans,
}
}
/*
 * Check one position of the backpointers btree against the phys
 * reconcile work btrees (reconcile_work_phys / reconcile_hipri_phys).
 *
 * The three iterators are advanced in lockstep from *cur_pos; the
 * backpointer's BACKPOINTER_RECONCILE_PHYS field decides which phys
 * btree (if any) should have a bit set at that position.  Mismatches are
 * reported via fsck and repaired.  *cur_pos is updated to the smallest
 * position any iterator returned, so the caller resumes from its
 * successor.
 */
static int check_reconcile_work_phys_one(struct btree_trans *trans,
					 struct btree_iter *bp_iter,
					 struct btree_iter *r_w,
					 struct btree_iter *r_h,
					 struct wb_maybe_flush *last_flushed,
					 struct bpos *cur_pos)
{
	bch2_btree_iter_set_pos(bp_iter, *cur_pos);
	bch2_btree_iter_set_pos(r_w, *cur_pos);
	bch2_btree_iter_set_pos(r_h, *cur_pos);

	struct bkey_s_c bp = bkey_try(bch2_btree_iter_peek(bp_iter));
	bkey_try(bch2_btree_iter_peek(r_w));
	bkey_try(bch2_btree_iter_peek(r_h));

	/* Resume position: earliest key any of the three btrees produced */
	*cur_pos = bpos_min(bpos_min(r_w->pos, r_h->pos), bp_iter->pos);

	/* Iterators already past *cur_pos have no key here - treat as deleted: */
	struct bkey deleted;
	bkey_init(&deleted);
	deleted.p = *cur_pos;

	if (bpos_lt(*cur_pos, bp_iter->pos)) {
		bp.k = &deleted;
		bp_iter->k = deleted;
	}

	if (bpos_lt(*cur_pos, r_w->pos))
		r_w->k = deleted;

	if (bpos_lt(*cur_pos, r_h->pos))
		r_h->k = deleted;

	enum reconcile_work_id w = bp.k && bp.k->type == KEY_TYPE_backpointer
		? BACKPOINTER_RECONCILE_PHYS(bkey_s_c_to_backpointer(bp).v)
		: 0;

	enum btree_id btree_want_set = w < ARRAY_SIZE(reconcile_work_phys_btree)
		? reconcile_work_phys_btree[w]
		: 0;

	u64 btrees_set =
		(r_w->k.type ? BIT_ULL(r_w->btree_id) : 0)|
		(r_h->k.type ? BIT_ULL(r_h->btree_id) : 0);
	u64 btree_want_set_mask = btree_want_set ? BIT_ULL(btree_want_set) : 0;

	if (btrees_set != btree_want_set_mask) {
		try(bch2_btree_write_buffer_maybe_flush(trans, bp, last_flushed));

		CLASS(printbuf, buf)();
		prt_str(&buf, "backpointer should be set in ");
		if (btree_want_set)
			/*
			 * Fix: the btree name was previously computed but
			 * never appended to the buffer (return value of
			 * bch2_btree_id_str() was discarded):
			 */
			prt_str(&buf, bch2_btree_id_str(btree_want_set));
		else
			prt_str(&buf, "(none)");
		prt_printf(&buf, "\nbut set in: ");
		bch2_prt_bitflags(&buf, __bch2_btree_ids, btrees_set);
		prt_newline(&buf);
		bch2_bkey_val_to_text(&buf, trans->c, bp);

		if (ret_fsck_err(trans, reconcile_work_phys_incorrectly_set, "%s", buf.buf)) {
			try(fix_reconcile_work_btree(trans, btree_want_set, *cur_pos, r_w));
			try(fix_reconcile_work_btree(trans, btree_want_set, *cur_pos, r_h));
		}
	}

	return 0;
}
/*
 * fsck pass: verify that, for every backpointer, membership in the phys
 * reconcile work btrees matches the backpointer's
 * BACKPOINTER_RECONCILE_PHYS field.  Walks the backpointers btree one
 * position at a time, committing each check, until the position check
 * reports POS_MAX.
 */
noinline_for_stack
static int check_reconcile_work_phys(struct btree_trans *trans)
{
struct bch_fs *c = trans->c;
struct progress_indicator progress;
bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_backpointers));
struct bpos cur_pos = POS_MIN;
CLASS(btree_iter, bp)(trans, BTREE_ID_backpointers, POS_MIN, BTREE_ITER_prefetch);
CLASS(btree_iter, r_w)(trans, BTREE_ID_reconcile_work_phys, POS_MIN, BTREE_ITER_prefetch);
CLASS(btree_iter, r_h)(trans, BTREE_ID_reconcile_hipri_phys, POS_MIN, BTREE_ITER_prefetch);
struct wb_maybe_flush last_flushed __cleanup(wb_maybe_flush_exit);
wb_maybe_flush_init(&last_flushed);
while (true) {
try(bch2_progress_update_iter(trans, &progress, &bp, "check_reconcile_work_phys"));
try(commit_do(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
check_reconcile_work_phys_one(trans, &bp, &r_w, &r_h,
&last_flushed, &cur_pos)));
if (bpos_eq(cur_pos, POS_MAX))
return 0;
cur_pos = bpos_nosnap_successor(cur_pos);
wb_maybe_flush_inc(&last_flushed);
}
}
static int check_reconcile_work_btree_key(struct btree_trans *trans,
struct btree_iter *iter, struct bkey_s_c k)
{
@ -2362,7 +2516,7 @@ int bch2_check_reconcile_work(struct bch_fs *c)
&rb_w, &rb_h, &rb_p,
&snapshot_io_opts, &progress, &last_flushed));
/* progress indicator for metadata? */
try(check_reconcile_work_phys(trans));
try(check_reconcile_work_btrees(trans));
try(check_reconcile_btree_bps(trans));

View File

@ -10,6 +10,23 @@ int bch2_extent_reconcile_validate(struct bch_fs *, struct bkey_s_c,
struct bkey_validate_context,
const struct bch_extent_reconcile *);
/*
 * Classify an extent's reconcile options into a work class:
 * none when no reconcile work is needed; otherwise pending takes
 * precedence over hipri, which takes precedence over normal.
 */
static inline enum reconcile_work_id rb_work_id(const struct bch_extent_reconcile *r)
{
if (!r || !r->need_rb)
return RECONCILE_WORK_none;
if (r->pending)
return RECONCILE_WORK_pending;
if (r->hipri)
return RECONCILE_WORK_hipri;
return RECONCILE_WORK_normal;
}
/*
 * Work class for the phys btrees: same as rb_work_id(), except that
 * pending work has no phys btree and maps to none.
 */
static inline enum reconcile_work_id rb_work_id_phys(const struct bch_extent_reconcile *r)
{
	enum reconcile_work_id w = rb_work_id(r);

	if (w == RECONCILE_WORK_pending)
		return RECONCILE_WORK_none;
	return w;
}
static inline struct bch_extent_reconcile io_opts_to_reconcile_opts(struct bch_fs *c,
struct bch_inode_opts *opts)
{

View File

@ -174,5 +174,30 @@ enum bch_reconcile_accounting_type {
BCH_REBALANCE_ACCOUNTING_NR,
};
#define RECONCILE_WORK_IDS() \
x(none) \
x(hipri) \
x(normal) \
x(pending)
enum reconcile_work_id {
#define x(t) RECONCILE_WORK_##t,
RECONCILE_WORK_IDS()
#undef x
};
__maybe_unused
static const enum btree_id reconcile_work_btree[] = {
[RECONCILE_WORK_hipri] = BTREE_ID_reconcile_hipri,
[RECONCILE_WORK_normal] = BTREE_ID_reconcile_work,
[RECONCILE_WORK_pending] = BTREE_ID_reconcile_pending,
};
__maybe_unused
static const enum btree_id reconcile_work_phys_btree[] = {
[RECONCILE_WORK_hipri] = BTREE_ID_reconcile_hipri_phys,
[RECONCILE_WORK_normal] = BTREE_ID_reconcile_work_phys,
};
#endif /* _BCACHEFS_REBALANCE_FORMAT_H */

View File

@ -830,6 +830,10 @@ int bch2_dev_add(struct bch_fs *c, const char *path, struct printbuf *err)
goto err_late;
}
bool write_sb = false;
__bch2_dev_mi_field_upgrades(c, ca, &write_sb);
bch2_write_super(c);
}
@ -918,6 +922,8 @@ int bch2_dev_online(struct bch_fs *c, const char *path, struct printbuf *err)
struct bch_dev *ca = bch2_dev_locked(c, dev_idx);
bch2_dev_mi_field_upgrades(ca);
ret = bch2_trans_mark_dev_sb(c, ca, BTREE_TRIGGER_transactional);
if (ret) {
prt_printf(err, "bch2_trans_mark_dev_sb() error: %s\n", bch2_err_str(ret));

View File

@ -1016,9 +1016,10 @@ static int bch2_fs_opt_version_init(struct bch_fs *c, struct printbuf *out)
prt_str_indented(out, "filesystem needs upgrade from older version; run fsck from older bcachefs-tools to fix\n");
return -EINVAL;
}
}
bch2_fs_mi_field_upgrades(c);
return 0;
}

View File

@ -551,6 +551,11 @@ enum fsck_err_opts {
OPT_BOOL(), \
BCH_MEMBER_DISCARD, true, \
NULL, "Enable discard/TRIM support") \
x(rotational, u8, \
OPT_DEVICE|OPT_RUNTIME, \
OPT_BOOL(), \
BCH_MEMBER_ROTATIONAL, false, \
NULL, "Disk is rotational; different behaviour for reconcile")\
x(btree_node_prefetch, u8, \
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \

View File

@ -45,6 +45,7 @@ enum bch_counters_flags {
x(reconcile_scan_inum, 117, TYPE_SECTORS) \
x(reconcile_btree, 118, TYPE_SECTORS) \
x(reconcile_data, 119, TYPE_SECTORS) \
x(reconcile_phys, 120, TYPE_SECTORS) \
x(reconcile_set_pending, 83, TYPE_SECTORS) \
x(evacuate_bucket, 84, TYPE_COUNTER) \
x(stripe_create, 102, TYPE_COUNTER) \

View File

@ -344,6 +344,7 @@ enum bch_fsck_flags {
x(dirent_cf_name_too_big, 304, 0) \
x(dirent_stray_data_after_cf_name, 305, 0) \
x(reconcile_work_incorrectly_set, 309, FSCK_AUTOFIX) \
x(reconcile_work_phys_incorrectly_set, 341, FSCK_AUTOFIX) \
x(validate_error_in_commit, 329, 0) \
x(extent_io_opts_not_set, 330, FSCK_AUTOFIX) \
x(extent_io_opts_unneeded, 331, FSCK_AUTOFIX) \
@ -352,7 +353,7 @@ enum bch_fsck_flags {
x(btree_ptr_with_no_reconcile_bp, 335, FSCK_AUTOFIX) \
x(btree_ptr_with_bad_reconcile_bp, 336, FSCK_AUTOFIX) \
x(btree_ptr_to_bad_reconcile_bp, 337, FSCK_AUTOFIX) \
x(MAX, 341, 0)
x(MAX, 342, 0)
enum bch_sb_error_id {
#define x(t, n, ...) BCH_FSCK_ERR_##t = n,

View File

@ -263,6 +263,8 @@ void bch2_member_to_text(struct printbuf *out,
prt_printf(out, "(none)");
prt_newline(out);
prt_printf(out, "Rotational:\t%llu\n", BCH_MEMBER_ROTATIONAL(m));
prt_printf(out, "Btree allocated bitmap blocksize:\t");
if (m->btree_bitmap_shift < 64)
prt_units_u64(out, 1ULL << m->btree_bitmap_shift);
@ -455,6 +457,8 @@ void bch2_sb_members_to_cpu(struct bch_fs *c)
for_each_member_device(c, ca) {
struct bch_member m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
ca->mi = bch2_mi_to_cpu(&m);
mod_bit(ca->dev_idx, c->devs_rotational.d, ca->mi.rotational);
}
struct bch_sb_field_members_v2 *mi2 = bch2_sb_field_get(c->disk_sb.sb, members_v2);
@ -811,3 +815,42 @@ void bch2_sb_members_clean_deleted(struct bch_fs *c)
if (write_sb)
bch2_write_super(c);
}
/*
 * Initialize superblock member fields that older versions did not set:
 * currently just BCH_MEMBER_ROTATIONAL, probed from the underlying block
 * device via bdev_nonrot() the first time (guarded by the *_SET flag so a
 * user override is never clobbered).
 *
 * Both callers (bch2_dev_mi_field_upgrades(), bch2_fs_mi_field_upgrades())
 * take c->sb_lock before calling; this function mutates the members_v2
 * superblock section. On modification *write_sb is set to true so the
 * caller can batch a single bch2_write_super() across several devices.
 */
void __bch2_dev_mi_field_upgrades(struct bch_fs *c, struct bch_dev *ca, bool *write_sb)
{
struct bch_member *m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
if (!BCH_MEMBER_ROTATIONAL_SET(m)) {
/* rotational = NOT non-rotational; record that it is now initialized */
SET_BCH_MEMBER_ROTATIONAL(m, !bdev_nonrot(ca->disk_sb.bdev));
SET_BCH_MEMBER_ROTATIONAL_SET(m, true);
*write_sb = true;
}
}
/*
 * Single-device wrapper: takes sb_lock (scope-guarded mutex), runs the
 * member-field upgrade for @ca only, and writes the superblock out just
 * once, and only if a field was actually initialized.
 * Used from the device-online path (see bch2_dev_online() hunk above).
 */
void bch2_dev_mi_field_upgrades(struct bch_dev *ca)
{
struct bch_fs *c = ca->fs;
guard(mutex)(&c->sb_lock);
bool write_sb = false;
__bch2_dev_mi_field_upgrades(c, ca, &write_sb);
if (write_sb)
bch2_write_super(c);
}
/*
 * Filesystem-wide variant: set BCH_MEMBER_ROTATIONAL (and any future
 * late-initialized member fields) for every online member device, if not
 * already initialized, then write the superblock out once if any member
 * changed. Member iteration happens under RCU; the superblock itself is
 * protected by sb_lock, held for the whole walk.
 * Called from bch2_fs_opt_version_init() (see hunk above).
 */
void bch2_fs_mi_field_upgrades(struct bch_fs *c)
{
guard(mutex)(&c->sb_lock);
bool write_sb = false;
scoped_guard(rcu)
for_each_online_member_rcu(c, ca)
__bch2_dev_mi_field_upgrades(c, ca, &write_sb);
if (write_sb)
bch2_write_super(c);
}

View File

@ -376,7 +376,8 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
: 1,
.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
.resize_on_mount = BCH_MEMBER_RESIZE_ON_MOUNT(mi),
.valid = bch2_member_alive(mi),
.rotational = BCH_MEMBER_ROTATIONAL(mi),
.valid = bch2_member_alive(mi),
.btree_bitmap_shift = mi->btree_bitmap_shift,
.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
};
@ -442,6 +443,10 @@ void bch2_maybe_schedule_btree_bitmap_gc(struct bch_fs *);
int bch2_sb_member_alloc(struct bch_fs *);
void bch2_sb_members_clean_deleted(struct bch_fs *);
void __bch2_dev_mi_field_upgrades(struct bch_fs *, struct bch_dev *, bool *);
void bch2_dev_mi_field_upgrades(struct bch_dev *);
void bch2_fs_mi_field_upgrades(struct bch_fs *);
static inline void bch2_prt_member_name(struct printbuf *out, struct bch_fs *c, unsigned idx)
{
if (idx == BCH_SB_MEMBER_INVALID) {

View File

@ -99,8 +99,9 @@ LE64_BITMASK(BCH_MEMBER_GROUP, struct bch_member, flags, 20, 28)
LE64_BITMASK(BCH_MEMBER_DURABILITY, struct bch_member, flags, 28, 30)
LE64_BITMASK(BCH_MEMBER_FREESPACE_INITIALIZED,
struct bch_member, flags, 30, 31)
LE64_BITMASK(BCH_MEMBER_RESIZE_ON_MOUNT,
struct bch_member, flags, 31, 32)
LE64_BITMASK(BCH_MEMBER_RESIZE_ON_MOUNT,struct bch_member, flags, 31, 32)
LE64_BITMASK(BCH_MEMBER_ROTATIONAL, struct bch_member, flags, 32, 33)
LE64_BITMASK(BCH_MEMBER_ROTATIONAL_SET, struct bch_member, flags, 33, 34)
#if 0
LE64_BITMASK(BCH_MEMBER_NR_READ_ERRORS, struct bch_member, flags[1], 0, 20);

View File

@ -14,6 +14,7 @@ struct bch_member_cpu {
u8 durability;
u8 freespace_initialized;
u8 resize_on_mount;
u8 rotational;
u8 valid;
u8 btree_bitmap_shift;
u64 btree_allocated_bitmap;

View File

@ -4,6 +4,7 @@
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
@ -125,20 +126,32 @@ int blkdev_issue_zeroout(struct block_device *bdev,
/*
 * Userspace shim for the kernel's bdev_logical_block_size().
 *
 * NOTE(review): this span is a diff rendering with the +/- markers
 * stripped — the removed lines (fstat()/BUG_ON() version) and the added
 * lines (xfstat() version) are interleaved below, which is why statbuf
 * and blksize appear declared twice. The post-patch function is: xfstat()
 * the fd; for a regular file return st_blksize; for a block device query
 * the size via the BLKPBSZGET ioctl. Confirm against the applied tree.
 */
unsigned bdev_logical_block_size(struct block_device *bdev)
{
struct stat statbuf;
unsigned blksize;
int ret;
ret = fstat(bdev->bd_fd, &statbuf);
BUG_ON(ret);
struct stat statbuf = xfstat(bdev->bd_fd);
if (!S_ISBLK(statbuf.st_mode))
return statbuf.st_blksize;
unsigned blksize;
xioctl(bdev->bd_fd, BLKPBSZGET, &blksize);
return blksize;
}
/*
 * Userspace shim for the kernel's bdev_nonrot(): true when the device is
 * non-rotational (e.g. SSD), false when rotational.
 *
 * Regular files (non-block-device fds) report false here, i.e. they are
 * treated as rotational by this shim.
 *
 * For block devices, reads /sys/dev/block/<maj>:<min>/queue/rotational;
 * if that attribute is absent/unreadable, v defaults to 0 and the device
 * is reported as non-rotational.
 *
 * NOTE(review): the mprintf() result is not NULL-checked before access()
 * — confirm mprintf() aborts on allocation failure in this codebase.
 */
bool bdev_nonrot(struct block_device *bdev)
{
struct stat statbuf = xfstat(bdev->bd_fd);
if (!S_ISBLK(statbuf.st_mode))
return false;
char *path = mprintf("/sys/dev/block/%u:%u/queue/rotational",
major(statbuf.st_rdev),
minor(statbuf.st_rdev));
/* sysfs reports 1 = rotational; invert for the "nonrot" contract */
u64 v = !access(path, R_OK)
? read_file_u64(AT_FDCWD, path)
: 0;
free(path);
return !v;
}
sector_t get_capacity(struct gendisk *disk)
{
struct block_device *bdev =