Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-22 00:00:03 +03:00)

Update bcachefs sources to 5963d1b1a4 bcacehfs: Fix bch2_get_alloc_in_memory_pos()

This commit is contained in:
parent 48eefee749
commit 934a84dfaf
@@ -1 +1 @@
-ea47add37d2771a3bbb3649da466dc2e326904bc
+5963d1b1a4a31af4282e6710c7948eb215160386
@@ -157,6 +157,46 @@ struct mean_and_variance_weighted {

s64 fast_divpow2(s64 n, u8 d);

static inline struct mean_and_variance
mean_and_variance_update_inlined(struct mean_and_variance s1, s64 v1)
{
	struct mean_and_variance s2;
	u64 v2 = abs(v1);

	s2.n           = s1.n + 1;
	s2.sum         = s1.sum + v1;
	s2.sum_squares = u128_add(s1.sum_squares, u128_square(v2));
	return s2;
}

static inline struct mean_and_variance_weighted
mean_and_variance_weighted_update_inlined(struct mean_and_variance_weighted s1, s64 x)
{
	struct mean_and_variance_weighted s2;
	// previous weighted variance.
	u64 var_w0 = s1.variance;
	u8 w = s2.w = s1.w;
	// new value weighted.
	s64 x_w = x << w;
	s64 diff_w = x_w - s1.mean;
	s64 diff = fast_divpow2(diff_w, w);
	// new mean weighted.
	s64 u_w1 = s1.mean + diff;

	BUG_ON(w % 2 != 0);

	if (!s1.init) {
		s2.mean = x_w;
		s2.variance = 0;
	} else {
		s2.mean = u_w1;
		s2.variance = ((var_w0 << w) - var_w0 + ((diff_w * (x_w - u_w1)) >> w)) >> w;
	}
	s2.init = true;

	return s2;
}

struct mean_and_variance mean_and_variance_update(struct mean_and_variance s1, s64 v1);
s64 mean_and_variance_get_mean(struct mean_and_variance s);
u64 mean_and_variance_get_variance(struct mean_and_variance s1);
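
Note on the statistics above: struct mean_and_variance is a plain running accumulator; it keeps a sample count, a sum, and a sum of squares, and mean/variance are derived from those on demand (the real code keeps the sum of squares in 128 bits via u128_add()/u128_square() so it cannot overflow). The weighted variant appears to apply an exponential decay controlled by the shift w. A minimal plain-C sketch of the unweighted accumulator, with illustrative names and ordinary 64-bit arithmetic:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative only: a plain-C analogue of struct mean_and_variance. */
struct running_stats {
	uint64_t n;           /* number of samples */
	int64_t  sum;         /* running sum of samples */
	uint64_t sum_squares; /* running sum of x^2 (can overflow for large inputs) */
};

static struct running_stats stats_update(struct running_stats s, int64_t x)
{
	uint64_t ax = (uint64_t) llabs(x);

	s.n           += 1;
	s.sum         += x;
	s.sum_squares += ax * ax;
	return s;
}

/* mean = sum / n; variance = E[x^2] - E[x]^2 (slightly off due to integer truncation) */
static int64_t stats_mean(struct running_stats s)
{
	return s.n ? s.sum / (int64_t) s.n : 0;
}

static uint64_t stats_variance(struct running_stats s)
{
	int64_t m = stats_mean(s);

	return s.n ? s.sum_squares / s.n - (uint64_t)(m * m) : 0;
}
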
@@ -540,15 +540,17 @@ bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter
			     BTREE_ITER_INTENT);
	k = bch2_btree_iter_peek_slot(iter);
	ret = bkey_err(k);
-	if (ret) {
-		bch2_trans_iter_exit(trans, iter);
-		return ERR_PTR(ret);
-	}
+	if (unlikely(ret))
+		goto err;

	a = bch2_alloc_to_v4_mut_inlined(trans, k);
-	if (IS_ERR(a))
-		bch2_trans_iter_exit(trans, iter);
+	ret = PTR_ERR_OR_ZERO(a);
+	if (unlikely(ret))
+		goto err;
	return a;
+err:
+	bch2_trans_iter_exit(trans, iter);
+	return ERR_PTR(ret);
}

int bch2_alloc_read(struct bch_fs *c)
@@ -1100,7 +1102,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
		goto out;
	}

-	if (bkey_cmp(*discard_pos_done, iter.pos) &&
+	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
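
Note: the reworked error path above uses the kernel's ERR_PTR convention, where an errno is encoded in the returned pointer and PTR_ERR_OR_ZERO() recovers it; that is what lets both failure paths share the single err: cleanup label. A generic sketch of the pattern (illustrative names, not bcachefs code):

#include <linux/err.h>

struct foo { int x; };
static struct foo the_foo;

/* Illustrative only: one error label serves every failure path. */
static struct foo *foo_get(int fail)
{
	struct foo *f;
	int ret;

	f = fail ? ERR_PTR(-ENOMEM) : &the_foo;	/* stand-in for a lookup that may fail */
	ret = PTR_ERR_OR_ZERO(f);			/* 0 on success, the encoded errno otherwise */
	if (unlikely(ret))
		goto err;
	return f;
err:
	/* undo any partial work (the real code exits its btree iterator here) */
	return ERR_PTR(ret);
}
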
@@ -419,7 +419,7 @@ bch2_bucket_alloc_early(struct btree_trans *trans,
			   BTREE_ITER_SLOTS, k, ret) {
		struct bch_alloc_v4 a;

-		if (bkey_cmp(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
+		if (bkey_ge(k.k->p, POS(ca->dev_idx, ca->mi.nbuckets)))
			break;

		if (ca->new_fs_bucket_idx &&
@@ -1245,34 +1245,11 @@ struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *c, struct open_bucket *ob)
	};
}

-/*
- * Append pointers to the space we just allocated to @k, and mark @sectors space
- * as allocated out of @ob
- */
void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
				    struct bkey_i *k, unsigned sectors,
				    bool cached)
-
{
-	struct open_bucket *ob;
-	unsigned i;
-
-	BUG_ON(sectors > wp->sectors_free);
-	wp->sectors_free -= sectors;
-
-	open_bucket_for_each(c, &wp->ptrs, ob, i) {
-		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
-		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);
-
-		ptr.cached = cached ||
-			(!ca->mi.durability &&
-			 wp->data_type == BCH_DATA_user);
-
-		bch2_bkey_append_ptr(k, ptr);
-
-		BUG_ON(sectors > ob->sectors_free);
-		ob->sectors_free -= sectors;
-	}
+	bch2_alloc_sectors_append_ptrs_inlined(c, wp, k, sectors, cached);
}

/*
@@ -1281,17 +1258,7 @@ void bch2_alloc_sectors_append_ptrs(struct bch_fs *c, struct write_point *wp,
 */
void bch2_alloc_sectors_done(struct bch_fs *c, struct write_point *wp)
{
-	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
-	struct open_bucket *ob;
-	unsigned i;
-
-	open_bucket_for_each(c, &wp->ptrs, ob, i)
-		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
-	wp->ptrs = keep;
-
-	mutex_unlock(&wp->lock);
-
-	bch2_open_buckets_put(c, &ptrs);
+	bch2_alloc_sectors_done_inlined(c, wp);
}

static inline void writepoint_init(struct write_point *wp,
@@ -4,6 +4,8 @@

#include "bcachefs.h"
+#include "alloc_types.h"
+#include "extents.h"
#include "super.h"

#include <linux/hash.h>

@@ -79,6 +81,21 @@ static inline void bch2_open_buckets_put(struct bch_fs *c,
	ptrs->nr = 0;
}

static inline void bch2_alloc_sectors_done_inlined(struct bch_fs *c, struct write_point *wp)
{
	struct open_buckets ptrs = { .nr = 0 }, keep = { .nr = 0 };
	struct open_bucket *ob;
	unsigned i;

	open_bucket_for_each(c, &wp->ptrs, ob, i)
		ob_push(c, !ob->sectors_free ? &ptrs : &keep, ob);
	wp->ptrs = keep;

	mutex_unlock(&wp->lock);

	bch2_open_buckets_put(c, &ptrs);
}

static inline void bch2_open_bucket_get(struct bch_fs *c,
					struct write_point *wp,
					struct open_buckets *ptrs)
@@ -147,6 +164,37 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
				   struct write_point **);

struct bch_extent_ptr bch2_ob_ptr(struct bch_fs *, struct open_bucket *);

/*
 * Append pointers to the space we just allocated to @k, and mark @sectors space
 * as allocated out of @ob
 */
static inline void
bch2_alloc_sectors_append_ptrs_inlined(struct bch_fs *c, struct write_point *wp,
				       struct bkey_i *k, unsigned sectors,
				       bool cached)
{
	struct open_bucket *ob;
	unsigned i;

	BUG_ON(sectors > wp->sectors_free);
	wp->sectors_free -= sectors;

	open_bucket_for_each(c, &wp->ptrs, ob, i) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ob->dev);
		struct bch_extent_ptr ptr = bch2_ob_ptr(c, ob);

		ptr.cached = cached ||
			(!ca->mi.durability &&
			 wp->data_type == BCH_DATA_user);

		bch2_bkey_append_ptr(k, ptr);

		BUG_ON(sectors > ob->sectors_free);
		ob->sectors_free -= sectors;
	}
}

void bch2_alloc_sectors_append_ptrs(struct bch_fs *, struct write_point *,
				    struct bkey_i *, unsigned, bool);
void bch2_alloc_sectors_done(struct bch_fs *, struct write_point *);
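
Note: the two hunks above follow a pattern repeated throughout this update: a hot function body moves into the header as a static inline *_inlined() variant, while the original symbol in the .c file becomes a thin out-of-line wrapper, so internal callers can inline the work and external callers keep linking against the same name. A minimal sketch of the split (hypothetical widget_* names, not bcachefs code):

/* widget.h (hypothetical): the hot path lives in the header as an inline */
static inline int widget_count_inlined(const int *v, int n)
{
	int i, count = 0;

	for (i = 0; i < n; i++)
		count += v[i] > 0;
	return count;
}

/* widget.c: the old exported symbol becomes a thin out-of-line wrapper,
 * so callers that only see the declaration still link unchanged */
int widget_count(const int *v, int n)
{
	return widget_count_inlined(v, n);
}
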
@@ -76,22 +76,26 @@ struct dev_stripe_state {
};

struct write_point {
-	struct hlist_node node;
-	struct mutex lock;
-	u64 last_used;
-	unsigned long write_point;
-	enum bch_data_type data_type;
+	struct {
+		struct hlist_node node;
+		struct mutex lock;
+		u64 last_used;
+		unsigned long write_point;
+		enum bch_data_type data_type;

-	/* calculated based on how many pointers we're actually going to use: */
-	unsigned sectors_free;
+		/* calculated based on how many pointers we're actually going to use: */
+		unsigned sectors_free;

-	struct open_buckets ptrs;
-	struct dev_stripe_state stripe;
+		struct open_buckets ptrs;
+		struct dev_stripe_state stripe;
+	} __attribute__((__aligned__(SMP_CACHE_BYTES)));

-	struct work_struct index_update_work;
+	struct {
+		struct work_struct index_update_work;

-	struct list_head writes;
-	spinlock_t writes_lock;
+		struct list_head writes;
+		spinlock_t writes_lock;
+	} __attribute__((__aligned__(SMP_CACHE_BYTES)));
};

struct write_point_specifier {
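
Note: wrapping the fields touched while building a write and the fields touched by the index-update work in separate __aligned__(SMP_CACHE_BYTES) sub-structs keeps them on different cachelines and avoids false sharing between the two paths. A generic illustration of the same layout trick (a 64-byte cacheline is assumed; the names are illustrative, not bcachefs code):

#include <stdint.h>

#define CACHELINE_BYTES 64	/* assumed cacheline size for this sketch */

struct queue {
	/* written by the producer thread */
	struct {
		uint64_t head;
		uint64_t produced;
	} __attribute__((__aligned__(CACHELINE_BYTES)));

	/* written by the consumer thread; lives on its own cacheline, so
	 * producer stores don't keep invalidating the consumer's cached copy */
	struct {
		uint64_t tail;
		uint64_t consumed;
	} __attribute__((__aligned__(CACHELINE_BYTES)));
};
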
@@ -36,7 +36,7 @@ static inline struct bpos bucket_pos_to_bp(const struct bch_fs *c,
			   (bucket_to_sector(ca, bucket.offset) <<
			    MAX_EXTENT_COMPRESS_RATIO_SHIFT) + bucket_offset);

-	BUG_ON(bkey_cmp(bucket, bp_pos_to_bucket(c, ret)));
+	BUG_ON(!bkey_eq(bucket, bp_pos_to_bucket(c, ret)));

	return ret;
}
@@ -60,7 +60,7 @@ static bool extent_matches_bp(struct bch_fs *c,

		bch2_extent_ptr_to_bp(c, btree_id, level, k, p,
				      &bucket2, &bp2);
-		if (!bpos_cmp(bucket, bucket2) &&
+		if (bpos_eq(bucket, bucket2) &&
		    !memcmp(&bp, &bp2, sizeof(bp)))
			return true;
	}
@@ -79,7 +79,7 @@ int bch2_backpointer_invalid(const struct bch_fs *c, struct bkey_s_c k,
		return -BCH_ERR_invalid_bkey;
	}

-	if (bpos_cmp(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
+	if (!bpos_eq(bp.k->p, bucket_pos_to_bp(c, bucket, bp.v->bucket_offset))) {
		prt_str(err, "backpointer at wrong pos");
		return -BCH_ERR_invalid_bkey;
	}
@@ -434,7 +434,7 @@ int bch2_get_next_backpointer(struct btree_trans *trans,

	for_each_btree_key_norestart(trans, bp_iter, BTREE_ID_backpointers,
				     bp_pos, 0, k, ret) {
-		if (bpos_cmp(k.k->p, bp_end_pos) >= 0)
+		if (bpos_ge(k.k->p, bp_end_pos))
			break;

		if (k.k->type != KEY_TYPE_backpointer)
@@ -646,8 +646,8 @@ static int check_bp_exists(struct btree_trans *trans,
	struct bkey_s_c alloc_k, bp_k;
	int ret;

-	if (bpos_cmp(bucket_pos, bucket_start) < 0 ||
-	    bpos_cmp(bucket_pos, bucket_end) > 0)
+	if (bpos_lt(bucket_pos, bucket_start) ||
+	    bpos_gt(bucket_pos, bucket_end))
		return 0;

	bch2_trans_iter_init(trans, &alloc_iter, BTREE_ID_alloc, bucket_pos, 0);
@@ -900,6 +900,14 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
	return ret;
}

+static struct bpos bucket_pos_to_bp_safe(const struct bch_fs *c,
+					 struct bpos bucket)
+{
+	return bch2_dev_exists2(c, bucket.inode)
+		? bucket_pos_to_bp(c, bucket, 0)
+		: bucket;
+}
+
int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
				 struct bpos start, struct bpos *end)
{
@@ -913,7 +921,7 @@ int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
	bch2_trans_node_iter_init(trans, &alloc_iter, BTREE_ID_alloc,
				  start, 0, 1, 0);
	bch2_trans_node_iter_init(trans, &bp_iter, BTREE_ID_backpointers,
-				  bucket_pos_to_bp(trans->c, start, 0), 0, 1, 0);
+				  bucket_pos_to_bp_safe(trans->c, start), 0, 1, 0);
	while (1) {
		alloc_k = !alloc_end
			? __bch2_btree_iter_peek_and_restart(trans, &alloc_iter, 0)
@@ -934,8 +942,8 @@ int bch2_get_alloc_in_memory_pos(struct btree_trans *trans,
			break;
		}

-		if (bpos_cmp(alloc_iter.pos, SPOS_MAX) &&
-		    bpos_cmp(bucket_pos_to_bp(trans->c, alloc_iter.pos, 0), bp_iter.pos) < 0) {
+		if (bpos_lt(alloc_iter.pos, SPOS_MAX) &&
+		    bpos_lt(bucket_pos_to_bp_safe(trans->c, alloc_iter.pos), bp_iter.pos)) {
			if (!bch2_btree_iter_advance(&alloc_iter))
				alloc_end = true;
		} else {
@@ -960,11 +968,11 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
		if (ret)
			break;

-		if (!bpos_cmp(start, POS_MIN) && bpos_cmp(end, SPOS_MAX))
+		if (bpos_eq(start, POS_MIN) && !bpos_eq(end, SPOS_MAX))
			bch_verbose(c, "%s(): alloc info does not fit in ram, running in multiple passes with %zu nodes per pass",
				    __func__, btree_nodes_fit_in_ram(c));

-		if (bpos_cmp(start, POS_MIN) || bpos_cmp(end, SPOS_MAX)) {
+		if (!bpos_eq(start, POS_MIN) || !bpos_eq(end, SPOS_MAX)) {
			struct printbuf buf = PRINTBUF;

			prt_str(&buf, "check_extents_to_backpointers(): ");
@@ -977,7 +985,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
		}

		ret = bch2_check_extents_to_backpointers_pass(&trans, start, end);
-		if (ret || !bpos_cmp(end, SPOS_MAX))
+		if (ret || bpos_eq(end, SPOS_MAX))
			break;

		start = bpos_successor(end);
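
Note: bucket_pos_to_bp_safe() is the fix named in the commit subject: bch2_get_alloc_in_memory_pos() can be handed an iterator position whose inode field does not name an existing device, and the unguarded bucket_pos_to_bp() would then look up a nonexistent bch_dev. Guarding the translation and passing the position through unchanged when the device is absent avoids that. A generic sketch of the guard pattern (hypothetical names, not bcachefs code):

#include <stdbool.h>
#include <stdint.h>

struct pos { uint64_t dev; uint64_t offset; };

/* Hypothetical stand-ins for bch2_dev_exists2() / bucket_pos_to_bp(). */
static bool dev_exists(uint64_t dev)	{ return dev < 4; }
static struct pos map_pos(struct pos p)	{ return (struct pos){ p.dev, p.offset * 512 }; }

/*
 * Guarded mapping: only translate the position when the device it names
 * actually exists; otherwise pass it through unchanged so the caller's
 * iterator still gets a usable starting point instead of crashing.
 */
static struct pos map_pos_safe(struct pos p)
{
	return dev_exists(p.dev) ? map_pos(p) : p;
}
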
@ -262,14 +262,6 @@ bool bch2_bkey_transform(const struct bkey_format *out_f,
|
||||
return true;
|
||||
}
|
||||
|
||||
#define bkey_fields() \
|
||||
x(BKEY_FIELD_INODE, p.inode) \
|
||||
x(BKEY_FIELD_OFFSET, p.offset) \
|
||||
x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
|
||||
x(BKEY_FIELD_SIZE, size) \
|
||||
x(BKEY_FIELD_VERSION_HI, version.hi) \
|
||||
x(BKEY_FIELD_VERSION_LO, version.lo)
|
||||
|
||||
struct bkey __bch2_bkey_unpack_key(const struct bkey_format *format,
|
||||
const struct bkey_packed *in)
|
||||
{
|
||||
@ -505,18 +497,18 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
|
||||
le64_to_cpu(f->field_offset[BKEY_FIELD_INODE])))
|
||||
return BKEY_PACK_POS_FAIL;
|
||||
|
||||
if (!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode)) {
|
||||
if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_INODE, in.inode))) {
|
||||
in.offset = KEY_OFFSET_MAX;
|
||||
in.snapshot = KEY_SNAPSHOT_MAX;
|
||||
exact = false;
|
||||
}
|
||||
|
||||
if (!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset)) {
|
||||
if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_OFFSET, in.offset))) {
|
||||
in.snapshot = KEY_SNAPSHOT_MAX;
|
||||
exact = false;
|
||||
}
|
||||
|
||||
if (!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot))
|
||||
if (unlikely(!set_inc_field_lossy(&state, BKEY_FIELD_SNAPSHOT, in.snapshot)))
|
||||
exact = false;
|
||||
|
||||
pack_state_finish(&state, out);
|
||||
@ -553,24 +545,6 @@ void bch2_bkey_format_init(struct bkey_format_state *s)
|
||||
s->field_min[BKEY_FIELD_SIZE] = 0;
|
||||
}
|
||||
|
||||
static void __bkey_format_add(struct bkey_format_state *s,
|
||||
unsigned field, u64 v)
|
||||
{
|
||||
s->field_min[field] = min(s->field_min[field], v);
|
||||
s->field_max[field] = max(s->field_max[field], v);
|
||||
}
|
||||
|
||||
/*
|
||||
* Changes @format so that @k can be successfully packed with @format
|
||||
*/
|
||||
void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
|
||||
{
|
||||
#define x(id, field) __bkey_format_add(s, id, k->field);
|
||||
bkey_fields()
|
||||
#undef x
|
||||
__bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
|
||||
}
|
||||
|
||||
void bch2_bkey_format_add_pos(struct bkey_format_state *s, struct bpos p)
|
||||
{
|
||||
unsigned field = 0;
|
||||
|
@ -89,17 +89,6 @@ do { \
|
||||
|
||||
struct btree;
|
||||
|
||||
struct bkey_format_state {
|
||||
u64 field_min[BKEY_NR_FIELDS];
|
||||
u64 field_max[BKEY_NR_FIELDS];
|
||||
};
|
||||
|
||||
void bch2_bkey_format_init(struct bkey_format_state *);
|
||||
void bch2_bkey_format_add_key(struct bkey_format_state *, const struct bkey *);
|
||||
void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
|
||||
struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
|
||||
const char *bch2_bkey_format_validate(struct bkey_format *);
|
||||
|
||||
__pure
|
||||
unsigned bch2_bkey_greatest_differing_bit(const struct btree *,
|
||||
const struct bkey_packed *,
|
||||
@@ -147,6 +136,37 @@ static inline int bkey_cmp_left_packed_byval(const struct btree *b,
	return bkey_cmp_left_packed(b, l, &r);
}

static __always_inline bool bpos_eq(struct bpos l, struct bpos r)
{
	return !((l.inode ^ r.inode) |
		 (l.offset ^ r.offset) |
		 (l.snapshot ^ r.snapshot));
}

static __always_inline bool bpos_lt(struct bpos l, struct bpos r)
{
	return l.inode != r.inode ? l.inode < r.inode :
		l.offset != r.offset ? l.offset < r.offset :
		l.snapshot != r.snapshot ? l.snapshot < r.snapshot : false;
}

static __always_inline bool bpos_le(struct bpos l, struct bpos r)
{
	return l.inode != r.inode ? l.inode < r.inode :
		l.offset != r.offset ? l.offset < r.offset :
		l.snapshot != r.snapshot ? l.snapshot < r.snapshot : true;
}

static __always_inline bool bpos_gt(struct bpos l, struct bpos r)
{
	return bpos_lt(r, l);
}

static __always_inline bool bpos_ge(struct bpos l, struct bpos r)
{
	return bpos_le(r, l);
}

static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
{
	return cmp_int(l.inode, r.inode) ?:
@@ -154,20 +174,60 @@ static __always_inline int bpos_cmp(struct bpos l, struct bpos r)
		cmp_int(l.snapshot, r.snapshot);
}

static inline struct bpos bpos_min(struct bpos l, struct bpos r)
{
	return bpos_lt(l, r) ? l : r;
}

static inline struct bpos bpos_max(struct bpos l, struct bpos r)
{
	return bpos_gt(l, r) ? l : r;
}

static __always_inline bool bkey_eq(struct bpos l, struct bpos r)
{
	return !((l.inode ^ r.inode) |
		 (l.offset ^ r.offset));
}

static __always_inline bool bkey_lt(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset < r.offset;
}

static __always_inline bool bkey_le(struct bpos l, struct bpos r)
{
	return l.inode != r.inode
		? l.inode < r.inode
		: l.offset <= r.offset;
}

static __always_inline bool bkey_gt(struct bpos l, struct bpos r)
{
	return bkey_lt(r, l);
}

static __always_inline bool bkey_ge(struct bpos l, struct bpos r)
{
	return bkey_le(r, l);
}

static __always_inline int bkey_cmp(struct bpos l, struct bpos r)
{
	return cmp_int(l.inode, r.inode) ?:
		cmp_int(l.offset, r.offset);
}

-static inline struct bpos bpos_min(struct bpos l, struct bpos r)
+static inline struct bpos bkey_min(struct bpos l, struct bpos r)
 {
-	return bpos_cmp(l, r) < 0 ? l : r;
+	return bkey_lt(l, r) ? l : r;
 }

-static inline struct bpos bpos_max(struct bpos l, struct bpos r)
+static inline struct bpos bkey_max(struct bpos l, struct bpos r)
 {
-	return bpos_cmp(l, r) > 0 ? l : r;
+	return bkey_gt(l, r) ? l : r;
 }

void bch2_bpos_swab(struct bpos *);
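
Note: the two helper families added above differ only in whether the snapshot field participates: bpos_eq()/bpos_lt()/... order positions by (inode, offset, snapshot), while bkey_eq()/bkey_lt()/... deliberately ignore snapshot, so two snapshot versions of the same key compare equal. A standalone approximation showing the difference (simplified struct bpos, not the bcachefs definitions):

#include <stdbool.h>
#include <stdint.h>

/* simplified stand-in for struct bpos */
struct bpos { uint64_t inode, offset; uint32_t snapshot; };

static bool bpos_eq(struct bpos l, struct bpos r)
{
	return l.inode == r.inode && l.offset == r.offset && l.snapshot == r.snapshot;
}

static bool bkey_eq(struct bpos l, struct bpos r)
{
	/* snapshot is ignored: two versions of the same key compare equal */
	return l.inode == r.inode && l.offset == r.offset;
}

int main(void)
{
	struct bpos a = { .inode = 1, .offset = 8, .snapshot = 1 };
	struct bpos b = { .inode = 1, .offset = 8, .snapshot = 2 };

	return bkey_eq(a, b) && !bpos_eq(a, b) ? 0 : 1;	/* exits 0 */
}
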
@ -663,4 +723,40 @@ void bch2_bkey_pack_test(void);
|
||||
static inline void bch2_bkey_pack_test(void) {}
|
||||
#endif
|
||||
|
||||
#define bkey_fields() \
|
||||
x(BKEY_FIELD_INODE, p.inode) \
|
||||
x(BKEY_FIELD_OFFSET, p.offset) \
|
||||
x(BKEY_FIELD_SNAPSHOT, p.snapshot) \
|
||||
x(BKEY_FIELD_SIZE, size) \
|
||||
x(BKEY_FIELD_VERSION_HI, version.hi) \
|
||||
x(BKEY_FIELD_VERSION_LO, version.lo)
|
||||
|
||||
struct bkey_format_state {
|
||||
u64 field_min[BKEY_NR_FIELDS];
|
||||
u64 field_max[BKEY_NR_FIELDS];
|
||||
};
|
||||
|
||||
void bch2_bkey_format_init(struct bkey_format_state *);
|
||||
|
||||
static inline void __bkey_format_add(struct bkey_format_state *s, unsigned field, u64 v)
|
||||
{
|
||||
s->field_min[field] = min(s->field_min[field], v);
|
||||
s->field_max[field] = max(s->field_max[field], v);
|
||||
}
|
||||
|
||||
/*
|
||||
* Changes @format so that @k can be successfully packed with @format
|
||||
*/
|
||||
static inline void bch2_bkey_format_add_key(struct bkey_format_state *s, const struct bkey *k)
|
||||
{
|
||||
#define x(id, field) __bkey_format_add(s, id, k->field);
|
||||
bkey_fields()
|
||||
#undef x
|
||||
__bkey_format_add(s, BKEY_FIELD_OFFSET, bkey_start_offset(k));
|
||||
}
|
||||
|
||||
void bch2_bkey_format_add_pos(struct bkey_format_state *, struct bpos);
|
||||
struct bkey_format bch2_bkey_format_done(struct bkey_format_state *);
|
||||
const char *bch2_bkey_format_validate(struct bkey_format *);
|
||||
|
||||
#endif /* _BCACHEFS_BKEY_H */
|
||||
|
@ -250,7 +250,7 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
|
||||
}
|
||||
|
||||
if (type != BKEY_TYPE_btree &&
|
||||
!bkey_cmp(k.k->p, POS_MAX)) {
|
||||
bkey_eq(k.k->p, POS_MAX)) {
|
||||
prt_printf(err, "key at POS_MAX");
|
||||
return -BCH_ERR_invalid_bkey;
|
||||
}
|
||||
@ -269,12 +269,12 @@ int bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
|
||||
int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
|
||||
struct printbuf *err)
|
||||
{
|
||||
if (bpos_cmp(k.k->p, b->data->min_key) < 0) {
|
||||
if (bpos_lt(k.k->p, b->data->min_key)) {
|
||||
prt_printf(err, "key before start of btree node");
|
||||
return -BCH_ERR_invalid_bkey;
|
||||
}
|
||||
|
||||
if (bpos_cmp(k.k->p, b->data->max_key) > 0) {
|
||||
if (bpos_gt(k.k->p, b->data->max_key)) {
|
||||
prt_printf(err, "key past end of btree node");
|
||||
return -BCH_ERR_invalid_bkey;
|
||||
}
|
||||
@ -284,11 +284,11 @@ int bch2_bkey_in_btree_node(struct btree *b, struct bkey_s_c k,
|
||||
|
||||
void bch2_bpos_to_text(struct printbuf *out, struct bpos pos)
|
||||
{
|
||||
if (!bpos_cmp(pos, POS_MIN))
|
||||
if (bpos_eq(pos, POS_MIN))
|
||||
prt_printf(out, "POS_MIN");
|
||||
else if (!bpos_cmp(pos, POS_MAX))
|
||||
else if (bpos_eq(pos, POS_MAX))
|
||||
prt_printf(out, "POS_MAX");
|
||||
else if (!bpos_cmp(pos, SPOS_MAX))
|
||||
else if (bpos_eq(pos, SPOS_MAX))
|
||||
prt_printf(out, "SPOS_MAX");
|
||||
else {
|
||||
if (pos.inode == U64_MAX)
|
||||
|
@ -60,7 +60,7 @@ static inline bool bch2_bkey_maybe_mergable(const struct bkey *l, const struct b
|
||||
{
|
||||
return l->type == r->type &&
|
||||
!bversion_cmp(l->version, r->version) &&
|
||||
!bpos_cmp(l->p, bkey_start_pos(r)) &&
|
||||
bpos_eq(l->p, bkey_start_pos(r)) &&
|
||||
(u64) l->size + r->size <= KEY_SIZE_MAX &&
|
||||
bch2_bkey_ops[l->type].key_merge &&
|
||||
!bch2_key_merging_disabled;
|
||||
|
@ -95,13 +95,12 @@ void bch2_dump_bset(struct bch_fs *c, struct btree *b,
|
||||
|
||||
n = bkey_unpack_key(b, _n);
|
||||
|
||||
if (bpos_cmp(n.p, k.k->p) < 0) {
|
||||
if (bpos_lt(n.p, k.k->p)) {
|
||||
printk(KERN_ERR "Key skipped backwards\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!bkey_deleted(k.k) &&
|
||||
!bpos_cmp(n.p, k.k->p))
|
||||
if (!bkey_deleted(k.k) && bpos_eq(n.p, k.k->p))
|
||||
printk(KERN_ERR "Duplicate keys\n");
|
||||
}
|
||||
|
||||
@ -542,7 +541,7 @@ static void bch2_bset_verify_rw_aux_tree(struct btree *b,
|
||||
goto start;
|
||||
while (1) {
|
||||
if (rw_aux_to_bkey(b, t, j) == k) {
|
||||
BUG_ON(bpos_cmp(rw_aux_tree(b, t)[j].k,
|
||||
BUG_ON(!bpos_eq(rw_aux_tree(b, t)[j].k,
|
||||
bkey_unpack_pos(b, k)));
|
||||
start:
|
||||
if (++j == t->size)
|
||||
@ -1077,7 +1076,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,
|
||||
while (l + 1 != r) {
|
||||
unsigned m = (l + r) >> 1;
|
||||
|
||||
if (bpos_cmp(rw_aux_tree(b, t)[m].k, *search) < 0)
|
||||
if (bpos_lt(rw_aux_tree(b, t)[m].k, *search))
|
||||
l = m;
|
||||
else
|
||||
r = m;
|
||||
@ -1330,8 +1329,8 @@ void bch2_btree_node_iter_init(struct btree_node_iter *iter,
|
||||
struct bkey_packed *k[MAX_BSETS];
|
||||
unsigned i;
|
||||
|
||||
EBUG_ON(bpos_cmp(*search, b->data->min_key) < 0);
|
||||
EBUG_ON(bpos_cmp(*search, b->data->max_key) > 0);
|
||||
EBUG_ON(bpos_lt(*search, b->data->min_key));
|
||||
EBUG_ON(bpos_gt(*search, b->data->max_key));
|
||||
bset_aux_tree_verify(b);
|
||||
|
||||
memset(iter, 0, sizeof(*iter));
|
||||
|
@ -753,6 +753,12 @@ static noinline struct btree *bch2_btree_node_fill(struct bch_fs *c,
|
||||
if (IS_ERR(b))
|
||||
return b;
|
||||
|
||||
/*
|
||||
* Btree nodes read in from disk should not have the accessed bit set
|
||||
* initially, so that linear scans don't thrash the cache:
|
||||
*/
|
||||
clear_btree_node_accessed(b);
|
||||
|
||||
bkey_copy(&b->key, k);
|
||||
if (bch2_btree_node_hash_insert(bc, b, level, btree_id)) {
|
||||
/* raced with another fill: */
|
||||
@ -833,26 +839,17 @@ static inline void btree_check_header(struct bch_fs *c, struct btree *b)
|
||||
{
|
||||
if (b->c.btree_id != BTREE_NODE_ID(b->data) ||
|
||||
b->c.level != BTREE_NODE_LEVEL(b->data) ||
|
||||
bpos_cmp(b->data->max_key, b->key.k.p) ||
|
||||
!bpos_eq(b->data->max_key, b->key.k.p) ||
|
||||
(b->key.k.type == KEY_TYPE_btree_ptr_v2 &&
|
||||
bpos_cmp(b->data->min_key,
|
||||
!bpos_eq(b->data->min_key,
|
||||
bkey_i_to_btree_ptr_v2(&b->key)->v.min_key)))
|
||||
btree_bad_header(c, b);
|
||||
}
|
||||
|
||||
/**
|
||||
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
|
||||
* in from disk if necessary.
|
||||
*
|
||||
* If IO is necessary and running under generic_make_request, returns -EAGAIN.
|
||||
*
|
||||
* The btree node will have either a read or a write lock held, depending on
|
||||
* the @write parameter.
|
||||
*/
|
||||
struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
|
||||
const struct bkey_i *k, unsigned level,
|
||||
enum six_lock_type lock_type,
|
||||
unsigned long trace_ip)
|
||||
static struct btree *__bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
|
||||
const struct bkey_i *k, unsigned level,
|
||||
enum six_lock_type lock_type,
|
||||
unsigned long trace_ip)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree_cache *bc = &c->btree_cache;
|
||||
@ -861,18 +858,6 @@ struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *
|
||||
int ret;
|
||||
|
||||
EBUG_ON(level >= BTREE_MAX_DEPTH);
|
||||
|
||||
b = btree_node_mem_ptr(k);
|
||||
|
||||
/*
|
||||
* Check b->hash_val _before_ calling btree_node_lock() - this might not
|
||||
* be the node we want anymore, and trying to lock the wrong node could
|
||||
* cause an unneccessary transaction restart:
|
||||
*/
|
||||
if (likely(c->opts.btree_node_mem_ptr_optimization &&
|
||||
b &&
|
||||
b->hash_val == btree_ptr_hash_val(k)))
|
||||
goto lock_node;
|
||||
retry:
|
||||
b = btree_cache_find(bc, k);
|
||||
if (unlikely(!b)) {
|
||||
@ -891,35 +876,6 @@ retry:
|
||||
if (IS_ERR(b))
|
||||
return b;
|
||||
} else {
|
||||
lock_node:
|
||||
/*
|
||||
* There's a potential deadlock with splits and insertions into
|
||||
* interior nodes we have to avoid:
|
||||
*
|
||||
* The other thread might be holding an intent lock on the node
|
||||
* we want, and they want to update its parent node so they're
|
||||
* going to upgrade their intent lock on the parent node to a
|
||||
* write lock.
|
||||
*
|
||||
* But if we're holding a read lock on the parent, and we're
|
||||
* trying to get the intent lock they're holding, we deadlock.
|
||||
*
|
||||
* So to avoid this we drop the read locks on parent nodes when
|
||||
* we're starting to take intent locks - and handle the race.
|
||||
*
|
||||
* The race is that they might be about to free the node we
|
||||
* want, and dropping our read lock on the parent node lets them
|
||||
* update the parent marking the node we want as freed, and then
|
||||
* free it:
|
||||
*
|
||||
* To guard against this, btree nodes are evicted from the cache
|
||||
* when they're freed - and b->hash_val is zeroed out, which we
|
||||
* check for after we lock the node.
|
||||
*
|
||||
* Then, bch2_btree_node_relock() on the parent will fail - because
|
||||
* the parent was modified, when the pointer to the node we want
|
||||
* was removed - and we'll bail out:
|
||||
*/
|
||||
if (btree_node_read_locked(path, level + 1))
|
||||
btree_node_unlock(trans, path, level + 1);
|
||||
|
||||
@ -939,6 +895,10 @@ lock_node:
|
||||
trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
|
||||
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
|
||||
}
|
||||
|
||||
/* avoid atomic set bit if it's not needed: */
|
||||
if (!btree_node_accessed(b))
|
||||
set_btree_node_accessed(b);
|
||||
}
|
||||
|
||||
if (unlikely(btree_node_read_in_flight(b))) {
|
||||
@ -976,6 +936,106 @@ lock_node:
|
||||
prefetch(p + L1_CACHE_BYTES * 2);
|
||||
}
|
||||
|
||||
if (unlikely(btree_node_read_error(b))) {
|
||||
six_unlock_type(&b->c.lock, lock_type);
|
||||
return ERR_PTR(-EIO);
|
||||
}
|
||||
|
||||
EBUG_ON(b->c.btree_id != path->btree_id);
|
||||
EBUG_ON(BTREE_NODE_LEVEL(b->data) != level);
|
||||
btree_check_header(c, b);
|
||||
|
||||
return b;
|
||||
}
|
||||
|
||||
/**
|
||||
* bch_btree_node_get - find a btree node in the cache and lock it, reading it
|
||||
* in from disk if necessary.
|
||||
*
|
||||
* If IO is necessary and running under generic_make_request, returns -EAGAIN.
|
||||
*
|
||||
* The btree node will have either a read or a write lock held, depending on
|
||||
* the @write parameter.
|
||||
*/
|
||||
struct btree *bch2_btree_node_get(struct btree_trans *trans, struct btree_path *path,
|
||||
const struct bkey_i *k, unsigned level,
|
||||
enum six_lock_type lock_type,
|
||||
unsigned long trace_ip)
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree *b;
|
||||
struct bset_tree *t;
|
||||
int ret;
|
||||
|
||||
EBUG_ON(level >= BTREE_MAX_DEPTH);
|
||||
|
||||
b = btree_node_mem_ptr(k);
|
||||
|
||||
/*
|
||||
* Check b->hash_val _before_ calling btree_node_lock() - this might not
|
||||
* be the node we want anymore, and trying to lock the wrong node could
|
||||
* cause an unneccessary transaction restart:
|
||||
*/
|
||||
if (unlikely(!c->opts.btree_node_mem_ptr_optimization ||
|
||||
!b ||
|
||||
b->hash_val != btree_ptr_hash_val(k)))
|
||||
return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
|
||||
|
||||
if (btree_node_read_locked(path, level + 1))
|
||||
btree_node_unlock(trans, path, level + 1);
|
||||
|
||||
ret = btree_node_lock(trans, path, &b->c, level, lock_type, trace_ip);
|
||||
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
|
||||
return ERR_PTR(ret);
|
||||
|
||||
BUG_ON(ret);
|
||||
|
||||
if (unlikely(b->hash_val != btree_ptr_hash_val(k) ||
|
||||
b->c.level != level ||
|
||||
race_fault())) {
|
||||
six_unlock_type(&b->c.lock, lock_type);
|
||||
if (bch2_btree_node_relock(trans, path, level + 1))
|
||||
return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
|
||||
|
||||
trace_and_count(c, trans_restart_btree_node_reused, trans, trace_ip, path);
|
||||
return ERR_PTR(btree_trans_restart(trans, BCH_ERR_transaction_restart_lock_node_reused));
|
||||
}
|
||||
|
||||
if (unlikely(btree_node_read_in_flight(b))) {
|
||||
u32 seq = b->c.lock.state.seq;
|
||||
|
||||
six_unlock_type(&b->c.lock, lock_type);
|
||||
bch2_trans_unlock(trans);
|
||||
|
||||
bch2_btree_node_wait_on_read(b);
|
||||
|
||||
/*
|
||||
* should_be_locked is not set on this path yet, so we need to
|
||||
* relock it specifically:
|
||||
*/
|
||||
if (trans) {
|
||||
int ret = bch2_trans_relock(trans) ?:
|
||||
bch2_btree_path_relock_intent(trans, path);
|
||||
if (ret) {
|
||||
BUG_ON(!trans->restarted);
|
||||
return ERR_PTR(ret);
|
||||
}
|
||||
}
|
||||
|
||||
if (!six_relock_type(&b->c.lock, lock_type, seq))
|
||||
return __bch2_btree_node_get(trans, path, k, level, lock_type, trace_ip);
|
||||
}
|
||||
|
||||
prefetch(b->aux_data);
|
||||
|
||||
for_each_bset(b, t) {
|
||||
void *p = (u64 *) b->aux_data + t->aux_data_offset;
|
||||
|
||||
prefetch(p + L1_CACHE_BYTES * 0);
|
||||
prefetch(p + L1_CACHE_BYTES * 1);
|
||||
prefetch(p + L1_CACHE_BYTES * 2);
|
||||
}
|
||||
|
||||
/* avoid atomic set bit if it's not needed: */
|
||||
if (!btree_node_accessed(b))
|
||||
set_btree_node_accessed(b);
|
||||
|
@ -76,7 +76,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
|
||||
struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);
|
||||
|
||||
if (bpos_cmp(expected_start, bp->v.min_key)) {
|
||||
if (!bpos_eq(expected_start, bp->v.min_key)) {
|
||||
bch2_topology_error(c);
|
||||
|
||||
if (bkey_deleted(&prev->k->k)) {
|
||||
@ -106,7 +106,7 @@ static int bch2_gc_check_topology(struct bch_fs *c,
|
||||
}
|
||||
}
|
||||
|
||||
if (is_last && bpos_cmp(cur.k->k.p, node_end)) {
|
||||
if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
|
||||
bch2_topology_error(c);
|
||||
|
||||
printbuf_reset(&buf1);
|
||||
@ -274,12 +274,12 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));
|
||||
|
||||
if (prev &&
|
||||
bpos_cmp(expected_start, cur->data->min_key) > 0 &&
|
||||
bpos_gt(expected_start, cur->data->min_key) &&
|
||||
BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
|
||||
/* cur overwrites prev: */
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(prev->data->min_key,
|
||||
cur->data->min_key) >= 0, c,
|
||||
if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
|
||||
cur->data->min_key), c,
|
||||
"btree node overwritten by next node at btree %s level %u:\n"
|
||||
" node %s\n"
|
||||
" next %s",
|
||||
@ -289,7 +289,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(prev->key.k.p,
|
||||
if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
|
||||
bpos_predecessor(cur->data->min_key)), c,
|
||||
"btree node with incorrect max_key at btree %s level %u:\n"
|
||||
" node %s\n"
|
||||
@ -301,8 +301,8 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
} else {
|
||||
/* prev overwrites cur: */
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(expected_start,
|
||||
cur->data->max_key) >= 0, c,
|
||||
if (mustfix_fsck_err_on(bpos_ge(expected_start,
|
||||
cur->data->max_key), c,
|
||||
"btree node overwritten by prev node at btree %s level %u:\n"
|
||||
" prev %s\n"
|
||||
" node %s",
|
||||
@ -312,7 +312,7 @@ static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(expected_start, cur->data->min_key), c,
|
||||
if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
|
||||
"btree node with incorrect min_key at btree %s level %u:\n"
|
||||
" prev %s\n"
|
||||
" node %s",
|
||||
@ -336,7 +336,7 @@ static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
|
||||
bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
|
||||
bch2_bpos_to_text(&buf2, b->key.k.p);
|
||||
|
||||
if (mustfix_fsck_err_on(bpos_cmp(child->key.k.p, b->key.k.p), c,
|
||||
if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
|
||||
"btree node with incorrect max_key at btree %s level %u:\n"
|
||||
" %s\n"
|
||||
" expected %s",
|
||||
@ -374,8 +374,8 @@ again:
|
||||
bch2_btree_and_journal_iter_init_node_iter(&iter, c, b);
|
||||
|
||||
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
|
||||
BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
|
||||
BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
|
||||
BUG_ON(bpos_lt(k.k->p, b->data->min_key));
|
||||
BUG_ON(bpos_gt(k.k->p, b->data->max_key));
|
||||
|
||||
bch2_btree_and_journal_iter_advance(&iter);
|
||||
bch2_bkey_buf_reassemble(&cur_k, c, k);
|
||||
@ -912,8 +912,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
|
||||
bkey_init(&prev.k->k);
|
||||
|
||||
while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
|
||||
BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
|
||||
BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
|
||||
BUG_ON(bpos_lt(k.k->p, b->data->min_key));
|
||||
BUG_ON(bpos_gt(k.k->p, b->data->max_key));
|
||||
|
||||
ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
|
||||
false, &k, true);
|
||||
@ -1018,7 +1018,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
|
||||
six_lock_read(&b->c.lock, NULL, NULL);
|
||||
printbuf_reset(&buf);
|
||||
bch2_bpos_to_text(&buf, b->data->min_key);
|
||||
if (mustfix_fsck_err_on(bpos_cmp(b->data->min_key, POS_MIN), c,
|
||||
if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
|
||||
"btree root with incorrect min_key: %s", buf.buf)) {
|
||||
bch_err(c, "repair unimplemented");
|
||||
ret = -BCH_ERR_fsck_repair_unimplemented;
|
||||
@ -1027,7 +1027,7 @@ static int bch2_gc_btree_init(struct btree_trans *trans,
|
||||
|
||||
printbuf_reset(&buf);
|
||||
bch2_bpos_to_text(&buf, b->data->max_key);
|
||||
if (mustfix_fsck_err_on(bpos_cmp(b->data->max_key, SPOS_MAX), c,
|
||||
if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
|
||||
"btree root with incorrect max_key: %s", buf.buf)) {
|
||||
bch_err(c, "repair unimplemented");
|
||||
ret = -BCH_ERR_fsck_repair_unimplemented;
|
||||
@ -1344,7 +1344,7 @@ static int bch2_alloc_write_key(struct btree_trans *trans,
|
||||
enum bch_data_type type;
|
||||
int ret;
|
||||
|
||||
if (bkey_cmp(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
|
||||
if (bkey_ge(iter->pos, POS(ca->dev_idx, ca->mi.nbuckets)))
|
||||
return 1;
|
||||
|
||||
bch2_alloc_to_v4(k, &old);
|
||||
|
@ -83,7 +83,7 @@ static void verify_no_dups(struct btree *b,
|
||||
struct bkey l = bkey_unpack_key(b, p);
|
||||
struct bkey r = bkey_unpack_key(b, k);
|
||||
|
||||
BUG_ON(bpos_cmp(l.p, bkey_start_pos(&r)) >= 0);
|
||||
BUG_ON(bpos_ge(l.p, bkey_start_pos(&r)));
|
||||
}
|
||||
#endif
|
||||
}
|
||||
@ -650,8 +650,8 @@ void bch2_btree_node_drop_keys_outside_node(struct btree *b)
|
||||
bch2_btree_build_aux_trees(b);
|
||||
|
||||
for_each_btree_node_key_unpack(b, k, &iter, &unpacked) {
|
||||
BUG_ON(bpos_cmp(k.k->p, b->data->min_key) < 0);
|
||||
BUG_ON(bpos_cmp(k.k->p, b->data->max_key) > 0);
|
||||
BUG_ON(bpos_lt(k.k->p, b->data->min_key));
|
||||
BUG_ON(bpos_gt(k.k->p, b->data->max_key));
|
||||
}
|
||||
}
|
||||
|
||||
@ -749,7 +749,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
|
||||
b->data->max_key = b->key.k.p;
|
||||
}
|
||||
|
||||
btree_err_on(bpos_cmp(b->data->min_key, bp->min_key),
|
||||
btree_err_on(!bpos_eq(b->data->min_key, bp->min_key),
|
||||
BTREE_ERR_MUST_RETRY, c, ca, b, NULL,
|
||||
"incorrect min_key: got %s should be %s",
|
||||
(printbuf_reset(&buf1),
|
||||
@ -758,7 +758,7 @@ static int validate_bset(struct bch_fs *c, struct bch_dev *ca,
|
||||
bch2_bpos_to_text(&buf2, bp->min_key), buf2.buf));
|
||||
}
|
||||
|
||||
btree_err_on(bpos_cmp(bn->max_key, b->key.k.p),
|
||||
btree_err_on(!bpos_eq(bn->max_key, b->key.k.p),
|
||||
BTREE_ERR_MUST_RETRY, c, ca, b, i,
|
||||
"incorrect max key %s",
|
||||
(printbuf_reset(&buf1),
|
||||
|
@ -201,7 +201,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
|
||||
{
|
||||
if (version < bcachefs_metadata_version_inode_btree_change &&
|
||||
btree_node_type_is_extents(btree_id) &&
|
||||
bpos_cmp(bn->min_key, POS_MIN) &&
|
||||
!bpos_eq(bn->min_key, POS_MIN) &&
|
||||
write)
|
||||
bn->min_key = bpos_nosnap_predecessor(bn->min_key);
|
||||
|
||||
@ -218,7 +218,7 @@ static inline void compat_btree_node(unsigned level, enum btree_id btree_id,
|
||||
|
||||
if (version < bcachefs_metadata_version_inode_btree_change &&
|
||||
btree_node_type_is_extents(btree_id) &&
|
||||
bpos_cmp(bn->min_key, POS_MIN) &&
|
||||
!bpos_eq(bn->min_key, POS_MIN) &&
|
||||
!write)
|
||||
bn->min_key = bpos_nosnap_successor(bn->min_key);
|
||||
}
|
||||
|
@ -107,7 +107,7 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
|
||||
struct bpos pos = iter->pos;
|
||||
|
||||
if ((iter->flags & BTREE_ITER_IS_EXTENTS) &&
|
||||
bkey_cmp(pos, POS_MAX))
|
||||
!bkey_eq(pos, POS_MAX))
|
||||
pos = bkey_successor(iter, pos);
|
||||
return pos;
|
||||
}
|
||||
@ -115,13 +115,13 @@ static inline struct bpos btree_iter_search_key(struct btree_iter *iter)
|
||||
static inline bool btree_path_pos_before_node(struct btree_path *path,
|
||||
struct btree *b)
|
||||
{
|
||||
return bpos_cmp(path->pos, b->data->min_key) < 0;
|
||||
return bpos_lt(path->pos, b->data->min_key);
|
||||
}
|
||||
|
||||
static inline bool btree_path_pos_after_node(struct btree_path *path,
|
||||
struct btree *b)
|
||||
{
|
||||
return bpos_cmp(b->key.k.p, path->pos) < 0;
|
||||
return bpos_gt(path->pos, b->key.k.p);
|
||||
}
|
||||
|
||||
static inline bool btree_path_pos_in_node(struct btree_path *path,
|
||||
@ -147,7 +147,7 @@ static void bch2_btree_path_verify_cached(struct btree_trans *trans,
|
||||
|
||||
ck = (void *) path->l[0].b;
|
||||
BUG_ON(ck->key.btree_id != path->btree_id ||
|
||||
bkey_cmp(ck->key.pos, path->pos));
|
||||
!bkey_eq(ck->key.pos, path->pos));
|
||||
|
||||
if (!locked)
|
||||
btree_node_unlock(trans, path, 0);
|
||||
@ -292,8 +292,8 @@ static void bch2_btree_iter_verify_entry_exit(struct btree_iter *iter)
|
||||
BUG_ON(!(iter->flags & BTREE_ITER_ALL_SNAPSHOTS) &&
|
||||
iter->pos.snapshot != iter->snapshot);
|
||||
|
||||
BUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&iter->k)) < 0 ||
|
||||
bkey_cmp(iter->pos, iter->k.p) > 0);
|
||||
BUG_ON(bkey_lt(iter->pos, bkey_start_pos(&iter->k)) ||
|
||||
bkey_gt(iter->pos, iter->k.p));
|
||||
}
|
||||
|
||||
static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k)
|
||||
@ -327,7 +327,7 @@ static int bch2_btree_iter_verify_ret(struct btree_iter *iter, struct bkey_s_c k
|
||||
if (ret)
|
||||
goto out;
|
||||
|
||||
if (!bkey_cmp(prev.k->p, k.k->p) &&
|
||||
if (bkey_eq(prev.k->p, k.k->p) &&
|
||||
bch2_snapshot_is_ancestor(trans->c, iter->snapshot,
|
||||
prev.k->p.snapshot) > 0) {
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
@ -367,11 +367,11 @@ void bch2_assert_pos_locked(struct btree_trans *trans, enum btree_id id,
|
||||
continue;
|
||||
|
||||
if (!key_cache) {
|
||||
if (bkey_cmp(pos, path->l[0].b->data->min_key) >= 0 &&
|
||||
bkey_cmp(pos, path->l[0].b->key.k.p) <= 0)
|
||||
if (bkey_ge(pos, path->l[0].b->data->min_key) &&
|
||||
bkey_le(pos, path->l[0].b->key.k.p))
|
||||
return;
|
||||
} else {
|
||||
if (!bkey_cmp(pos, path->pos))
|
||||
if (bkey_eq(pos, path->pos))
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -888,7 +888,6 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
|
||||
struct btree *b;
|
||||
unsigned level = path->level - 1;
|
||||
enum six_lock_type lock_type = __btree_lock_want(path, level);
|
||||
bool replay_done = test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags);
|
||||
struct bkey_buf tmp;
|
||||
int ret;
|
||||
|
||||
@ -896,7 +895,7 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
|
||||
|
||||
bch2_bkey_buf_init(&tmp);
|
||||
|
||||
if (unlikely(!replay_done)) {
|
||||
if (unlikely(trans->journal_replay_not_finished)) {
|
||||
ret = btree_node_iter_and_journal_peek(trans, path, flags, &tmp);
|
||||
if (ret)
|
||||
goto err;
|
||||
@ -916,7 +915,8 @@ static __always_inline int btree_path_down(struct btree_trans *trans,
|
||||
if (unlikely(ret))
|
||||
goto err;
|
||||
|
||||
if (likely(replay_done && tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
|
||||
if (likely(!trans->journal_replay_not_finished &&
|
||||
tmp.k->k.type == KEY_TYPE_btree_ptr_v2) &&
|
||||
unlikely(b != btree_node_mem_ptr(tmp.k)))
|
||||
btree_node_mem_ptr_set(trans, path, level + 1, b);
|
||||
|
||||
@ -1484,7 +1484,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
|
||||
bool intent = flags & BTREE_ITER_INTENT;
|
||||
int i;
|
||||
|
||||
BUG_ON(trans->restarted);
|
||||
EBUG_ON(trans->restarted);
|
||||
btree_trans_verify_sorted(trans);
|
||||
bch2_trans_verify_locks(trans);
|
||||
|
||||
@ -1564,16 +1564,16 @@ struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *
|
||||
_k = bch2_btree_node_iter_peek_all(&l->iter, l->b);
|
||||
k = _k ? bkey_disassemble(l->b, _k, u) : bkey_s_c_null;
|
||||
|
||||
EBUG_ON(k.k && bkey_deleted(k.k) && bpos_cmp(k.k->p, path->pos) == 0);
|
||||
EBUG_ON(k.k && bkey_deleted(k.k) && bpos_eq(k.k->p, path->pos));
|
||||
|
||||
if (!k.k || bpos_cmp(path->pos, k.k->p))
|
||||
if (!k.k || !bpos_eq(path->pos, k.k->p))
|
||||
goto hole;
|
||||
} else {
|
||||
struct bkey_cached *ck = (void *) path->l[0].b;
|
||||
|
||||
EBUG_ON(ck &&
|
||||
(path->btree_id != ck->key.btree_id ||
|
||||
bkey_cmp(path->pos, ck->key.pos)));
|
||||
!bkey_eq(path->pos, ck->key.pos)));
|
||||
EBUG_ON(!ck || !ck->valid);
|
||||
|
||||
*u = ck->k->k;
|
||||
@ -1632,7 +1632,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
|
||||
if (!b)
|
||||
goto out;
|
||||
|
||||
BUG_ON(bpos_cmp(b->key.k.p, iter->pos) < 0);
|
||||
BUG_ON(bpos_lt(b->key.k.p, iter->pos));
|
||||
|
||||
bkey_init(&iter->k);
|
||||
iter->k.p = iter->pos = b->key.k.p;
|
||||
@ -1684,7 +1684,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
|
||||
|
||||
b = btree_path_node(path, path->level + 1);
|
||||
|
||||
if (!bpos_cmp(iter->pos, b->key.k.p)) {
|
||||
if (bpos_eq(iter->pos, b->key.k.p)) {
|
||||
__btree_path_set_level_up(trans, path, path->level++);
|
||||
} else {
|
||||
/*
|
||||
@ -1729,9 +1729,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
|
||||
{
|
||||
if (likely(!(iter->flags & BTREE_ITER_ALL_LEVELS))) {
|
||||
struct bpos pos = iter->k.p;
|
||||
bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
|
||||
? bpos_cmp(pos, SPOS_MAX)
|
||||
: bkey_cmp(pos, SPOS_MAX)) != 0;
|
||||
bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
|
||||
? bpos_eq(pos, SPOS_MAX)
|
||||
: bkey_eq(pos, SPOS_MAX));
|
||||
|
||||
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
|
||||
pos = bkey_successor(iter, pos);
|
||||
@ -1749,9 +1749,9 @@ inline bool bch2_btree_iter_advance(struct btree_iter *iter)
|
||||
inline bool bch2_btree_iter_rewind(struct btree_iter *iter)
|
||||
{
|
||||
struct bpos pos = bkey_start_pos(&iter->k);
|
||||
bool ret = (iter->flags & BTREE_ITER_ALL_SNAPSHOTS
|
||||
? bpos_cmp(pos, POS_MIN)
|
||||
: bkey_cmp(pos, POS_MIN)) != 0;
|
||||
bool ret = !(iter->flags & BTREE_ITER_ALL_SNAPSHOTS
|
||||
? bpos_eq(pos, POS_MIN)
|
||||
: bkey_eq(pos, POS_MIN));
|
||||
|
||||
if (ret && !(iter->flags & BTREE_ITER_IS_EXTENTS))
|
||||
pos = bkey_predecessor(iter, pos);
|
||||
@ -1771,11 +1771,11 @@ static inline struct bkey_i *btree_trans_peek_updates(struct btree_trans *trans,
|
||||
continue;
|
||||
if (i->btree_id > btree_id)
|
||||
break;
|
||||
if (bpos_cmp(i->k->k.p, pos) < 0)
|
||||
if (bpos_lt(i->k->k.p, pos))
|
||||
continue;
|
||||
if (i->key_cache_already_flushed)
|
||||
continue;
|
||||
if (!ret || bpos_cmp(i->k->k.p, ret->k.p) < 0)
|
||||
if (!ret || bpos_lt(i->k->k.p, ret->k.p))
|
||||
ret = i->k;
|
||||
}
|
||||
|
||||
@ -1789,7 +1789,7 @@ struct bkey_i *bch2_btree_journal_peek(struct btree_trans *trans,
|
||||
{
|
||||
struct bkey_i *k;
|
||||
|
||||
if (bpos_cmp(start_pos, iter->journal_pos) < 0)
|
||||
if (bpos_lt(start_pos, iter->journal_pos))
|
||||
iter->journal_idx = 0;
|
||||
|
||||
k = bch2_journal_keys_peek_upto(trans->c, iter->btree_id,
|
||||
@ -1924,8 +1924,8 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
|
||||
? btree_trans_peek_updates(trans, iter->btree_id, search_key)
|
||||
: NULL;
|
||||
if (next_update &&
|
||||
bpos_cmp(next_update->k.p,
|
||||
k.k ? k.k->p : l->b->key.k.p) <= 0) {
|
||||
bpos_le(next_update->k.p,
|
||||
k.k ? k.k->p : l->b->key.k.p)) {
|
||||
iter->k = next_update->k;
|
||||
k = bkey_i_to_s_c(next_update);
|
||||
}
|
||||
@ -1938,7 +1938,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
|
||||
* whiteout, with a real key at the same position, since
|
||||
* in the btree deleted keys sort before non deleted.
|
||||
*/
|
||||
search_key = bpos_cmp(search_key, k.k->p)
|
||||
search_key = !bpos_eq(search_key, k.k->p)
|
||||
? k.k->p
|
||||
: bpos_successor(k.k->p);
|
||||
continue;
|
||||
@ -1946,7 +1946,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
|
||||
|
||||
if (likely(k.k)) {
|
||||
break;
|
||||
} else if (likely(bpos_cmp(l->b->key.k.p, SPOS_MAX))) {
|
||||
} else if (likely(!bpos_eq(l->b->key.k.p, SPOS_MAX))) {
|
||||
/* Advance to next leaf node: */
|
||||
search_key = bpos_successor(l->b->key.k.p);
|
||||
} else {
|
||||
@ -1996,19 +1996,17 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
|
||||
*/
|
||||
if (!(iter->flags & BTREE_ITER_IS_EXTENTS))
|
||||
iter_pos = k.k->p;
|
||||
else if (bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0)
|
||||
iter_pos = bkey_start_pos(k.k);
|
||||
else
|
||||
iter_pos = iter->pos;
|
||||
iter_pos = bkey_max(iter->pos, bkey_start_pos(k.k));
|
||||
|
||||
if (bkey_cmp(iter_pos, end) > 0) {
|
||||
if (bkey_gt(iter_pos, end)) {
|
||||
bch2_btree_iter_set_pos(iter, end);
|
||||
k = bkey_s_c_null;
|
||||
goto out_no_locked;
|
||||
}
|
||||
|
||||
if (iter->update_path &&
|
||||
bkey_cmp(iter->update_path->pos, k.k->p)) {
|
||||
!bkey_eq(iter->update_path->pos, k.k->p)) {
|
||||
bch2_path_put_nokeep(trans, iter->update_path,
|
||||
iter->flags & BTREE_ITER_INTENT);
|
||||
iter->update_path = NULL;
|
||||
@ -2134,7 +2132,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
|
||||
/* Check if we should go up to the parent node: */
|
||||
if (!k.k ||
|
||||
(iter->advanced &&
|
||||
!bpos_cmp(path_l(iter->path)->b->key.k.p, iter->pos))) {
|
||||
bpos_eq(path_l(iter->path)->b->key.k.p, iter->pos))) {
|
||||
iter->pos = path_l(iter->path)->b->key.k.p;
|
||||
btree_path_set_level_up(trans, iter->path);
|
||||
iter->advanced = false;
|
||||
@ -2150,7 +2148,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
|
||||
if (iter->path->level != iter->min_depth &&
|
||||
(iter->advanced ||
|
||||
!k.k ||
|
||||
bpos_cmp(iter->pos, k.k->p))) {
|
||||
!bpos_eq(iter->pos, k.k->p))) {
|
||||
btree_path_set_level_down(trans, iter->path, iter->min_depth);
|
||||
iter->pos = bpos_successor(iter->pos);
|
||||
iter->advanced = false;
|
||||
@ -2161,7 +2159,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
|
||||
if (iter->path->level == iter->min_depth &&
|
||||
iter->advanced &&
|
||||
k.k &&
|
||||
!bpos_cmp(iter->pos, k.k->p)) {
|
||||
bpos_eq(iter->pos, k.k->p)) {
|
||||
iter->pos = bpos_successor(iter->pos);
|
||||
iter->advanced = false;
|
||||
continue;
|
||||
@ -2169,7 +2167,7 @@ struct bkey_s_c bch2_btree_iter_peek_all_levels(struct btree_iter *iter)
|
||||
|
||||
if (iter->advanced &&
|
||||
iter->path->level == iter->min_depth &&
|
||||
bpos_cmp(k.k->p, iter->pos))
|
||||
!bpos_eq(k.k->p, iter->pos))
|
||||
iter->advanced = false;
|
||||
|
||||
BUG_ON(iter->advanced);
|
||||
@ -2240,8 +2238,8 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
|
||||
&iter->path->l[0], &iter->k);
|
||||
if (!k.k ||
|
||||
((iter->flags & BTREE_ITER_IS_EXTENTS)
|
||||
? bpos_cmp(bkey_start_pos(k.k), search_key) >= 0
|
||||
: bpos_cmp(k.k->p, search_key) > 0))
|
||||
? bpos_ge(bkey_start_pos(k.k), search_key)
|
||||
: bpos_gt(k.k->p, search_key)))
|
||||
k = btree_path_level_prev(trans, iter->path,
|
||||
&iter->path->l[0], &iter->k);
|
||||
|
||||
@ -2257,7 +2255,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
|
||||
* longer at the same _key_ (not pos), return
|
||||
* that candidate
|
||||
*/
|
||||
if (saved_path && bkey_cmp(k.k->p, saved_k.p)) {
|
||||
if (saved_path && !bkey_eq(k.k->p, saved_k.p)) {
|
||||
bch2_path_put_nokeep(trans, iter->path,
|
||||
iter->flags & BTREE_ITER_INTENT);
|
||||
iter->path = saved_path;
|
||||
@ -2292,7 +2290,7 @@ got_key:
|
||||
}
|
||||
|
||||
break;
|
||||
} else if (likely(bpos_cmp(iter->path->l[0].b->data->min_key, POS_MIN))) {
|
||||
} else if (likely(!bpos_eq(iter->path->l[0].b->data->min_key, POS_MIN))) {
|
||||
/* Advance to previous leaf node: */
|
||||
search_key = bpos_predecessor(iter->path->l[0].b->data->min_key);
|
||||
} else {
|
||||
@ -2303,10 +2301,10 @@ got_key:
|
||||
}
|
||||
}
|
||||
|
||||
EBUG_ON(bkey_cmp(bkey_start_pos(k.k), iter->pos) > 0);
|
||||
EBUG_ON(bkey_gt(bkey_start_pos(k.k), iter->pos));
|
||||
|
||||
/* Extents can straddle iter->pos: */
|
||||
if (bkey_cmp(k.k->p, iter->pos) < 0)
|
||||
if (bkey_lt(k.k->p, iter->pos))
|
||||
iter->pos = k.k->p;
|
||||
|
||||
if (iter->flags & BTREE_ITER_FILTER_SNAPSHOTS)
|
||||
@ -2374,7 +2372,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
|
||||
if ((iter->flags & BTREE_ITER_WITH_UPDATES) &&
|
||||
(next_update = btree_trans_peek_updates(trans,
|
||||
iter->btree_id, search_key)) &&
|
||||
!bpos_cmp(next_update->k.p, iter->pos)) {
|
||||
bpos_eq(next_update->k.p, iter->pos)) {
|
||||
iter->k = next_update->k;
|
||||
k = bkey_i_to_s_c(next_update);
|
||||
goto out;
|
||||
@ -2434,7 +2432,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
|
||||
|
||||
next = k.k ? bkey_start_pos(k.k) : POS_MAX;
|
||||
|
||||
if (bkey_cmp(iter->pos, next) < 0) {
|
||||
if (bkey_lt(iter->pos, next)) {
|
||||
bkey_init(&iter->k);
|
||||
iter->k.p = iter->pos;
|
||||
|
||||
@ -2594,9 +2592,14 @@ static inline void btree_path_list_remove(struct btree_trans *trans,
|
||||
unsigned i;
|
||||
|
||||
EBUG_ON(path->sorted_idx >= trans->nr_sorted);
|
||||
|
||||
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
trans->nr_sorted--;
|
||||
memmove_u64s_down_small(trans->sorted + path->sorted_idx,
|
||||
trans->sorted + path->sorted_idx + 1,
|
||||
DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
|
||||
#else
|
||||
array_remove_item(trans->sorted, trans->nr_sorted, path->sorted_idx);
|
||||
|
||||
#endif
|
||||
for (i = path->sorted_idx; i < trans->nr_sorted; i++)
|
||||
trans->paths[trans->sorted[i]].sorted_idx = i;
|
||||
|
||||
@ -2615,12 +2618,20 @@ static inline void btree_path_list_add(struct btree_trans *trans,
|
||||
|
||||
path->sorted_idx = pos ? pos->sorted_idx + 1 : 0;
|
||||
|
||||
if (trans->in_traverse_all &&
|
||||
if (unlikely(trans->in_traverse_all) &&
|
||||
trans->traverse_all_idx != U8_MAX &&
|
||||
trans->traverse_all_idx >= path->sorted_idx)
|
||||
trans->traverse_all_idx++;
|
||||
|
||||
#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
|
||||
memmove_u64s_up_small(trans->sorted + path->sorted_idx + 1,
|
||||
trans->sorted + path->sorted_idx,
|
||||
DIV_ROUND_UP(trans->nr_sorted - path->sorted_idx, 8));
|
||||
trans->nr_sorted++;
|
||||
trans->sorted[path->sorted_idx] = path->idx;
|
||||
#else
|
||||
array_insert_item(trans->sorted, trans->nr_sorted, path->sorted_idx, path->idx);
|
||||
#endif
|
||||
|
||||
for (i = path->sorted_idx; i < trans->nr_sorted; i++)
|
||||
trans->paths[trans->sorted[i]].sorted_idx = i;
|
||||
@ -2644,72 +2655,24 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
|
||||
iter->key_cache_path = NULL;
|
||||
}
|
||||
|
||||
static inline void __bch2_trans_iter_init(struct btree_trans *trans,
|
||||
struct btree_iter *iter,
|
||||
unsigned btree_id, struct bpos pos,
|
||||
unsigned locks_want,
|
||||
unsigned depth,
|
||||
unsigned flags,
|
||||
unsigned long ip)
|
||||
{
|
||||
if (unlikely(trans->restarted))
|
||||
panic("bch2_trans_iter_init(): in transaction restart, %s by %pS\n",
|
||||
bch2_err_str(trans->restarted),
|
||||
(void *) trans->last_restarted_ip);
|
||||
|
||||
if (flags & BTREE_ITER_ALL_LEVELS)
|
||||
flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
|
||||
|
||||
if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
|
||||
btree_node_type_is_extents(btree_id))
|
||||
flags |= BTREE_ITER_IS_EXTENTS;
|
||||
|
||||
if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
|
||||
!btree_type_has_snapshots(btree_id))
|
||||
flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
|
||||
|
||||
if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
|
||||
btree_type_has_snapshots(btree_id))
|
||||
flags |= BTREE_ITER_FILTER_SNAPSHOTS;
|
||||
|
||||
if (trans->journal_replay_not_finished)
|
||||
flags |= BTREE_ITER_WITH_JOURNAL;
|
||||
|
||||
iter->trans = trans;
|
||||
iter->path = NULL;
|
||||
iter->update_path = NULL;
|
||||
iter->key_cache_path = NULL;
|
||||
iter->btree_id = btree_id;
|
||||
iter->min_depth = depth;
|
||||
iter->flags = flags;
|
||||
iter->snapshot = pos.snapshot;
|
||||
iter->pos = pos;
|
||||
iter->k.type = KEY_TYPE_deleted;
|
||||
iter->k.p = pos;
|
||||
iter->k.size = 0;
|
||||
iter->journal_idx = 0;
|
||||
iter->journal_pos = POS_MIN;
|
||||
#ifdef CONFIG_BCACHEFS_DEBUG
|
||||
iter->ip_allocated = ip;
|
||||
#endif
|
||||
|
||||
iter->path = bch2_path_get(trans, btree_id, iter->pos,
|
||||
locks_want, depth, flags, ip);
|
||||
}
|
||||
|
||||
void bch2_trans_iter_init(struct btree_trans *trans,
static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
struct btree_iter *iter,
unsigned btree_id, struct bpos pos,
unsigned flags)
{
if (!btree_id_cached(trans->c, btree_id)) {
flags &= ~BTREE_ITER_CACHED;
flags &= ~BTREE_ITER_WITH_KEY_CACHE;
} else if (!(flags & BTREE_ITER_CACHED))
flags |= BTREE_ITER_WITH_KEY_CACHE;
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
bch2_btree_iter_flags(trans, btree_id, flags),
_RET_IP_);
}

__bch2_trans_iter_init(trans, iter, btree_id, pos,
0, 0, flags, _RET_IP_);
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
struct btree_iter *iter,
unsigned btree_id, struct bpos pos,
unsigned flags)
{
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
bch2_btree_iter_flags(trans, btree_id, flags),
_RET_IP_);
}

void bch2_trans_node_iter_init(struct btree_trans *trans,
@ -2720,11 +2683,16 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
unsigned depth,
unsigned flags)
{
__bch2_trans_iter_init(trans, iter, btree_id, pos, locks_want, depth,
BTREE_ITER_NOT_EXTENTS|
__BTREE_ITER_ALL_SNAPSHOTS|
BTREE_ITER_ALL_SNAPSHOTS|
flags, _RET_IP_);
flags |= BTREE_ITER_NOT_EXTENTS;
flags |= __BTREE_ITER_ALL_SNAPSHOTS;
flags |= BTREE_ITER_ALL_SNAPSHOTS;

bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
__bch2_btree_iter_flags(trans, btree_id, flags),
_RET_IP_);

iter->min_depth = depth;

BUG_ON(iter->path->locks_want < min(locks_want, BTREE_MAX_DEPTH));
BUG_ON(iter->path->level != depth);
BUG_ON(iter->min_depth != depth);
@ -303,8 +303,85 @@ static inline void bch2_btree_iter_set_snapshot(struct btree_iter *iter, u32 sna
|
||||
}
|
||||
|
||||
void bch2_trans_iter_exit(struct btree_trans *, struct btree_iter *);
|
||||
void bch2_trans_iter_init(struct btree_trans *, struct btree_iter *,
|
||||
|
||||
static inline unsigned __bch2_btree_iter_flags(struct btree_trans *trans,
|
||||
unsigned btree_id,
|
||||
unsigned flags)
|
||||
{
|
||||
if (flags & BTREE_ITER_ALL_LEVELS)
|
||||
flags |= BTREE_ITER_ALL_SNAPSHOTS|__BTREE_ITER_ALL_SNAPSHOTS;
|
||||
|
||||
if (!(flags & (BTREE_ITER_ALL_SNAPSHOTS|BTREE_ITER_NOT_EXTENTS)) &&
|
||||
btree_node_type_is_extents(btree_id))
|
||||
flags |= BTREE_ITER_IS_EXTENTS;
|
||||
|
||||
if (!(flags & __BTREE_ITER_ALL_SNAPSHOTS) &&
|
||||
!btree_type_has_snapshots(btree_id))
|
||||
flags &= ~BTREE_ITER_ALL_SNAPSHOTS;
|
||||
|
||||
if (!(flags & BTREE_ITER_ALL_SNAPSHOTS) &&
|
||||
btree_type_has_snapshots(btree_id))
|
||||
flags |= BTREE_ITER_FILTER_SNAPSHOTS;
|
||||
|
||||
if (trans->journal_replay_not_finished)
|
||||
flags |= BTREE_ITER_WITH_JOURNAL;
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
static inline unsigned bch2_btree_iter_flags(struct btree_trans *trans,
|
||||
unsigned btree_id,
|
||||
unsigned flags)
|
||||
{
|
||||
if (!btree_id_cached(trans->c, btree_id)) {
|
||||
flags &= ~BTREE_ITER_CACHED;
|
||||
flags &= ~BTREE_ITER_WITH_KEY_CACHE;
|
||||
} else if (!(flags & BTREE_ITER_CACHED))
|
||||
flags |= BTREE_ITER_WITH_KEY_CACHE;
|
||||
|
||||
return __bch2_btree_iter_flags(trans, btree_id, flags);
|
||||
}
|
||||
|
||||
static inline void bch2_trans_iter_init_common(struct btree_trans *trans,
|
||||
struct btree_iter *iter,
|
||||
unsigned btree_id, struct bpos pos,
|
||||
unsigned locks_want,
|
||||
unsigned depth,
|
||||
unsigned flags,
|
||||
unsigned long ip)
|
||||
{
|
||||
memset(iter, 0, sizeof(*iter));
|
||||
iter->trans = trans;
|
||||
iter->btree_id = btree_id;
|
||||
iter->flags = flags;
|
||||
iter->snapshot = pos.snapshot;
|
||||
iter->pos = pos;
|
||||
iter->k.p = pos;
|
||||
|
||||
#ifdef CONFIG_BCACHEFS_DEBUG
|
||||
iter->ip_allocated = ip;
|
||||
#endif
|
||||
iter->path = bch2_path_get(trans, btree_id, iter->pos,
|
||||
locks_want, depth, flags, ip);
|
||||
}

void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
unsigned, struct bpos, unsigned);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
struct btree_iter *iter,
unsigned btree_id, struct bpos pos,
unsigned flags)
{
if (__builtin_constant_p(btree_id) &&
__builtin_constant_p(flags))
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
bch2_btree_iter_flags(trans, btree_id, flags),
_THIS_IP_);
else
bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}
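/*
 * A sketch of the intent here, not text from the patch: when both btree_id
 * and flags are compile-time constants, as in many callers later in this
 * diff, bch2_btree_iter_flags() can presumably fold at compile time and the
 * common init path stays inline; non-constant arguments fall back to the
 * outlined helper. For example (call shape taken from the fs-io.c hunk in
 * this patch):
 *
 *	bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
 *			     POS(inode->v.i_ino, start_sector),
 *			     BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
 */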

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos,
unsigned, unsigned, unsigned);
@ -394,7 +471,7 @@ static inline struct bkey_s_c bch2_btree_iter_peek_upto_type(struct btree_iter *
if (!(flags & BTREE_ITER_SLOTS))
return bch2_btree_iter_peek_upto(iter, end);

if (bkey_cmp(iter->pos, end) > 0)
if (bkey_gt(iter->pos, end))
return bkey_s_c_null;

return bch2_btree_iter_peek_slot(iter);
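/*
 * A summary of the conversion pattern visible in the surrounding hunks (an
 * editorial reading, not wording from the patch): open-coded comparisons on
 * bkey_cmp()/bpos_cmp() results are replaced with the dedicated helpers, e.g.
 *
 *	!bkey_cmp(a, b)      ->  bkey_eq(a, b)
 *	bkey_cmp(a, b) <  0  ->  bkey_lt(a, b)
 *	bkey_cmp(a, b) <= 0  ->  bkey_le(a, b)
 *	bkey_cmp(a, b) >  0  ->  bkey_gt(a, b)
 *	bkey_cmp(a, b) >= 0  ->  bkey_ge(a, b)
 *
 * and likewise for the bpos_*() variants.
 */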
@ -27,8 +27,8 @@ static int bch2_btree_key_cache_cmp_fn(struct rhashtable_compare_arg *arg,
|
||||
const struct bkey_cached *ck = obj;
|
||||
const struct bkey_cached_key *key = arg->key;
|
||||
|
||||
return cmp_int(ck->key.btree_id, key->btree_id) ?:
|
||||
bpos_cmp(ck->key.pos, key->pos);
|
||||
return ck->key.btree_id != key->btree_id ||
|
||||
!bpos_eq(ck->key.pos, key->pos);
|
||||
}
|
||||
|
||||
static const struct rhashtable_params bch2_btree_key_cache_params = {
|
||||
@ -476,7 +476,7 @@ retry:
|
||||
BUG_ON(ret);
|
||||
|
||||
if (ck->key.btree_id != path->btree_id ||
|
||||
bpos_cmp(ck->key.pos, path->pos)) {
|
||||
!bpos_eq(ck->key.pos, path->pos)) {
|
||||
six_unlock_type(&ck->c.lock, lock_want);
|
||||
goto retry;
|
||||
}
|
||||
@ -550,7 +550,7 @@ retry:
|
||||
return ret;
|
||||
|
||||
if (ck->key.btree_id != path->btree_id ||
|
||||
bpos_cmp(ck->key.pos, path->pos)) {
|
||||
!bpos_eq(ck->key.pos, path->pos)) {
|
||||
six_unlock_type(&ck->c.lock, lock_want);
|
||||
goto retry;
|
||||
}
|
||||
|
@ -288,7 +288,6 @@ struct btree_iter {
|
||||
unsigned snapshot;
|
||||
|
||||
struct bpos pos;
|
||||
struct bpos pos_after_commit;
|
||||
/*
|
||||
* Current unpacked key - so that bch2_btree_iter_next()/
|
||||
* bch2_btree_iter_next_slot() can correctly advance pos.
|
||||
|
@ -72,7 +72,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
|
||||
break;
|
||||
bp = bkey_s_c_to_btree_ptr_v2(k);
|
||||
|
||||
if (bpos_cmp(next_node, bp.v->min_key)) {
|
||||
if (!bpos_eq(next_node, bp.v->min_key)) {
|
||||
bch2_dump_btree_node(c, b);
|
||||
bch2_bpos_to_text(&buf1, next_node);
|
||||
bch2_bpos_to_text(&buf2, bp.v->min_key);
|
||||
@ -82,7 +82,7 @@ static void btree_node_interior_verify(struct bch_fs *c, struct btree *b)
|
||||
bch2_btree_node_iter_advance(&iter, b);
|
||||
|
||||
if (bch2_btree_node_iter_end(&iter)) {
|
||||
if (bpos_cmp(k.k->p, b->key.k.p)) {
|
||||
if (!bpos_eq(k.k->p, b->key.k.p)) {
|
||||
bch2_dump_btree_node(c, b);
|
||||
bch2_bpos_to_text(&buf1, b->key.k.p);
|
||||
bch2_bpos_to_text(&buf2, k.k->p);
|
||||
@ -1329,7 +1329,7 @@ __bch2_btree_insert_keys_interior(struct btree_update *as,
|
||||
while (!bch2_keylist_empty(keys)) {
|
||||
struct bkey_i *k = bch2_keylist_front(keys);
|
||||
|
||||
if (bpos_cmp(k->k.p, b->key.k.p) > 0)
|
||||
if (bpos_gt(k->k.p, b->key.k.p))
|
||||
break;
|
||||
|
||||
bch2_insert_fixup_btree_ptr(as, trans, path, b, &node_iter, k);
|
||||
@ -1446,8 +1446,7 @@ static void btree_split_insert_keys(struct btree_update *as,
|
||||
struct keylist *keys)
|
||||
{
|
||||
if (!bch2_keylist_empty(keys) &&
|
||||
bpos_cmp(bch2_keylist_front(keys)->k.p,
|
||||
b->data->max_key) <= 0) {
|
||||
bpos_le(bch2_keylist_front(keys)->k.p, b->data->max_key)) {
|
||||
struct btree_node_iter node_iter;
|
||||
|
||||
bch2_btree_node_iter_init(&node_iter, b, &bch2_keylist_front(keys)->k.p);
|
||||
@ -1771,8 +1770,8 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
|
||||
|
||||
b = path->l[level].b;
|
||||
|
||||
if ((sib == btree_prev_sib && !bpos_cmp(b->data->min_key, POS_MIN)) ||
|
||||
(sib == btree_next_sib && !bpos_cmp(b->data->max_key, SPOS_MAX))) {
|
||||
if ((sib == btree_prev_sib && bpos_eq(b->data->min_key, POS_MIN)) ||
|
||||
(sib == btree_next_sib && bpos_eq(b->data->max_key, SPOS_MAX))) {
|
||||
b->sib_u64s[sib] = U16_MAX;
|
||||
return 0;
|
||||
}
|
||||
@ -1805,7 +1804,7 @@ int __bch2_foreground_maybe_merge(struct btree_trans *trans,
|
||||
next = m;
|
||||
}
|
||||
|
||||
if (bkey_cmp(bpos_successor(prev->data->max_key), next->data->min_key)) {
|
||||
if (!bpos_eq(bpos_successor(prev->data->max_key), next->data->min_key)) {
|
||||
struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
|
||||
|
||||
bch2_bpos_to_text(&buf1, prev->data->max_key);
|
||||
@ -2099,7 +2098,7 @@ static int __bch2_btree_node_update_key(struct btree_trans *trans,
|
||||
_THIS_IP_);
|
||||
|
||||
BUG_ON(iter2.path->level != b->c.level);
|
||||
BUG_ON(bpos_cmp(iter2.path->pos, new_key->k.p));
|
||||
BUG_ON(!bpos_eq(iter2.path->pos, new_key->k.p));
|
||||
|
||||
btree_path_set_level_up(trans, iter2.path);
|
||||
|
||||
|
@ -92,8 +92,8 @@ bool bch2_btree_bset_insert_key(struct btree_trans *trans,
|
||||
EBUG_ON(btree_node_just_written(b));
|
||||
EBUG_ON(bset_written(b, btree_bset_last(b)));
|
||||
EBUG_ON(bkey_deleted(&insert->k) && bkey_val_u64s(&insert->k));
|
||||
EBUG_ON(bpos_cmp(insert->k.p, b->data->min_key) < 0);
|
||||
EBUG_ON(bpos_cmp(insert->k.p, b->data->max_key) > 0);
|
||||
EBUG_ON(bpos_lt(insert->k.p, b->data->min_key));
|
||||
EBUG_ON(bpos_gt(insert->k.p, b->data->max_key));
|
||||
EBUG_ON(insert->k.u64s >
|
||||
bch_btree_keys_u64s_remaining(trans->c, b));
|
||||
|
||||
@ -257,7 +257,7 @@ static void btree_insert_key_leaf(struct btree_trans *trans,
|
||||
static inline void btree_insert_entry_checks(struct btree_trans *trans,
|
||||
struct btree_insert_entry *i)
|
||||
{
|
||||
BUG_ON(bpos_cmp(i->k->k.p, i->path->pos));
|
||||
BUG_ON(!bpos_eq(i->k->k.p, i->path->pos));
|
||||
BUG_ON(i->cached != i->path->cached);
|
||||
BUG_ON(i->level != i->path->level);
|
||||
BUG_ON(i->btree_id != i->path->btree_id);
|
||||
@ -517,11 +517,12 @@ static int bch2_trans_commit_run_triggers(struct btree_trans *trans)
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BCACHEFS_DEBUG
|
||||
trans_for_each_update(trans, i)
|
||||
BUG_ON(!(i->flags & BTREE_TRIGGER_NORUN) &&
|
||||
(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS & (1U << i->bkey_type)) &&
|
||||
(!i->insert_trigger_run || !i->overwrite_trigger_run));
|
||||
|
||||
#endif
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -614,7 +615,7 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
|
||||
*/
|
||||
i->old_v = bch2_btree_path_peek_slot(i->path, &i->old_k).v;
|
||||
|
||||
if (unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))) {
|
||||
if (unlikely(trans->journal_replay_not_finished)) {
|
||||
struct bkey_i *j_k =
|
||||
bch2_journal_keys_peek_slot(c, i->btree_id, i->level,
|
||||
i->k->k.p);
|
||||
@ -761,6 +762,7 @@ static noinline void bch2_drop_overwrites_from_journal(struct btree_trans *trans
|
||||
bch2_journal_key_overwritten(trans->c, i->btree_id, i->level, i->k->k.p);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BCACHEFS_DEBUG
|
||||
static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
|
||||
struct btree_insert_entry *i,
|
||||
struct printbuf *err)
|
||||
@ -787,6 +789,7 @@ static noinline int bch2_trans_commit_bkey_invalid(struct btree_trans *trans,
|
||||
|
||||
return -EINVAL;
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Get journal reservation, take write locks, and attempt to do btree update(s):
|
||||
@ -799,15 +802,17 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
|
||||
struct btree_insert_entry *i;
|
||||
struct printbuf buf = PRINTBUF;
|
||||
int ret, u64s_delta = 0;
|
||||
int rw = (trans->flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
|
||||
|
||||
#ifdef CONFIG_BCACHEFS_DEBUG
|
||||
trans_for_each_update(trans, i) {
|
||||
int rw = (trans->flags & BTREE_INSERT_JOURNAL_REPLAY) ? READ : WRITE;
|
||||
|
||||
if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
|
||||
i->bkey_type, rw, &buf)))
|
||||
return bch2_trans_commit_bkey_invalid(trans, i, &buf);
|
||||
btree_insert_entry_checks(trans, i);
|
||||
}
|
||||
|
||||
#endif
|
||||
printbuf_exit(&buf);
|
||||
|
||||
trans_for_each_update(trans, i) {
|
||||
@ -845,7 +850,7 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans,
|
||||
|
||||
ret = bch2_trans_commit_write_locked(trans, stopped_at, trace_ip);
|
||||
|
||||
if (!ret && unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags)))
|
||||
if (!ret && unlikely(trans->journal_replay_not_finished))
|
||||
bch2_drop_overwrites_from_journal(trans);
|
||||
|
||||
trans_for_each_update(trans, i)
|
||||
@ -1034,13 +1039,13 @@ int __bch2_trans_commit(struct btree_trans *trans)
|
||||
trans->journal_u64s += jset_u64s(JSET_ENTRY_LOG_U64s);
|
||||
|
||||
trans_for_each_update(trans, i) {
|
||||
BUG_ON(!i->path->should_be_locked);
|
||||
EBUG_ON(!i->path->should_be_locked);
|
||||
|
||||
ret = bch2_btree_path_upgrade(trans, i->path, i->level + 1);
|
||||
if (unlikely(ret))
|
||||
goto out;
|
||||
|
||||
BUG_ON(!btree_node_intent_locked(i->path, i->level));
|
||||
EBUG_ON(!btree_node_intent_locked(i->path, i->level));
|
||||
|
||||
if (i->key_cache_already_flushed)
|
||||
continue;
|
||||
@ -1065,7 +1070,7 @@ int __bch2_trans_commit(struct btree_trans *trans)
|
||||
goto err;
|
||||
}
|
||||
retry:
|
||||
BUG_ON(trans->restarted);
|
||||
EBUG_ON(trans->restarted);
|
||||
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
|
||||
|
||||
ret = do_bch2_trans_commit(trans, &i, _RET_IP_);
|
||||
@ -1123,7 +1128,7 @@ static noinline int __check_pos_snapshot_overwritten(struct btree_trans *trans,
|
||||
if (!k.k)
|
||||
break;
|
||||
|
||||
if (bkey_cmp(pos, k.k->p))
|
||||
if (!bkey_eq(pos, k.k->p))
|
||||
break;
|
||||
|
||||
if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
|
||||
@ -1211,12 +1216,12 @@ int bch2_trans_update_extent(struct btree_trans *trans,
|
||||
}
|
||||
nomerge1:
|
||||
ret = 0;
|
||||
if (!bkey_cmp(k.k->p, start))
|
||||
if (bkey_eq(k.k->p, start))
|
||||
goto next;
|
||||
|
||||
while (bkey_cmp(insert->k.p, bkey_start_pos(k.k)) > 0) {
|
||||
bool front_split = bkey_cmp(bkey_start_pos(k.k), start) < 0;
|
||||
bool back_split = bkey_cmp(k.k->p, insert->k.p) > 0;
|
||||
while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
|
||||
bool front_split = bkey_lt(bkey_start_pos(k.k), start);
|
||||
bool back_split = bkey_gt(k.k->p, insert->k.p);
|
||||
|
||||
/*
|
||||
* If we're going to be splitting a compressed extent, note it
|
||||
@ -1275,7 +1280,7 @@ nomerge1:
|
||||
goto err;
|
||||
}
|
||||
|
||||
if (bkey_cmp(k.k->p, insert->k.p) <= 0) {
|
||||
if (bkey_le(k.k->p, insert->k.p)) {
|
||||
update = bch2_trans_kmalloc(trans, sizeof(*update));
|
||||
if ((ret = PTR_ERR_OR_ZERO(update)))
|
||||
goto err;
|
||||
@ -1381,7 +1386,7 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
|
||||
for_each_btree_key_norestart(trans, iter, btree_id, pos,
|
||||
BTREE_ITER_ALL_SNAPSHOTS|
|
||||
BTREE_ITER_NOPRESERVE, k, ret) {
|
||||
if (bkey_cmp(k.k->p, pos))
|
||||
if (!bkey_eq(k.k->p, pos))
|
||||
break;
|
||||
|
||||
if (bch2_snapshot_is_ancestor(trans->c, snapshot,
|
||||
@ -1433,11 +1438,11 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
|
||||
{
|
||||
struct bch_fs *c = trans->c;
|
||||
struct btree_insert_entry *i, n;
|
||||
int cmp;
|
||||
|
||||
BUG_ON(!path->should_be_locked);
|
||||
|
||||
BUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
|
||||
BUG_ON(bpos_cmp(k->k.p, path->pos));
|
||||
EBUG_ON(!path->should_be_locked);
|
||||
EBUG_ON(trans->nr_updates >= BTREE_ITER_MAX);
|
||||
EBUG_ON(!bpos_eq(k->k.p, path->pos));
|
||||
|
||||
n = (struct btree_insert_entry) {
|
||||
.flags = flags,
|
||||
@ -1460,13 +1465,14 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
|
||||
* Pending updates are kept sorted: first, find position of new update,
|
||||
* then delete/trim any updates the new update overwrites:
|
||||
*/
|
||||
trans_for_each_update(trans, i)
|
||||
if (btree_insert_entry_cmp(&n, i) <= 0)
|
||||
trans_for_each_update(trans, i) {
|
||||
cmp = btree_insert_entry_cmp(&n, i);
|
||||
if (cmp <= 0)
|
||||
break;
|
||||
}
|
||||
|
||||
if (i < trans->updates + trans->nr_updates &&
|
||||
!btree_insert_entry_cmp(&n, i)) {
|
||||
BUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
|
||||
if (!cmp && i < trans->updates + trans->nr_updates) {
|
||||
EBUG_ON(i->insert_trigger_run || i->overwrite_trigger_run);
|
||||
|
||||
bch2_path_put(trans, i->path, true);
|
||||
i->flags = n.flags;
|
||||
@ -1481,7 +1487,7 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
|
||||
i->old_v = bch2_btree_path_peek_slot(path, &i->old_k).v;
|
||||
i->old_btree_u64s = !bkey_deleted(&i->old_k) ? i->old_k.u64s : 0;
|
||||
|
||||
if (unlikely(!test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags))) {
|
||||
if (unlikely(trans->journal_replay_not_finished)) {
|
||||
struct bkey_i *j_k =
|
||||
bch2_journal_keys_peek_slot(c, n.btree_id, n.level, k->k.p);
|
||||
|
||||
@ -1507,7 +1513,7 @@ bch2_trans_update_by_path_trace(struct btree_trans *trans, struct btree_path *pa
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __must_check
|
||||
static inline int __must_check
|
||||
bch2_trans_update_by_path(struct btree_trans *trans, struct btree_path *path,
|
||||
struct bkey_i *k, enum btree_update_flags flags)
|
||||
{
|
||||
@ -1544,7 +1550,7 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
|
||||
btree_id_cached(trans->c, path->btree_id)) {
|
||||
if (!iter->key_cache_path ||
|
||||
!iter->key_cache_path->should_be_locked ||
|
||||
bpos_cmp(iter->key_cache_path->pos, k->k.p)) {
|
||||
!bpos_eq(iter->key_cache_path->pos, k->k.p)) {
|
||||
if (!iter->key_cache_path)
|
||||
iter->key_cache_path =
|
||||
bch2_path_get(trans, path->btree_id, path->pos, 1, 0,
|
||||
@ -1655,7 +1661,7 @@ int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (bkey_cmp(iter.pos, end) >= 0)
|
||||
if (bkey_ge(iter.pos, end))
|
||||
break;
|
||||
|
||||
bkey_init(&delete.k);
|
||||
|
@ -284,6 +284,8 @@ static long bch2_ioctl_disk_set_state(struct bch_fs *c,
|
||||
return PTR_ERR(ca);
|
||||
|
||||
ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
|
||||
if (ret)
|
||||
bch_err(c, "Error setting device state: %s", bch2_err_str(ret));
|
||||
|
||||
percpu_ref_put(&ca->ref);
|
||||
return ret;
|
||||
@ -631,11 +633,14 @@ do { \
|
||||
\
|
||||
if (copy_from_user(&i, arg, sizeof(i))) \
|
||||
return -EFAULT; \
|
||||
return bch2_ioctl_##_name(c, i); \
|
||||
ret = bch2_ioctl_##_name(c, i); \
|
||||
goto out; \
|
||||
} while (0)
|
||||
|
||||
long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
|
||||
{
|
||||
long ret;
|
||||
|
||||
switch (cmd) {
|
||||
case BCH_IOCTL_QUERY_UUID:
|
||||
return bch2_ioctl_query_uuid(c, arg);
|
||||
@ -679,6 +684,10 @@ long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
|
||||
default:
|
||||
return -ENOTTY;
|
||||
}
|
||||
out:
|
||||
if (ret < 0)
|
||||
ret = bch2_err_class(ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static DEFINE_IDR(bch_chardev_minor);
|
||||
|
@ -31,7 +31,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
|
||||
|
||||
darray_init(&s);
|
||||
|
||||
if (!bkey_cmp(old_pos, new_pos))
|
||||
if (bkey_eq(old_pos, new_pos))
|
||||
return 0;
|
||||
|
||||
if (!snapshot_t(c, old_pos.snapshot)->children[0])
|
||||
@ -46,7 +46,7 @@ static int insert_snapshot_whiteouts(struct btree_trans *trans,
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (bkey_cmp(old_pos, k.k->p))
|
||||
if (!bkey_eq(old_pos, k.k->p))
|
||||
break;
|
||||
|
||||
if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot)) {
|
||||
@ -238,7 +238,7 @@ err:
|
||||
if (ret)
|
||||
break;
|
||||
next:
|
||||
while (bkey_cmp(iter.pos, bch2_keylist_front(keys)->k.p) >= 0) {
|
||||
while (bkey_ge(iter.pos, bch2_keylist_front(keys)->k.p)) {
|
||||
bch2_keylist_pop_front(keys);
|
||||
if (bch2_keylist_empty(keys))
|
||||
goto out;
|
||||
|
@ -307,7 +307,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!bpos_cmp(SPOS_MAX, i->from))
|
||||
if (bpos_eq(SPOS_MAX, i->from))
|
||||
return i->ret;
|
||||
|
||||
bch2_trans_init(&trans, i->c, 0, 0);
|
||||
@ -318,7 +318,7 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
|
||||
break;
|
||||
|
||||
bch2_btree_node_to_text(&i->buf, i->c, b);
|
||||
i->from = bpos_cmp(SPOS_MAX, b->key.k.p)
|
||||
i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
|
||||
? bpos_successor(b->key.k.p)
|
||||
: b->key.k.p;
|
||||
}
|
||||
@ -369,7 +369,7 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (bpos_cmp(l->b->key.k.p, i->prev_node) > 0) {
|
||||
if (bpos_gt(l->b->key.k.p, i->prev_node)) {
|
||||
bch2_btree_node_to_text(&i->buf, i->c, l->b);
|
||||
i->prev_node = l->b->key.k.p;
|
||||
}
|
||||
|
@ -350,8 +350,8 @@ int bch2_dirent_rename(struct btree_trans *trans,
|
||||
bkey_init(&new_src->k);
|
||||
new_src->k.p = src_iter.pos;
|
||||
|
||||
if (bkey_cmp(dst_pos, src_iter.pos) <= 0 &&
|
||||
bkey_cmp(src_iter.pos, dst_iter.pos) < 0) {
|
||||
if (bkey_le(dst_pos, src_iter.pos) &&
|
||||
bkey_lt(src_iter.pos, dst_iter.pos)) {
|
||||
/*
|
||||
* We have a hash collision for the new dst key,
|
||||
* and new_src - the key we're deleting - is between
|
||||
|
@ -108,7 +108,7 @@ int bch2_stripe_invalid(const struct bch_fs *c, struct bkey_s_c k,
|
||||
{
|
||||
const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
|
||||
|
||||
if (!bkey_cmp(k.k->p, POS_MIN)) {
|
||||
if (bkey_eq(k.k->p, POS_MIN)) {
|
||||
prt_printf(err, "stripe at POS_MIN");
|
||||
return -BCH_ERR_invalid_bkey;
|
||||
}
|
||||
@ -725,7 +725,7 @@ static int ec_stripe_bkey_insert(struct btree_trans *trans,
|
||||
|
||||
for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
|
||||
if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0) {
|
||||
if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
|
||||
if (start_pos.offset) {
|
||||
start_pos = min_pos;
|
||||
bch2_btree_iter_set_pos(&iter, start_pos);
|
||||
|
@ -2,7 +2,7 @@
|
||||
#ifndef _BCACHEFS_EC_TYPES_H
|
||||
#define _BCACHEFS_EC_TYPES_H
|
||||
|
||||
#include <linux/llist.h>
|
||||
#include "bcachefs_format.h"
|
||||
|
||||
struct bch_replicas_padded {
|
||||
struct bch_replicas_entry e;
|
||||
|
@ -73,8 +73,7 @@ static int count_iters_for_insert(struct btree_trans *trans,
|
||||
for_each_btree_key_norestart(trans, iter,
|
||||
BTREE_ID_reflink, POS(0, idx + offset),
|
||||
BTREE_ITER_SLOTS, r_k, ret2) {
|
||||
if (bkey_cmp(bkey_start_pos(r_k.k),
|
||||
POS(0, idx + sectors)) >= 0)
|
||||
if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
|
||||
break;
|
||||
|
||||
/* extent_update_to_keys(), for the reflink_v update */
|
||||
@ -132,11 +131,10 @@ int bch2_extent_atomic_end(struct btree_trans *trans,
|
||||
for_each_btree_key_continue_norestart(copy, 0, k, ret) {
|
||||
unsigned offset = 0;
|
||||
|
||||
if (bkey_cmp(bkey_start_pos(k.k), *end) >= 0)
|
||||
if (bkey_ge(bkey_start_pos(k.k), *end))
|
||||
break;
|
||||
|
||||
if (bkey_cmp(bkey_start_pos(&insert->k),
|
||||
bkey_start_pos(k.k)) > 0)
|
||||
if (bkey_gt(bkey_start_pos(&insert->k), bkey_start_pos(k.k)))
|
||||
offset = bkey_start_offset(&insert->k) -
|
||||
bkey_start_offset(k.k);
|
||||
|
||||
|
@ -235,7 +235,7 @@ void bch2_btree_ptr_v2_compat(enum btree_id btree_id, unsigned version,
|
||||
|
||||
if (version < bcachefs_metadata_version_inode_btree_change &&
|
||||
btree_node_type_is_extents(btree_id) &&
|
||||
bkey_cmp(bp.v->min_key, POS_MIN))
|
||||
!bkey_eq(bp.v->min_key, POS_MIN))
|
||||
bp.v->min_key = write
|
||||
? bpos_nosnap_predecessor(bp.v->min_key)
|
||||
: bpos_nosnap_successor(bp.v->min_key);
|
||||
@ -706,29 +706,6 @@ void bch2_bkey_extent_entry_drop(struct bkey_i *k, union bch_extent_entry *entry
|
||||
k->k.u64s -= extent_entry_u64s(entry);
|
||||
}
|
||||
|
||||
void bch2_bkey_append_ptr(struct bkey_i *k,
|
||||
struct bch_extent_ptr ptr)
|
||||
{
|
||||
EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
|
||||
|
||||
switch (k->k.type) {
|
||||
case KEY_TYPE_btree_ptr:
|
||||
case KEY_TYPE_btree_ptr_v2:
|
||||
case KEY_TYPE_extent:
|
||||
EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
|
||||
|
||||
ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
|
||||
|
||||
memcpy((void *) &k->v + bkey_val_bytes(&k->k),
|
||||
&ptr,
|
||||
sizeof(ptr));
|
||||
k->u64s++;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
static inline void __extent_entry_insert(struct bkey_i *k,
|
||||
union bch_extent_entry *dst,
|
||||
union bch_extent_entry *new)
|
||||
@ -1245,10 +1222,10 @@ int bch2_cut_front_s(struct bpos where, struct bkey_s k)
|
||||
int val_u64s_delta;
|
||||
u64 sub;
|
||||
|
||||
if (bkey_cmp(where, bkey_start_pos(k.k)) <= 0)
|
||||
if (bkey_le(where, bkey_start_pos(k.k)))
|
||||
return 0;
|
||||
|
||||
EBUG_ON(bkey_cmp(where, k.k->p) > 0);
|
||||
EBUG_ON(bkey_gt(where, k.k->p));
|
||||
|
||||
sub = where.offset - bkey_start_offset(k.k);
|
||||
|
||||
@ -1325,10 +1302,10 @@ int bch2_cut_back_s(struct bpos where, struct bkey_s k)
|
||||
int val_u64s_delta;
|
||||
u64 len = 0;
|
||||
|
||||
if (bkey_cmp(where, k.k->p) >= 0)
|
||||
if (bkey_ge(where, k.k->p))
|
||||
return 0;
|
||||
|
||||
EBUG_ON(bkey_cmp(where, bkey_start_pos(k.k)) < 0);
|
||||
EBUG_ON(bkey_lt(where, bkey_start_pos(k.k)));
|
||||
|
||||
len = where.offset - bkey_start_offset(k.k);
|
||||
|
||||
|
@ -599,8 +599,35 @@ unsigned bch2_bkey_replicas(struct bch_fs *, struct bkey_s_c);
|
||||
unsigned bch2_extent_ptr_durability(struct bch_fs *, struct extent_ptr_decoded *);
|
||||
unsigned bch2_bkey_durability(struct bch_fs *, struct bkey_s_c);
|
||||
|
||||
void bch2_bkey_drop_device(struct bkey_s, unsigned);
|
||||
void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
|
||||
const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
|
||||
bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
|
||||
|
||||
void bch2_bkey_extent_entry_drop(struct bkey_i *, union bch_extent_entry *);
|
||||
void bch2_bkey_append_ptr(struct bkey_i *, struct bch_extent_ptr);
|
||||
|
||||
static inline void bch2_bkey_append_ptr(struct bkey_i *k, struct bch_extent_ptr ptr)
|
||||
{
|
||||
EBUG_ON(bch2_bkey_has_device(bkey_i_to_s_c(k), ptr.dev));
|
||||
|
||||
switch (k->k.type) {
|
||||
case KEY_TYPE_btree_ptr:
|
||||
case KEY_TYPE_btree_ptr_v2:
|
||||
case KEY_TYPE_extent:
|
||||
EBUG_ON(bkey_val_u64s(&k->k) >= BKEY_EXTENT_VAL_U64s_MAX);
|
||||
|
||||
ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
|
||||
|
||||
memcpy((void *) &k->v + bkey_val_bytes(&k->k),
|
||||
&ptr,
|
||||
sizeof(ptr));
|
||||
k->u64s++;
|
||||
break;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
void bch2_extent_ptr_decoded_append(struct bkey_i *,
|
||||
struct extent_ptr_decoded *);
|
||||
union bch_extent_entry *bch2_bkey_drop_ptr(struct bkey_s,
|
||||
@ -623,11 +650,6 @@ do { \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
void bch2_bkey_drop_device(struct bkey_s, unsigned);
|
||||
void bch2_bkey_drop_device_noerror(struct bkey_s, unsigned);
|
||||
const struct bch_extent_ptr *bch2_bkey_has_device(struct bkey_s_c, unsigned);
|
||||
bool bch2_bkey_has_target(struct bch_fs *, struct bkey_s_c, unsigned);
|
||||
|
||||
bool bch2_bkey_matches_ptr(struct bch_fs *, struct bkey_s_c,
|
||||
struct bch_extent_ptr, u64);
|
||||
bool bch2_extents_match(struct bkey_s_c, struct bkey_s_c);
|
||||
@ -654,9 +676,8 @@ enum bch_extent_overlap {
|
||||
static inline enum bch_extent_overlap bch2_extent_overlap(const struct bkey *k,
|
||||
const struct bkey *m)
|
||||
{
|
||||
int cmp1 = bkey_cmp(k->p, m->p) < 0;
|
||||
int cmp2 = bkey_cmp(bkey_start_pos(k),
|
||||
bkey_start_pos(m)) > 0;
|
||||
int cmp1 = bkey_lt(k->p, m->p);
|
||||
int cmp2 = bkey_gt(bkey_start_pos(k), bkey_start_pos(m));
|
||||
|
||||
return (cmp1 << 1) + cmp2;
|
||||
}
|
||||
|
@ -1168,12 +1168,14 @@ void bch2_readahead(struct readahead_control *ractl)
|
||||
{
|
||||
struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
|
||||
struct bch_fs *c = inode->v.i_sb->s_fs_info;
|
||||
struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
|
||||
struct bch_io_opts opts;
|
||||
struct btree_trans trans;
|
||||
struct page *page;
|
||||
struct readpages_iter readpages_iter;
|
||||
int ret;
|
||||
|
||||
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
|
||||
|
||||
ret = readpages_iter_init(&readpages_iter, ractl);
|
||||
BUG_ON(ret);
|
||||
|
||||
@ -1236,11 +1238,14 @@ static int bch2_read_single_page(struct page *page,
|
||||
struct bch_inode_info *inode = to_bch_ei(mapping->host);
|
||||
struct bch_fs *c = inode->v.i_sb->s_fs_info;
|
||||
struct bch_read_bio *rbio;
|
||||
struct bch_io_opts opts;
|
||||
int ret;
|
||||
DECLARE_COMPLETION_ONSTACK(done);
|
||||
|
||||
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
|
||||
|
||||
rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_NOFS, &c->bio_read),
|
||||
io_opts(c, &inode->ei_inode));
|
||||
opts);
|
||||
rbio->bio.bi_private = &done;
|
||||
rbio->bio.bi_end_io = bch2_read_single_page_end_io;
|
||||
|
||||
@ -1277,9 +1282,10 @@ struct bch_writepage_state {
|
||||
static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
|
||||
struct bch_inode_info *inode)
|
||||
{
|
||||
return (struct bch_writepage_state) {
|
||||
.opts = io_opts(c, &inode->ei_inode)
|
||||
};
|
||||
struct bch_writepage_state ret = { 0 };
|
||||
|
||||
bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void bch2_writepage_io_done(struct bch_write_op *op)
|
||||
@ -1945,7 +1951,7 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
|
||||
struct file *file = req->ki_filp;
|
||||
struct bch_inode_info *inode = file_bch_inode(file);
|
||||
struct bch_fs *c = inode->v.i_sb->s_fs_info;
|
||||
struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
|
||||
struct bch_io_opts opts;
|
||||
struct dio_read *dio;
|
||||
struct bio *bio;
|
||||
loff_t offset = req->ki_pos;
|
||||
@ -1953,6 +1959,8 @@ static int bch2_direct_IO_read(struct kiocb *req, struct iov_iter *iter)
|
||||
size_t shorten;
|
||||
ssize_t ret;
|
||||
|
||||
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
|
||||
|
||||
if ((offset|iter->count) & (block_bytes(c) - 1))
|
||||
return -EINVAL;
|
||||
|
||||
@ -2109,7 +2117,7 @@ retry:
|
||||
for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents,
|
||||
SPOS(inum.inum, offset, snapshot),
|
||||
BTREE_ITER_SLOTS, k, err) {
|
||||
if (bkey_cmp(bkey_start_pos(k.k), POS(inum.inum, end)) >= 0)
|
||||
if (bkey_ge(bkey_start_pos(k.k), POS(inum.inum, end)))
|
||||
break;
|
||||
|
||||
if (k.k->p.snapshot != snapshot ||
|
||||
@ -2271,17 +2279,20 @@ static __always_inline void bch2_dio_write_end(struct dio_write *dio)
|
||||
set_bit(EI_INODE_ERROR, &inode->ei_flags);
|
||||
}
|
||||
|
||||
static long bch2_dio_write_loop(struct dio_write *dio)
|
||||
static __always_inline long bch2_dio_write_loop(struct dio_write *dio)
|
||||
{
|
||||
struct bch_fs *c = dio->op.c;
|
||||
struct kiocb *req = dio->req;
|
||||
struct address_space *mapping = dio->mapping;
|
||||
struct bch_inode_info *inode = dio->inode;
|
||||
struct bch_io_opts opts;
|
||||
struct bio *bio = &dio->op.wbio.bio;
|
||||
unsigned unaligned, iter_count;
|
||||
bool sync = dio->sync, dropped_locks;
|
||||
long ret;
|
||||
|
||||
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
|
||||
|
||||
while (1) {
|
||||
iter_count = dio->iter.count;
|
||||
|
||||
@ -2329,7 +2340,7 @@ static long bch2_dio_write_loop(struct dio_write *dio)
|
||||
goto err;
|
||||
}
|
||||
|
||||
bch2_write_op_init(&dio->op, c, io_opts(c, &inode->ei_inode));
|
||||
bch2_write_op_init(&dio->op, c, opts);
|
||||
dio->op.end_io = sync
|
||||
? NULL
|
||||
: bch2_dio_write_loop_async;
|
||||
@ -2393,18 +2404,10 @@ err:
|
||||
goto out;
|
||||
}
|
||||
|
||||
static void bch2_dio_write_loop_async(struct bch_write_op *op)
|
||||
static noinline __cold void bch2_dio_write_continue(struct dio_write *dio)
|
||||
{
|
||||
struct dio_write *dio = container_of(op, struct dio_write, op);
|
||||
struct mm_struct *mm = dio->mm;
|
||||
|
||||
bch2_dio_write_end(dio);
|
||||
|
||||
if (likely(!dio->iter.count) || dio->op.error) {
|
||||
bch2_dio_write_done(dio);
|
||||
return;
|
||||
}
|
||||
|
||||
bio_reset(&dio->op.wbio.bio, NULL, REQ_OP_WRITE);
|
||||
|
||||
if (mm)
|
||||
@ -2414,6 +2417,18 @@ static void bch2_dio_write_loop_async(struct bch_write_op *op)
|
||||
kthread_unuse_mm(mm);
|
||||
}
|
||||
|
||||
static void bch2_dio_write_loop_async(struct bch_write_op *op)
|
||||
{
|
||||
struct dio_write *dio = container_of(op, struct dio_write, op);
|
||||
|
||||
bch2_dio_write_end(dio);
|
||||
|
||||
if (likely(!dio->iter.count) || dio->op.error)
|
||||
bch2_dio_write_done(dio);
|
||||
else
|
||||
bch2_dio_write_continue(dio);
|
||||
}
|
||||
|
||||
static noinline
|
||||
ssize_t bch2_direct_write(struct kiocb *req, struct iov_iter *iter)
|
||||
{
|
||||
@ -2593,7 +2608,7 @@ retry:
|
||||
goto err;
|
||||
|
||||
for_each_btree_key_norestart(&trans, iter, BTREE_ID_extents, start, 0, k, ret) {
|
||||
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
|
||||
if (bkey_ge(bkey_start_pos(k.k), end))
|
||||
break;
|
||||
|
||||
if (bkey_extent_is_data(k.k)) {
|
||||
@ -3031,13 +3046,13 @@ static long bchfs_fcollapse_finsert(struct bch_inode_info *inode,
|
||||
break;
|
||||
|
||||
if (insert &&
|
||||
bkey_cmp(k.k->p, POS(inode->v.i_ino, offset >> 9)) <= 0)
|
||||
bkey_le(k.k->p, POS(inode->v.i_ino, offset >> 9)))
|
||||
break;
|
||||
reassemble:
|
||||
bch2_bkey_buf_reassemble(©, c, k);
|
||||
|
||||
if (insert &&
|
||||
bkey_cmp(bkey_start_pos(k.k), move_pos) < 0)
|
||||
bkey_lt(bkey_start_pos(k.k), move_pos))
|
||||
bch2_cut_front(move_pos, copy.k);
|
||||
|
||||
copy.k->k.p.offset += shift >> 9;
|
||||
@ -3047,7 +3062,7 @@ reassemble:
|
||||
if (ret)
|
||||
continue;
|
||||
|
||||
if (bkey_cmp(atomic_end, copy.k->k.p)) {
|
||||
if (!bkey_eq(atomic_end, copy.k->k.p)) {
|
||||
if (insert) {
|
||||
move_pos = atomic_end;
|
||||
move_pos.offset -= shift >> 9;
|
||||
@ -3116,16 +3131,17 @@ static int __bchfs_fallocate(struct bch_inode_info *inode, int mode,
|
||||
struct btree_trans trans;
|
||||
struct btree_iter iter;
|
||||
struct bpos end_pos = POS(inode->v.i_ino, end_sector);
|
||||
struct bch_io_opts opts = io_opts(c, &inode->ei_inode);
|
||||
struct bch_io_opts opts;
|
||||
int ret = 0;
|
||||
|
||||
bch2_inode_opts_get(&opts, c, &inode->ei_inode);
|
||||
bch2_trans_init(&trans, c, BTREE_ITER_MAX, 512);
|
||||
|
||||
bch2_trans_iter_init(&trans, &iter, BTREE_ID_extents,
|
||||
POS(inode->v.i_ino, start_sector),
|
||||
BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
|
||||
|
||||
while (!ret && bkey_cmp(iter.pos, end_pos) < 0) {
|
||||
while (!ret && bkey_lt(iter.pos, end_pos)) {
|
||||
s64 i_sectors_delta = 0;
|
||||
struct quota_res quota_res = { 0 };
|
||||
struct bkey_s_c k;
|
||||
|
@ -133,7 +133,7 @@ static int lookup_first_inode(struct btree_trans *trans, u64 inode_nr,
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (!k.k || bkey_cmp(k.k->p, POS(0, inode_nr))) {
|
||||
if (!k.k || !bkey_eq(k.k->p, POS(0, inode_nr))) {
|
||||
ret = -ENOENT;
|
||||
goto err;
|
||||
}
|
||||
@ -527,7 +527,7 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
|
||||
};
|
||||
int ret = 0;
|
||||
|
||||
if (bkey_cmp(s->pos, pos))
|
||||
if (!bkey_eq(s->pos, pos))
|
||||
s->ids.nr = 0;
|
||||
|
||||
pos.snapshot = n.equiv;
|
||||
@ -825,7 +825,7 @@ static int hash_check_key(struct btree_trans *trans,
|
||||
for_each_btree_key_norestart(trans, iter, desc.btree_id,
|
||||
POS(hash_k.k->p.inode, hash),
|
||||
BTREE_ITER_SLOTS, k, ret) {
|
||||
if (!bkey_cmp(k.k->p, hash_k.k->p))
|
||||
if (bkey_eq(k.k->p, hash_k.k->p))
|
||||
break;
|
||||
|
||||
if (fsck_err_on(k.k->type == desc.key_type &&
|
||||
@ -1199,7 +1199,7 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
|
||||
|
||||
BUG_ON(!iter->path->should_be_locked);
|
||||
#if 0
|
||||
if (bkey_cmp(prev.k->k.p, bkey_start_pos(k.k)) > 0) {
|
||||
if (bkey_gt(prev.k->k.p, bkey_start_pos(k.k))) {
|
||||
char buf1[200];
|
||||
char buf2[200];
|
||||
|
||||
|
@ -657,7 +657,7 @@ int bch2_inode_create(struct btree_trans *trans,
|
||||
again:
|
||||
while ((k = bch2_btree_iter_peek(iter)).k &&
|
||||
!(ret = bkey_err(k)) &&
|
||||
bkey_cmp(k.k->p, POS(0, max)) < 0) {
|
||||
bkey_lt(k.k->p, POS(0, max))) {
|
||||
while (pos < iter->pos.offset) {
|
||||
if (!bch2_btree_key_cache_find(c, BTREE_ID_inodes, POS(0, pos)))
|
||||
goto found_slot;
|
||||
@ -897,3 +897,25 @@ void bch2_inode_nlink_dec(struct btree_trans *trans, struct bch_inode_unpacked *
else
bi->bi_flags |= BCH_INODE_UNLINKED;
}

struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *inode)
{
struct bch_opts ret = { 0 };
#define x(_name, _bits) \
if (inode->bi_##_name) \
opt_set(ret, _name, inode->bi_##_name - 1);
BCH_INODE_OPTS()
#undef x
return ret;
}

void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
struct bch_inode_unpacked *inode)
{
#define x(_name, _bits) opts->_name = inode_opt_get(c, inode, _name);
BCH_INODE_OPTS()
#undef x

if (opts->nocow)
opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
}
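/*
 * Minimal usage sketch of the new helper, mirroring the fs-io.c hunks in this
 * patch (no new API assumed): callers now fill a bch_io_opts on the stack
 * instead of going through the old io_opts() wrapper.
 *
 *	struct bch_io_opts opts;
 *
 *	bch2_inode_opts_get(&opts, c, &inode->ei_inode);
 */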
|
||||
|
@ -110,17 +110,8 @@ int bch2_inode_find_by_inum_trans(struct btree_trans *, subvol_inum,
|
||||
int bch2_inode_find_by_inum(struct bch_fs *, subvol_inum,
|
||||
struct bch_inode_unpacked *);
|
||||
|
||||
static inline struct bch_io_opts bch2_inode_opts_get(struct bch_inode_unpacked *inode)
|
||||
{
|
||||
struct bch_io_opts ret = { 0 };
|
||||
|
||||
#define x(_name, _bits) \
|
||||
if (inode->bi_##_name) \
|
||||
opt_set(ret, _name, inode->bi_##_name - 1);
|
||||
BCH_INODE_OPTS()
|
||||
#undef x
|
||||
return ret;
|
||||
}
|
||||
#define inode_opt_get(_c, _inode, _name) \
|
||||
((_inode)->bi_##_name ? (_inode)->bi_##_name - 1 : (_c)->opts._name)
|
||||
|
||||
static inline void bch2_inode_opt_set(struct bch_inode_unpacked *inode,
|
||||
enum inode_opt_id id, u64 v)
|
||||
@ -151,17 +142,6 @@ static inline u64 bch2_inode_opt_get(struct bch_inode_unpacked *inode,
|
||||
}
|
||||
}
|
||||
|
||||
static inline struct bch_io_opts
|
||||
io_opts(struct bch_fs *c, struct bch_inode_unpacked *inode)
|
||||
{
|
||||
struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
|
||||
|
||||
bch2_io_opts_apply(&opts, bch2_inode_opts_get(inode));
|
||||
if (opts.nocow)
|
||||
opts.compression = opts.background_compression = opts.data_checksum = opts.erasure_code;
|
||||
return opts;
|
||||
}
|
||||
|
||||
static inline u8 mode_to_type(umode_t mode)
|
||||
{
|
||||
return (mode >> 12) & 15;
|
||||
@ -201,4 +181,8 @@ static inline void bch2_inode_nlink_set(struct bch_inode_unpacked *bi,
|
||||
int bch2_inode_nlink_inc(struct bch_inode_unpacked *);
|
||||
void bch2_inode_nlink_dec(struct btree_trans *, struct bch_inode_unpacked *);
|
||||
|
||||
struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *);
|
||||
void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
|
||||
struct bch_inode_unpacked *);
|
||||
|
||||
#endif /* _BCACHEFS_INODE_H */
|
||||
|
120
libbcachefs/io.c
@ -225,7 +225,7 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
|
||||
(!new_compressed && bch2_bkey_sectors_compressed(old))))
|
||||
*usage_increasing = true;
|
||||
|
||||
if (bkey_cmp(old.k->p, new->k.p) >= 0)
|
||||
if (bkey_ge(old.k->p, new->k.p))
|
||||
break;
|
||||
}
|
||||
|
||||
@ -233,10 +233,10 @@ int bch2_sum_sector_overwrites(struct btree_trans *trans,
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
|
||||
struct btree_iter *extent_iter,
|
||||
u64 new_i_size,
|
||||
s64 i_sectors_delta)
|
||||
static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans,
|
||||
struct btree_iter *extent_iter,
|
||||
u64 new_i_size,
|
||||
s64 i_sectors_delta)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c inode_k;
|
||||
@ -514,7 +514,7 @@ int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
|
||||
bch2_btree_iter_set_snapshot(iter, snapshot);
|
||||
|
||||
k = bch2_btree_iter_peek(iter);
|
||||
if (bkey_cmp(iter->pos, end_pos) >= 0) {
|
||||
if (bkey_ge(iter->pos, end_pos)) {
|
||||
bch2_btree_iter_set_pos(iter, end_pos);
|
||||
break;
|
||||
}
|
||||
@ -608,7 +608,7 @@ static int bch2_write_index_default(struct bch_write_op *op)
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (bkey_cmp(iter.pos, k->k.p) >= 0)
|
||||
if (bkey_ge(iter.pos, k->k.p))
|
||||
bch2_keylist_pop_front(&op->insert_keys);
|
||||
else
|
||||
bch2_cut_front(iter.pos, k);
|
||||
@ -745,14 +745,9 @@ static void __bch2_write_index(struct bch_write_op *op)
|
||||
* particularly want to plumb io_opts all the way through the btree
|
||||
* update stack right now
|
||||
*/
|
||||
for_each_keylist_key(keys, k) {
|
||||
for_each_keylist_key(keys, k)
|
||||
bch2_rebalance_add_key(c, bkey_i_to_s_c(k), &op->opts);
|
||||
|
||||
if (bch2_bkey_is_incompressible(bkey_i_to_s_c(k)))
|
||||
bch2_check_set_feature(op->c, BCH_FEATURE_incompressible);
|
||||
|
||||
}
|
||||
|
||||
if (!bch2_keylist_empty(keys)) {
|
||||
u64 sectors_start = keylist_sectors(keys);
|
||||
|
||||
@ -887,7 +882,7 @@ static void init_append_extent(struct bch_write_op *op,
|
||||
crc.nonce)
|
||||
bch2_extent_crc_append(&e->k_i, crc);
|
||||
|
||||
bch2_alloc_sectors_append_ptrs(c, wp, &e->k_i, crc.compressed_size,
|
||||
bch2_alloc_sectors_append_ptrs_inlined(c, wp, &e->k_i, crc.compressed_size,
|
||||
op->flags & BCH_WRITE_CACHED);
|
||||
|
||||
bch2_keylist_push(&op->insert_keys);
|
||||
@ -1371,7 +1366,7 @@ static void bch2_nocow_write_convert_unwritten(struct bch_write_op *op)
|
||||
bkey_start_pos(&orig->k),
|
||||
BTREE_ITER_INTENT, k,
|
||||
NULL, NULL, BTREE_INSERT_NOFAIL, ({
|
||||
if (bkey_cmp(bkey_start_pos(k.k), orig->k.p) >= 0)
|
||||
if (bkey_ge(bkey_start_pos(k.k), orig->k.p))
|
||||
break;
|
||||
|
||||
bch2_nocow_write_convert_one_unwritten(&trans, &iter, orig, k, op->new_i_size);
|
||||
@ -1418,8 +1413,14 @@ static void bch2_nocow_write(struct bch_write_op *op)
|
||||
struct bkey_s_c k;
|
||||
struct bkey_ptrs_c ptrs;
|
||||
const struct bch_extent_ptr *ptr, *ptr2;
|
||||
struct {
|
||||
struct bpos b;
|
||||
unsigned gen;
|
||||
two_state_lock_t *l;
|
||||
} buckets[BCH_REPLICAS_MAX];
|
||||
unsigned nr_buckets = 0;
|
||||
u32 snapshot;
|
||||
int ret;
|
||||
int ret, i;
|
||||
|
||||
if (op->flags & BCH_WRITE_MOVE)
|
||||
return;
|
||||
@ -1438,6 +1439,8 @@ retry:
|
||||
while (1) {
|
||||
struct bio *bio = &op->wbio.bio;
|
||||
|
||||
nr_buckets = 0;
|
||||
|
||||
k = bch2_btree_iter_peek_slot(&iter);
|
||||
ret = bkey_err(k);
|
||||
if (ret)
|
||||
@ -1456,27 +1459,44 @@ retry:
|
||||
|
||||
/* Get iorefs before dropping btree locks: */
|
||||
ptrs = bch2_bkey_ptrs_c(k);
|
||||
bkey_for_each_ptr(ptrs, ptr)
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
buckets[nr_buckets].b = PTR_BUCKET_POS(c, ptr);
|
||||
buckets[nr_buckets].gen = ptr->gen;
|
||||
buckets[nr_buckets].l =
|
||||
bucket_nocow_lock(&c->nocow_locks, buckets[nr_buckets].b);
|
||||
|
||||
prefetch(buckets[nr_buckets].l);
|
||||
nr_buckets++;
|
||||
|
||||
if (unlikely(!bch2_dev_get_ioref(bch_dev_bkey_exists(c, ptr->dev), WRITE)))
|
||||
goto err_get_ioref;
|
||||
|
||||
if (ptr->unwritten)
|
||||
op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
|
||||
}
|
||||
|
||||
/* Unlock before taking nocow locks, doing IO: */
|
||||
bkey_reassemble(op->insert_keys.top, k);
|
||||
bch2_trans_unlock(&trans);
|
||||
|
||||
bch2_cut_front(op->pos, op->insert_keys.top);
|
||||
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
|
||||
if (op->flags & BCH_WRITE_CONVERT_UNWRITTEN)
|
||||
bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);
|
||||
|
||||
ptrs = bch2_bkey_ptrs_c(bkey_i_to_s_c(op->insert_keys.top));
|
||||
bkey_for_each_ptr(ptrs, ptr) {
|
||||
bch2_bucket_nocow_lock(&c->nocow_locks,
|
||||
PTR_BUCKET_POS(c, ptr),
|
||||
BUCKET_NOCOW_LOCK_UPDATE);
|
||||
if (unlikely(ptr_stale(bch_dev_bkey_exists(c, ptr->dev), ptr)))
|
||||
for (i = 0; i < nr_buckets; i++) {
|
||||
struct bch_dev *ca = bch_dev_bkey_exists(c, buckets[i].b.inode);
|
||||
two_state_lock_t *l = buckets[i].l;
|
||||
bool stale;
|
||||
|
||||
if (!bch2_two_state_trylock(l, BUCKET_NOCOW_LOCK_UPDATE))
|
||||
__bch2_bucket_nocow_lock(&c->nocow_locks, l, BUCKET_NOCOW_LOCK_UPDATE);
|
||||
|
||||
rcu_read_lock();
|
||||
stale = gen_after(*bucket_gen(ca, buckets[i].b.offset), buckets[i].gen);
|
||||
rcu_read_unlock();
|
||||
|
||||
if (unlikely(stale))
|
||||
goto err_bucket_stale;
|
||||
|
||||
if (ptr->unwritten)
|
||||
op->flags |= BCH_WRITE_CONVERT_UNWRITTEN;
|
||||
}
|
||||
|
||||
bio = &op->wbio.bio;
|
||||
@ -1550,13 +1570,10 @@ err_get_ioref:
|
||||
/* Fall back to COW path: */
|
||||
goto out;
|
||||
err_bucket_stale:
|
||||
bkey_for_each_ptr(ptrs, ptr2) {
|
||||
while (--i >= 0)
|
||||
bch2_bucket_nocow_unlock(&c->nocow_locks,
|
||||
PTR_BUCKET_POS(c, ptr2),
|
||||
buckets[i].b,
|
||||
BUCKET_NOCOW_LOCK_UPDATE);
|
||||
if (ptr2 == ptr)
|
||||
break;
|
||||
}
|
||||
|
||||
bkey_for_each_ptr(ptrs, ptr2)
|
||||
percpu_ref_put(&bch_dev_bkey_exists(c, ptr2->dev)->io_ref);
|
||||
@ -1620,28 +1637,34 @@ again:
|
||||
BCH_WRITE_ONLY_SPECIFIED_DEVS))
|
||||
? NULL : &op->cl, &wp));
|
||||
if (unlikely(ret)) {
|
||||
if (unlikely(ret != -EAGAIN)) {
|
||||
op->error = ret;
|
||||
op->flags |= BCH_WRITE_DONE;
|
||||
}
|
||||
if (ret == -EAGAIN)
|
||||
break;
|
||||
|
||||
break;
|
||||
goto err;
|
||||
}
|
||||
|
||||
bch2_open_bucket_get(c, wp, &op->open_buckets);
|
||||
ret = bch2_write_extent(op, wp, &bio);
|
||||
|
||||
bch2_alloc_sectors_done(c, wp);
|
||||
if (ret >= 0)
|
||||
bch2_open_bucket_get(c, wp, &op->open_buckets);
|
||||
bch2_alloc_sectors_done_inlined(c, wp);
|
||||
err:
|
||||
if (ret <= 0) {
|
||||
if (!(op->flags & BCH_WRITE_SYNC)) {
|
||||
spin_lock(&wp->writes_lock);
|
||||
op->wp = wp;
|
||||
list_add_tail(&op->wp_list, &wp->writes);
|
||||
spin_unlock(&wp->writes_lock);
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
op->error = ret;
|
||||
op->flags |= BCH_WRITE_DONE;
|
||||
break;
|
||||
|
||||
if (ret < 0) {
|
||||
op->error = ret;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
op->flags |= BCH_WRITE_DONE;
|
||||
|
||||
bio->bi_end_io = bch2_write_endio;
|
||||
bio->bi_private = &op->cl;
|
||||
bio->bi_opf |= REQ_OP_WRITE;
|
||||
@ -1670,11 +1693,6 @@ again:
|
||||
goto again;
|
||||
bch2_write_done(&op->cl);
|
||||
} else {
|
||||
spin_lock(&wp->writes_lock);
|
||||
op->wp = wp;
|
||||
list_add_tail(&op->wp_list, &wp->writes);
|
||||
spin_unlock(&wp->writes_lock);
|
||||
|
||||
continue_at(&op->cl, bch2_write_index, NULL);
|
||||
}
|
||||
out_nofs_restore:
|
||||
@ -1750,7 +1768,7 @@ void bch2_write(struct closure *cl)
|
||||
EBUG_ON(op->cl.parent);
|
||||
BUG_ON(!op->nr_replicas);
|
||||
BUG_ON(!op->write_point.v);
|
||||
BUG_ON(!bkey_cmp(op->pos, POS_MAX));
|
||||
BUG_ON(bkey_eq(op->pos, POS_MAX));
|
||||
|
||||
op->start_time = local_clock();
|
||||
bch2_keylist_init(&op->insert_keys, op->inline_keys);
|
||||
|
@ -36,7 +36,7 @@ void bch2_keylist_add_in_order(struct keylist *l, struct bkey_i *insert)
|
||||
struct bkey_i *where;
|
||||
|
||||
for_each_keylist_key(l, where)
|
||||
if (bkey_cmp(insert->k.p, where->k.p) < 0)
|
||||
if (bpos_lt(insert->k.p, where->k.p))
|
||||
break;
|
||||
|
||||
memmove_u64s_up((u64 *) where + insert->k.u64s,
|
||||
@ -63,6 +63,6 @@ void bch2_verify_keylist_sorted(struct keylist *l)
|
||||
|
||||
for_each_keylist_key(l, k)
|
||||
BUG_ON(bkey_next(k) != l->top &&
|
||||
bpos_cmp(k->k.p, bkey_next(k)->k.p) >= 0);
|
||||
bpos_ge(k->k.p, bkey_next(k)->k.p));
|
||||
}
|
||||
#endif
|
||||
|
@ -361,7 +361,7 @@ static int lookup_inode(struct btree_trans *trans, struct bpos pos,
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
if (!k.k || bkey_cmp(k.k->p, pos)) {
|
||||
if (!k.k || !bkey_eq(k.k->p, pos)) {
|
||||
ret = -ENOENT;
|
||||
goto err;
|
||||
}
|
||||
@ -434,8 +434,6 @@ static int move_get_io_opts(struct btree_trans *trans,
|
||||
if (*cur_inum == k.k->p.inode)
|
||||
return 0;
|
||||
|
||||
*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
|
||||
|
||||
ret = lookup_inode(trans,
|
||||
SPOS(0, k.k->p.inode, k.k->p.snapshot),
|
||||
&inode);
|
||||
@ -443,8 +441,9 @@ static int move_get_io_opts(struct btree_trans *trans,
|
||||
return ret;
|
||||
|
||||
if (!ret)
|
||||
bch2_io_opts_apply(io_opts, bch2_inode_opts_get(&inode));
|
||||
|
||||
bch2_inode_opts_get(io_opts, trans->c, &inode);
|
||||
else
|
||||
*io_opts = bch2_opts_to_inode_opts(trans->c->opts);
|
||||
*cur_inum = k.k->p.inode;
|
||||
return 0;
|
||||
}
|
||||
@ -492,7 +491,7 @@ static int __bch2_move_data(struct moving_context *ctxt,
|
||||
if (ret)
|
||||
break;
|
||||
|
||||
if (bkey_cmp(bkey_start_pos(k.k), end) >= 0)
|
||||
if (bkey_ge(bkey_start_pos(k.k), end))
|
||||
break;
|
||||
|
||||
ctxt->stats->pos = iter.pos;
|
||||
|
@ -5,12 +5,11 @@
|
||||
#include "util.h"
|
||||
|
||||
void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
|
||||
struct bpos bucket, int flags)
|
||||
two_state_lock_t *l, int flags)
|
||||
{
|
||||
struct bch_fs *c = container_of(t, struct bch_fs, nocow_locks);
|
||||
two_state_lock_t *l = bucket_nocow_lock(t, bucket);
|
||||
u64 start_time = local_clock();
|
||||
|
||||
bch2_two_state_lock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
|
||||
__bch2_two_state_lock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
|
||||
bch2_time_stats_update(&c->times[BCH_TIME_nocow_lock_contended], start_time);
|
||||
}
|
||||
|
@ -5,12 +5,12 @@
|
||||
#include "bcachefs_format.h"
|
||||
#include "two_state_shared_lock.h"
|
||||
|
||||
#include <linux/siphash.h>
|
||||
#include <linux/hash.h>
|
||||
|
||||
#define BUCKET_NOCOW_LOCKS (1U << 10)
|
||||
#define BUCKET_NOCOW_LOCKS_BITS 10
|
||||
#define BUCKET_NOCOW_LOCKS (1U << BUCKET_NOCOW_LOCKS_BITS)
|
||||
|
||||
struct bucket_nocow_lock_table {
|
||||
siphash_key_t key;
|
||||
two_state_lock_t l[BUCKET_NOCOW_LOCKS];
|
||||
};
|
||||
|
||||
@ -20,7 +20,7 @@ static inline two_state_lock_t *bucket_nocow_lock(struct bucket_nocow_lock_table
|
||||
struct bpos bucket)
|
||||
{
|
||||
u64 dev_bucket = bucket.inode << 56 | bucket.offset;
|
||||
unsigned h = siphash_1u64(dev_bucket, &t->key);
|
||||
unsigned h = hash_64(dev_bucket, BUCKET_NOCOW_LOCKS_BITS);
|
||||
|
||||
return t->l + (h & (BUCKET_NOCOW_LOCKS - 1));
|
||||
}
|
||||
@ -41,7 +41,7 @@ static inline void bch2_bucket_nocow_unlock(struct bucket_nocow_lock_table *t,
|
||||
bch2_two_state_unlock(l, flags & BUCKET_NOCOW_LOCK_UPDATE);
|
||||
}
|
||||
|
||||
void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *, struct bpos, int);
|
||||
void __bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *, two_state_lock_t *, int);
|
||||
|
||||
static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
|
||||
struct bpos bucket, int flags)
|
||||
@ -49,7 +49,7 @@ static inline void bch2_bucket_nocow_lock(struct bucket_nocow_lock_table *t,
|
||||
two_state_lock_t *l = bucket_nocow_lock(t, bucket);
|
||||
|
||||
if (!bch2_two_state_trylock(l, flags & BUCKET_NOCOW_LOCK_UPDATE))
|
||||
__bch2_bucket_nocow_lock(t, bucket, flags);
|
||||
__bch2_bucket_nocow_lock(t, l, flags);
|
||||
}
|
||||
|
||||
#endif /* _BCACHEFS_NOCOW_LOCKING_H */
|
||||
|
@ -532,33 +532,11 @@ void bch2_opt_set_sb(struct bch_fs *c, const struct bch_option *opt, u64 v)
|
||||
|
||||
struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts src)
|
||||
{
|
||||
struct bch_io_opts ret = { 0 };
|
||||
#define x(_name, _bits) \
|
||||
if (opt_defined(src, _name)) \
|
||||
opt_set(ret, _name, src._name);
|
||||
BCH_INODE_OPTS()
|
||||
#undef x
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct bch_opts bch2_inode_opts_to_opts(struct bch_io_opts src)
|
||||
{
|
||||
struct bch_opts ret = { 0 };
|
||||
#define x(_name, _bits) \
|
||||
if (opt_defined(src, _name)) \
|
||||
opt_set(ret, _name, src._name);
|
||||
BCH_INODE_OPTS()
|
||||
#undef x
|
||||
return ret;
|
||||
}
|
||||
|
||||
void bch2_io_opts_apply(struct bch_io_opts *dst, struct bch_io_opts src)
|
||||
{
|
||||
#define x(_name, _bits) \
|
||||
if (opt_defined(src, _name)) \
|
||||
opt_set(*dst, _name, src._name);
|
||||
return (struct bch_io_opts) {
|
||||
#define x(_name, _bits) ._name = src._name,
|
||||
BCH_INODE_OPTS()
|
||||
#undef x
|
||||
};
|
||||
}
|
||||
|
||||
bool bch2_opt_is_inode_opt(enum bch_opt_id id)
|
||||
|
@ -499,18 +499,12 @@ int bch2_parse_mount_opts(struct bch_fs *, struct bch_opts *, char *);
|
||||
/* inode opts: */
|
||||
|
||||
struct bch_io_opts {
|
||||
#define x(_name, _bits) unsigned _name##_defined:1;
|
||||
BCH_INODE_OPTS()
|
||||
#undef x
|
||||
|
||||
#define x(_name, _bits) u##_bits _name;
|
||||
BCH_INODE_OPTS()
|
||||
#undef x
|
||||
};
|
||||
|
||||
struct bch_io_opts bch2_opts_to_inode_opts(struct bch_opts);
|
||||
struct bch_opts bch2_inode_opts_to_opts(struct bch_io_opts);
|
||||
void bch2_io_opts_apply(struct bch_io_opts *, struct bch_io_opts);
|
||||
bool bch2_opt_is_inode_opt(enum bch_opt_id);
|
||||
|
||||
#endif /* _BCACHEFS_OPTS_H */
|
||||
|
@ -133,9 +133,8 @@ search:
|
||||
(k = idx_to_key(keys, *idx),
|
||||
k->btree_id == btree_id &&
|
||||
k->level == level &&
|
||||
bpos_cmp(k->k->k.p, end_pos) <= 0)) {
|
||||
if (bpos_cmp(k->k->k.p, pos) >= 0 &&
|
||||
!k->overwritten)
|
||||
bpos_le(k->k->k.p, end_pos))) {
|
||||
if (bpos_ge(k->k->k.p, pos) && !k->overwritten)
|
||||
return k->k;
|
||||
|
||||
(*idx)++;
|
||||
@ -296,7 +295,7 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
|
||||
if (idx < keys->size &&
|
||||
keys->d[idx].btree_id == btree &&
|
||||
keys->d[idx].level == level &&
|
||||
!bpos_cmp(keys->d[idx].k->k.p, pos))
|
||||
bpos_eq(keys->d[idx].k->k.p, pos))
|
||||
keys->d[idx].overwritten = true;
|
||||
}
|
||||
|
||||
@ -355,7 +354,7 @@ static void bch2_journal_iter_advance_btree(struct btree_and_journal_iter *iter)
|
||||
|
||||
void bch2_btree_and_journal_iter_advance(struct btree_and_journal_iter *iter)
|
||||
{
|
||||
if (!bpos_cmp(iter->pos, SPOS_MAX))
|
||||
if (bpos_eq(iter->pos, SPOS_MAX))
|
||||
iter->at_end = true;
|
||||
else
|
||||
iter->pos = bpos_successor(iter->pos);
|
||||
@ -369,19 +368,19 @@ again:
|
||||
return bkey_s_c_null;
|
||||
|
||||
while ((btree_k = bch2_journal_iter_peek_btree(iter)).k &&
|
||||
bpos_cmp(btree_k.k->p, iter->pos) < 0)
|
||||
bpos_lt(btree_k.k->p, iter->pos))
|
||||
bch2_journal_iter_advance_btree(iter);
|
||||
|
||||
while ((journal_k = bch2_journal_iter_peek(&iter->journal)).k &&
|
||||
bpos_cmp(journal_k.k->p, iter->pos) < 0)
|
||||
bpos_lt(journal_k.k->p, iter->pos))
|
||||
bch2_journal_iter_advance(&iter->journal);
|
||||
|
||||
ret = journal_k.k &&
|
||||
(!btree_k.k || bpos_cmp(journal_k.k->p, btree_k.k->p) <= 0)
|
||||
(!btree_k.k || bpos_le(journal_k.k->p, btree_k.k->p))
|
||||
? journal_k
|
||||
: btree_k;
|
||||
|
||||
if (ret.k && iter->b && bpos_cmp(ret.k->p, iter->b->data->max_key) > 0)
|
||||
if (ret.k && iter->b && bpos_gt(ret.k->p, iter->b->data->max_key))
|
||||
ret = bkey_s_c_null;
|
||||
|
||||
if (ret.k) {
|
||||
@ -529,7 +528,7 @@ static int journal_keys_sort(struct bch_fs *c)
|
||||
while (src + 1 < keys->d + keys->nr &&
|
||||
src[0].btree_id == src[1].btree_id &&
|
||||
src[0].level == src[1].level &&
|
||||
!bpos_cmp(src[0].k->k.p, src[1].k->k.p))
|
||||
bpos_eq(src[0].k->k.p, src[1].k->k.p))
|
||||
src++;
|
||||
|
||||
*dst++ = *src++;
|
||||
|
@ -252,7 +252,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
	int ret;

	for_each_btree_key_continue_norestart(*iter, 0, k, ret) {
		if (bkey_cmp(iter->pos, end) >= 0)
		if (bkey_ge(iter->pos, end))
			break;

		if (bkey_extent_is_unwritten(k))
@ -262,7 +262,7 @@ static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
		return k;
	}

	if (bkey_cmp(iter->pos, end) >= 0)
	if (bkey_ge(iter->pos, end))
		bch2_btree_iter_set_pos(iter, end);
	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}
@ -304,7 +304,7 @@ s64 bch2_remap_range(struct bch_fs *c,

	while ((ret == 0 ||
		bch2_err_matches(ret, BCH_ERR_transaction_restart)) &&
	       bkey_cmp(dst_iter.pos, dst_end) < 0) {
	       bkey_lt(dst_iter.pos, dst_end)) {
		struct disk_reservation disk_res = { 0 };

		bch2_trans_begin(&trans);
@ -337,7 +337,7 @@ s64 bch2_remap_range(struct bch_fs *c,
		if (ret)
			continue;

		if (bkey_cmp(src_want, src_iter.pos) < 0) {
		if (bkey_lt(src_want, src_iter.pos)) {
			ret = bch2_fpunch_at(&trans, &dst_iter, dst_inum,
					     min(dst_end.offset,
						 dst_iter.pos.offset +
@ -389,8 +389,8 @@ s64 bch2_remap_range(struct bch_fs *c,
	bch2_trans_iter_exit(&trans, &dst_iter);
	bch2_trans_iter_exit(&trans, &src_iter);

	BUG_ON(!ret && bkey_cmp(dst_iter.pos, dst_end));
	BUG_ON(bkey_cmp(dst_iter.pos, dst_end) > 0);
	BUG_ON(!ret && !bkey_eq(dst_iter.pos, dst_end));
	BUG_ON(bkey_gt(dst_iter.pos, dst_end));

	dst_done = dst_iter.pos.offset - dst_start.offset;
	new_i_size = min(dst_iter.pos.offset << 9, new_i_size);

@ -30,8 +30,8 @@ int bch2_snapshot_invalid(const struct bch_fs *c, struct bkey_s_c k,
	struct bkey_s_c_snapshot s;
	u32 i, id;

	if (bkey_cmp(k.k->p, POS(0, U32_MAX)) > 0 ||
	    bkey_cmp(k.k->p, POS(0, 1)) < 0) {
	if (bkey_gt(k.k->p, POS(0, U32_MAX)) ||
	    bkey_lt(k.k->p, POS(0, 1))) {
		prt_printf(err, "bad pos");
		return -BCH_ERR_invalid_bkey;
	}
@ -592,7 +592,7 @@ static int snapshot_delete_key(struct btree_trans *trans,
	struct bch_fs *c = trans->c;
	u32 equiv = snapshot_t(c, k.k->p.snapshot)->equiv;

	if (bkey_cmp(k.k->p, *last_pos))
	if (!bkey_eq(k.k->p, *last_pos))
		equiv_seen->nr = 0;
	*last_pos = k.k->p;

@ -770,8 +770,8 @@ static int bch2_delete_dead_snapshots_hook(struct btree_trans *trans,
int bch2_subvolume_invalid(const struct bch_fs *c, struct bkey_s_c k,
			   int rw, struct printbuf *err)
{
	if (bkey_cmp(k.k->p, SUBVOL_POS_MIN) < 0 ||
	    bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0) {
	if (bkey_lt(k.k->p, SUBVOL_POS_MIN) ||
	    bkey_gt(k.k->p, SUBVOL_POS_MAX)) {
		prt_printf(err, "invalid pos");
		return -BCH_ERR_invalid_bkey;
	}
@ -795,10 +795,11 @@ void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
		   le32_to_cpu(s.v->snapshot));
}

int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
		       bool inconsistent_if_not_found,
		       int iter_flags,
		       struct bch_subvolume *s)
static __always_inline int
bch2_subvolume_get_inlined(struct btree_trans *trans, unsigned subvol,
			   bool inconsistent_if_not_found,
			   int iter_flags,
			   struct bch_subvolume *s)
{
	struct btree_iter iter;
	struct bkey_s_c k;
@ -818,6 +819,14 @@ int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
	return ret;
}

int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
		       bool inconsistent_if_not_found,
		       int iter_flags,
		       struct bch_subvolume *s)
{
	return bch2_subvolume_get_inlined(trans, subvol, inconsistent_if_not_found, iter_flags, s);
}

int bch2_snapshot_get_subvol(struct btree_trans *trans, u32 snapshot,
			     struct bch_subvolume *subvol)
{
@ -833,12 +842,12 @@ int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
	struct bch_subvolume s;
	int ret;

	ret = bch2_subvolume_get(trans, subvol, true,
				 BTREE_ITER_CACHED|
				 BTREE_ITER_WITH_UPDATES,
				 &s);

	*snapid = le32_to_cpu(s.snapshot);
	ret = bch2_subvolume_get_inlined(trans, subvol, true,
					 BTREE_ITER_CACHED|
					 BTREE_ITER_WITH_UPDATES,
					 &s);
	if (!ret)
		*snapid = le32_to_cpu(s.snapshot);
	return ret;
}

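The shape above -- an __always_inline body plus a plain exported wrapper -- is a common way to let a hot caller such as bch2_subvolume_get_snapshot() inline the lookup while every other caller shares one out-of-line copy. A minimal sketch of the idiom (names hypothetical, not from the patch):

	static __always_inline int lookup_inlined(int arg)
	{
		/* full body lives here, so inlined call sites pay no call overhead */
		return arg;
	}

	int lookup(int arg)		/* out-of-line entry point for everyone else */
	{
		return lookup_inlined(arg);
	}
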
@ -1019,7 +1028,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,

	for_each_btree_key(trans, dst_iter, BTREE_ID_subvolumes, SUBVOL_POS_MIN,
			   BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k, ret) {
		if (bkey_cmp(k.k->p, SUBVOL_POS_MAX) > 0)
		if (bkey_gt(k.k->p, SUBVOL_POS_MAX))
			break;

		/*
@ -2,32 +2,7 @@

#include "two_state_shared_lock.h"

void bch2_two_state_unlock(two_state_lock_t *lock, int s)
void __bch2_two_state_lock(two_state_lock_t *lock, int s)
{
	long i = s ? 1 : -1;

	BUG_ON(atomic_long_read(&lock->v) == 0);

	if (atomic_long_sub_return_release(i, &lock->v) == 0)
		wake_up_all(&lock->wait);
}

bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
{
	long i = s ? 1 : -1;
	long v = atomic_long_read(&lock->v), old;

	do {
		old = v;

		if (i > 0 ? v < 0 : v > 0)
			return false;
	} while ((v = atomic_long_cmpxchg_acquire(&lock->v,
					old, old + i)) != old);
	return true;
}

void bch2_two_state_lock(two_state_lock_t *lock, int s)
{
	wait_event(lock->wait, bch2_two_state_trylock(lock, s));
	__wait_event(lock->wait, bch2_two_state_trylock(lock, s));
}

@ -6,6 +6,8 @@
#include <linux/sched.h>
#include <linux/wait.h>

#include "util.h"

/*
 * Two-state lock - can be taken for add or block - both states are shared,
 * like read side of rwsem, but conflict with other state:
@ -21,8 +23,37 @@ static inline void two_state_lock_init(two_state_lock_t *lock)
	init_waitqueue_head(&lock->wait);
}

void bch2_two_state_unlock(two_state_lock_t *, int);
bool bch2_two_state_trylock(two_state_lock_t *, int);
void bch2_two_state_lock(two_state_lock_t *, int);
static inline void bch2_two_state_unlock(two_state_lock_t *lock, int s)
{
	long i = s ? 1 : -1;

	EBUG_ON(atomic_long_read(&lock->v) == 0);

	if (atomic_long_sub_return_release(i, &lock->v) == 0)
		wake_up_all(&lock->wait);
}

static inline bool bch2_two_state_trylock(two_state_lock_t *lock, int s)
{
	long i = s ? 1 : -1;
	long v = atomic_long_read(&lock->v), old;

	do {
		old = v;

		if (i > 0 ? v < 0 : v > 0)
			return false;
	} while ((v = atomic_long_cmpxchg_acquire(&lock->v,
					old, old + i)) != old);
	return true;
}

void __bch2_two_state_lock(two_state_lock_t *, int);

static inline void bch2_two_state_lock(two_state_lock_t *lock, int s)
{
	if (!bch2_two_state_trylock(lock, s))
		__bch2_two_state_lock(lock, s);
}

#endif /* _BCACHEFS_TWO_STATE_LOCK_H */

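Usage sketch for the lock above (illustrative; the call sites and which side is "add" vs "block" are assumptions, not from this patch): each state is shared among its own holders, but the two states exclude each other, so trylock only fails while the opposite state is held:

	two_state_lock_t lock;

	two_state_lock_init(&lock);

	bch2_two_state_lock(&lock, 1);		/* many threads may hold state 1 at once */
	/* ... critical section for state 1 ... */
	bch2_two_state_unlock(&lock, 1);

	if (bch2_two_state_trylock(&lock, 0)) {	/* fails while any state-1 holder remains */
		/* ... critical section for state 0 ... */
		bch2_two_state_unlock(&lock, 0);
	}
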
@ -319,14 +319,14 @@ int bch2_prt_backtrace(struct printbuf *out, struct task_struct *task)

/* time stats: */

static void bch2_time_stats_update_one(struct time_stats *stats,
				       u64 start, u64 end)
static inline void bch2_time_stats_update_one(struct time_stats *stats,
					      u64 start, u64 end)
{
	u64 duration, freq;

	if (time_after64(end, start)) {
		duration = end - start;
		stats->duration_stats = mean_and_variance_update(stats->duration_stats,
		stats->duration_stats = mean_and_variance_update_inlined(stats->duration_stats,
								 duration);
		stats->duration_stats_weighted = mean_and_variance_weighted_update(
			stats->duration_stats_weighted,
@ -338,7 +338,7 @@ static void bch2_time_stats_update_one(struct time_stats *stats,

	if (time_after64(end, stats->last_event)) {
		freq = end - stats->last_event;
		stats->freq_stats = mean_and_variance_update(stats->freq_stats, freq);
		stats->freq_stats = mean_and_variance_update_inlined(stats->freq_stats, freq);
		stats->freq_stats_weighted = mean_and_variance_weighted_update(
			stats->freq_stats_weighted,
			freq);
@ -348,6 +348,22 @@ static void bch2_time_stats_update_one(struct time_stats *stats,
	}
}

static noinline void bch2_time_stats_clear_buffer(struct time_stats *stats,
						  struct time_stat_buffer *b)
{
	struct time_stat_buffer_entry *i;
	unsigned long flags;

	spin_lock_irqsave(&stats->lock, flags);
	for (i = b->entries;
	     i < b->entries + ARRAY_SIZE(b->entries);
	     i++)
		bch2_time_stats_update_one(stats, i->start, i->end);
	spin_unlock_irqrestore(&stats->lock, flags);

	b->nr = 0;
}

void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
{
	unsigned long flags;
@ -367,7 +383,6 @@ void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
				       GFP_ATOMIC);
		spin_unlock_irqrestore(&stats->lock, flags);
	} else {
		struct time_stat_buffer_entry *i;
		struct time_stat_buffer *b;

		preempt_disable();
@ -379,17 +394,8 @@ void __bch2_time_stats_update(struct time_stats *stats, u64 start, u64 end)
			.end = end
		};

		if (b->nr == ARRAY_SIZE(b->entries)) {
			spin_lock_irqsave(&stats->lock, flags);
			for (i = b->entries;
			     i < b->entries + ARRAY_SIZE(b->entries);
			     i++)
				bch2_time_stats_update_one(stats, i->start, i->end);
			spin_unlock_irqrestore(&stats->lock, flags);

			b->nr = 0;
		}

		if (unlikely(b->nr == ARRAY_SIZE(b->entries)))
			bch2_time_stats_clear_buffer(stats, b);
		preempt_enable();
	}
}

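For context, a plausible caller of the code above looks roughly like this; the stats variable is illustrative, while __bch2_time_stats_update() comes from this file and local_clock() from the kernel:

	struct time_stats stats;
	u64 start = local_clock();

	/* ... the operation being timed ... */

	__bch2_time_stats_update(&stats, start, local_clock());

Batching entries in the per-CPU buffer and flushing them out of line (the noinline helper) keeps the common update path free of the spinlock; only the rare full-buffer flush takes it.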
@ -582,6 +582,20 @@ static inline void memmove_u64s_down(void *dst, const void *src,
	__memmove_u64s_down(dst, src, u64s);
}

static inline void __memmove_u64s_down_small(void *dst, const void *src,
					     unsigned u64s)
{
	memcpy_u64s_small(dst, src, u64s);
}

static inline void memmove_u64s_down_small(void *dst, const void *src,
					   unsigned u64s)
{
	EBUG_ON(dst > src);

	__memmove_u64s_down_small(dst, src, u64s);
}

static inline void __memmove_u64s_up_small(void *_dst, const void *_src,
					   unsigned u64s)
{

@ -444,7 +444,7 @@ static int __bch2_xattr_bcachefs_get(const struct xattr_handler *handler,
	struct bch_inode_info *inode = to_bch_ei(vinode);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_opts opts =
		bch2_inode_opts_to_opts(bch2_inode_opts_get(&inode->ei_inode));
		bch2_inode_opts_to_opts(&inode->ei_inode);
	const struct bch_option *opt;
	int id, inode_opt_id;
	struct printbuf out = PRINTBUF;

@ -67,13 +67,7 @@ s64 fast_divpow2(s64 n, u8 d)
 */
struct mean_and_variance mean_and_variance_update(struct mean_and_variance s1, s64 v1)
{
	struct mean_and_variance s2;
	u64 v2 = abs(v1);

	s2.n = s1.n + 1;
	s2.sum = s1.sum + v1;
	s2.sum_squares = u128_add(s1.sum_squares, u128_square(v2));
	return s2;
	return mean_and_variance_update_inlined(s1, v1);
}
EXPORT_SYMBOL_GPL(mean_and_variance_update);

@ -120,29 +114,7 @@ EXPORT_SYMBOL_GPL(mean_and_variance_get_stddev);
struct mean_and_variance_weighted mean_and_variance_weighted_update(struct mean_and_variance_weighted s1,
								    s64 x)
{
	struct mean_and_variance_weighted s2;
	// previous weighted variance.
	u64 var_w0 = s1.variance;
	u8 w = s2.w = s1.w;
	// new value weighted.
	s64 x_w = x << w;
	s64 diff_w = x_w - s1.mean;
	s64 diff = fast_divpow2(diff_w, w);
	// new mean weighted.
	s64 u_w1 = s1.mean + diff;

	BUG_ON(w % 2 != 0);

	if (!s1.init) {
		s2.mean = x_w;
		s2.variance = 0;
	} else {
		s2.mean = u_w1;
		s2.variance = ((var_w0 << w) - var_w0 + ((diff_w * (x_w - u_w1)) >> w)) >> w;
	}
	s2.init = true;

	return s2;
	return mean_and_variance_weighted_update_inlined(s1, x);
}
EXPORT_SYMBOL_GPL(mean_and_variance_weighted_update);
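
An informal reading of the weighted update that was moved into the header (the struct and functions below are a hypothetical standalone sketch, not the kernel's): the mean is an exponentially weighted average with weight 1/2^w, kept left-shifted by w so it can be maintained entirely in integer arithmetic:

	/* Sketch of a fixed-point EWMA with weight 1/2^w; field and function
	 * names are illustrative only. */
	struct ewma {
		s64		mean_w;	/* running mean, stored as (mean << w) */
		unsigned	w;	/* weight is 1/2^w */
	};

	static inline void ewma_update(struct ewma *e, s64 x)
	{
		s64 x_w    = x << e->w;		/* scale the new sample */
		s64 diff_w = x_w - e->mean_w;	/* error, still in the scaled domain */

		/* mean += (x - mean) / 2^w; the real code rounds via fast_divpow2(),
		 * a plain arithmetic shift is close enough for a sketch. */
		e->mean_w += diff_w >> e->w;
	}

	static inline s64 ewma_read(const struct ewma *e)
	{
		return e->mean_w >> e->w;	/* drop the fixed-point scale */
	}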