mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to 33a60d9b05 bcachefs: Assorted fixes for clang
commit 87179c7a6e
parent 2d7982de78
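Most of the clang fixes below share one theme: bitmasks indexed by key type were built with expressions like (1U << type), which shifts past the width of a 32-bit int once type numbers can reach 32 — undefined behaviour, and something clang warns about. The tables accordingly move to u64 masks built with BIT_ULL(). A minimal standalone sketch of the distinction (illustrative only, not code from the tree):

	#include <stdint.h>

	#define BIT_ULL(nr)	(1ULL << (nr))

	/*
	 * (1U << type) is undefined for type >= 32; a 64-bit mask
	 * keeps every key type representable:
	 */
	static uint64_t key_type_mask(unsigned type)
	{
		return BIT_ULL(type);
	}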
@@ -1 +1 @@
-4b5917839c4b279b303133b87cd94cc1a352a0e6
+33a60d9b05f523be93973b25e0df1ab2d65fa4fc
@@ -13,6 +13,9 @@
 #include <linux/compiler.h>
 #include <linux/math.h>
 
+#define BIT(nr)		(1UL << (nr))
+#define BIT_ULL(nr)	(1ULL << (nr))
+
 #define __ARG_PLACEHOLDER_1 0,
 #define __take_second_arg(__ignored, val, ...) val
 
@@ -153,4 +153,19 @@ static inline u32 int_sqrt64(u64 x)
 }
 #endif
 
+#define abs(x)	__abs_choose_expr(x, long long,				\
+		__abs_choose_expr(x, long,				\
+		__abs_choose_expr(x, int,				\
+		__abs_choose_expr(x, short,				\
+		__abs_choose_expr(x, char,				\
+		__builtin_choose_expr(					\
+			__builtin_types_compatible_p(typeof(x), char),	\
+			(char)({ signed char __x = (x); __x<0?-__x:__x; }), \
+			((void)0)))))))
+
+#define __abs_choose_expr(x, type, other) __builtin_choose_expr(	\
+	__builtin_types_compatible_p(typeof(x), signed type) ||	\
+	__builtin_types_compatible_p(typeof(x), unsigned type),	\
+	({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
+
 #endif /* _LINUX_MATH_H */
@@ -27,9 +27,6 @@
 #include <linux/rculist.h>
 #include <linux/bit_spinlock.h>
 
-#define BIT(nr)		(1UL << (nr))
-#define BIT_ULL(nr)	(1ULL << (nr))
-
 #include <linux/rhashtable-types.h>
 /*
  * Objects in an rhashtable have an embedded struct rhash_head
@@ -80,9 +80,15 @@ static inline void *krealloc_array(void *p, size_t new_n, size_t new_size, gfp_t
 }
 
 #define kzalloc(size, flags) kmalloc(size, flags|__GFP_ZERO)
-#define kmalloc_array(n, size, flags)				\
-	((size) != 0 && (n) > SIZE_MAX / (size)			\
-	 ? NULL : kmalloc((n) * (size), flags))
+
+static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+	size_t bytes;
+
+	if (unlikely(check_mul_overflow(n, size, &bytes)))
+		return NULL;
+	return kmalloc(bytes, flags);
+}
 
 #define kvmalloc_array(n, size, flags)				\
 	((size) != 0 && (n) > SIZE_MAX / (size)			\
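Note on the slab.h hunk above: replacing the kmalloc_array() macro with an inline function gives the call a real prototype (which quiets clang) and delegates the overflow check to check_mul_overflow(). A minimal userspace analogue, using the compiler builtin that the kernel helper wraps (the function name here is hypothetical):

	#include <stdlib.h>

	static inline void *malloc_array(size_t n, size_t size)
	{
		size_t bytes;

		/* true if n * size would overflow size_t */
		if (__builtin_mul_overflow(n, size, &bytes))
			return NULL;
		return malloc(bytes);
	}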
@@ -79,36 +79,6 @@ static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
 	return v;
 }
 
-static inline void alloc_field_v1_put(struct bkey_i_alloc *a, void **p,
-				      unsigned field, u64 v)
-{
-	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
-
-	if (!v)
-		return;
-
-	a->v.fields |= 1 << field;
-
-	switch (bytes) {
-	case 1:
-		*((u8 *) *p) = v;
-		break;
-	case 2:
-		*((__le16 *) *p) = cpu_to_le16(v);
-		break;
-	case 4:
-		*((__le32 *) *p) = cpu_to_le32(v);
-		break;
-	case 8:
-		*((__le64 *) *p) = cpu_to_le64(v);
-		break;
-	default:
-		BUG();
-	}
-
-	*p += bytes;
-}
-
 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
 				 struct bkey_s_c k)
 {
@@ -267,7 +237,8 @@ int bch2_alloc_v3_invalid(const struct bch_fs *c, struct bkey_s_c k,
 }
 
 int bch2_alloc_v4_invalid(const struct bch_fs *c, struct bkey_s_c k,
-			  unsigned flags, struct printbuf *err)
+			  enum bkey_invalid_flags flags,
+			  struct printbuf *err)
 {
 	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
 	int rw = flags & WRITE;
@@ -1334,7 +1305,7 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
 					    struct btree_iter *iter,
 					    struct bpos end)
 {
-	if (!btree_node_type_is_extents(iter->btree_id)) {
+	if (!btree_id_is_extents(iter->btree_id)) {
 		return __bch2_check_discard_freespace_key(trans, iter);
 	} else {
 		int ret;
@@ -1042,8 +1042,12 @@ static bool should_drop_bucket(struct open_bucket *ob, struct bch_fs *c,
 	unsigned i;
 
 	if (!drop && ob->ec) {
+		unsigned nr_blocks;
+
 		mutex_lock(&ob->ec->lock);
-		for (i = 0; i < ob->ec->new_stripe.key.v.nr_blocks; i++) {
+		nr_blocks = bkey_i_to_stripe(&ob->ec->new_stripe.key)->v.nr_blocks;
+
+		for (i = 0; i < nr_blocks; i++) {
 			if (!ob->ec->blocks[i])
 				continue;
 
@@ -32,7 +32,7 @@ enum bch_watermark {
 };
 
 #define BCH_WATERMARK_BITS	3
-#define BCH_WATERMARK_MASK	~(~0 << BCH_WATERMARK_BITS)
+#define BCH_WATERMARK_MASK	~(~0U << BCH_WATERMARK_BITS)
 
 #define OPEN_BUCKETS_COUNT	1024
 
@@ -2138,7 +2138,7 @@ struct jset_entry_dev_usage {
 	__le64		_buckets_unavailable; /* No longer used */
 
 	struct jset_entry_dev_usage_type d[];
-} __packed;
+};
 
 static inline unsigned jset_entry_dev_usage_nr_types(struct jset_entry_dev_usage *u)
 {
@@ -2194,26 +2194,67 @@ LE32_BITMASK(JSET_NO_FLUSH, struct jset, flags, 5, 6);
 
 /* Btree: */
 
+enum btree_id_flags {
+	BTREE_ID_EXTENTS	= BIT(0),
+	BTREE_ID_SNAPSHOTS	= BIT(1),
+	BTREE_ID_DATA		= BIT(2),
+};
+
 #define BCH_BTREE_IDS()						\
-	x(extents,		0)				\
-	x(inodes,		1)				\
-	x(dirents,		2)				\
-	x(xattrs,		3)				\
-	x(alloc,		4)				\
-	x(quotas,		5)				\
-	x(stripes,		6)				\
-	x(reflink,		7)				\
-	x(subvolumes,		8)				\
-	x(snapshots,		9)				\
-	x(lru,			10)				\
-	x(freespace,		11)				\
-	x(need_discard,		12)				\
-	x(backpointers,		13)				\
-	x(bucket_gens,		14)				\
-	x(snapshot_trees,	15)
+	x(extents,	0, BTREE_ID_EXTENTS|BTREE_ID_SNAPSHOTS|BTREE_ID_DATA,\
+	  BIT_ULL(KEY_TYPE_whiteout)|				\
+	  BIT_ULL(KEY_TYPE_error)|				\
+	  BIT_ULL(KEY_TYPE_cookie)|				\
+	  BIT_ULL(KEY_TYPE_extent)|				\
+	  BIT_ULL(KEY_TYPE_reservation)|			\
+	  BIT_ULL(KEY_TYPE_reflink_p)|				\
+	  BIT_ULL(KEY_TYPE_inline_data))			\
+	x(inodes,	1, BTREE_ID_SNAPSHOTS,			\
+	  BIT_ULL(KEY_TYPE_whiteout)|				\
+	  BIT_ULL(KEY_TYPE_inode)|				\
+	  BIT_ULL(KEY_TYPE_inode_v2)|				\
+	  BIT_ULL(KEY_TYPE_inode_v3)|				\
+	  BIT_ULL(KEY_TYPE_inode_generation))			\
+	x(dirents,	2, BTREE_ID_SNAPSHOTS,			\
+	  BIT_ULL(KEY_TYPE_whiteout)|				\
+	  BIT_ULL(KEY_TYPE_hash_whiteout)|			\
+	  BIT_ULL(KEY_TYPE_dirent))				\
+	x(xattrs,	3, BTREE_ID_SNAPSHOTS,			\
+	  BIT_ULL(KEY_TYPE_whiteout)|				\
+	  BIT_ULL(KEY_TYPE_cookie)|				\
+	  BIT_ULL(KEY_TYPE_hash_whiteout)|			\
+	  BIT_ULL(KEY_TYPE_xattr))				\
+	x(alloc,	4, 0,					\
+	  BIT_ULL(KEY_TYPE_alloc)|				\
+	  BIT_ULL(KEY_TYPE_alloc_v2)|				\
+	  BIT_ULL(KEY_TYPE_alloc_v3)|				\
+	  BIT_ULL(KEY_TYPE_alloc_v4))				\
+	x(quotas,	5, 0,					\
+	  BIT_ULL(KEY_TYPE_quota))				\
+	x(stripes,	6, 0,					\
+	  BIT_ULL(KEY_TYPE_stripe))				\
+	x(reflink,	7, BTREE_ID_EXTENTS|BTREE_ID_DATA,	\
+	  BIT_ULL(KEY_TYPE_reflink_v)|				\
+	  BIT_ULL(KEY_TYPE_indirect_inline_data))		\
+	x(subvolumes,	8, 0,					\
+	  BIT_ULL(KEY_TYPE_subvolume))				\
+	x(snapshots,	9, 0,					\
+	  BIT_ULL(KEY_TYPE_snapshot))				\
+	x(lru,		10, 0,					\
+	  BIT_ULL(KEY_TYPE_set))				\
+	x(freespace,	11, BTREE_ID_EXTENTS,			\
+	  BIT_ULL(KEY_TYPE_set))				\
+	x(need_discard,	12, 0,					\
+	  BIT_ULL(KEY_TYPE_set))				\
+	x(backpointers,	13, 0,					\
+	  BIT_ULL(KEY_TYPE_backpointer))			\
+	x(bucket_gens,	14, 0,					\
+	  BIT_ULL(KEY_TYPE_bucket_gens))			\
+	x(snapshot_trees, 15, 0,				\
+	  BIT_ULL(KEY_TYPE_snapshot_tree))
 
 enum btree_id {
-#define x(kwd, val) BTREE_ID_##kwd = val,
+#define x(name, nr, ...) BTREE_ID_##name = nr,
 	BCH_BTREE_IDS()
 #undef x
 	BTREE_ID_NR
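Note on the BCH_BTREE_IDS() change above: each entry now carries a flags word and a 64-bit mask of allowed key types, and every expansion site defines x() to pick out only the fields it needs (extra fields are swallowed by "..."). A compilable sketch of the pattern with made-up entries, not the real bcachefs lists:

	#include <stdint.h>

	#define MY_IDS()				\
		x(extents,	0, 1, 0x0eULL)		\
		x(inodes,	1, 0, 0x30ULL)

	enum my_id {
	#define x(name, nr, ...) MY_ID_##name = nr,
		MY_IDS()
	#undef x
		MY_ID_NR
	};

	/* bit 0 plays the role of the always-allowed "deleted" type */
	static const uint64_t my_key_types_allowed[] = {
	#define x(name, nr, flags, keys) [MY_ID_##name] = 1ULL | (keys),
		MY_IDS()
	#undef x
	};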
@@ -140,78 +140,14 @@ int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
 	return ops->key_invalid(c, k, flags, err);
 }
 
-static unsigned bch2_key_types_allowed[] = {
-	[BKEY_TYPE_extents] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_whiteout)|
-		(1U << KEY_TYPE_error)|
-		(1U << KEY_TYPE_cookie)|
-		(1U << KEY_TYPE_extent)|
-		(1U << KEY_TYPE_reservation)|
-		(1U << KEY_TYPE_reflink_p)|
-		(1U << KEY_TYPE_inline_data),
-	[BKEY_TYPE_inodes] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_whiteout)|
-		(1U << KEY_TYPE_inode)|
-		(1U << KEY_TYPE_inode_v2)|
-		(1U << KEY_TYPE_inode_v3)|
-		(1U << KEY_TYPE_inode_generation),
-	[BKEY_TYPE_dirents] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_whiteout)|
-		(1U << KEY_TYPE_hash_whiteout)|
-		(1U << KEY_TYPE_dirent),
-	[BKEY_TYPE_xattrs] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_whiteout)|
-		(1U << KEY_TYPE_cookie)|
-		(1U << KEY_TYPE_hash_whiteout)|
-		(1U << KEY_TYPE_xattr),
-	[BKEY_TYPE_alloc] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_alloc)|
-		(1U << KEY_TYPE_alloc_v2)|
-		(1U << KEY_TYPE_alloc_v3)|
-		(1U << KEY_TYPE_alloc_v4),
-	[BKEY_TYPE_quotas] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_quota),
-	[BKEY_TYPE_stripes] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_stripe),
-	[BKEY_TYPE_reflink] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_reflink_v)|
-		(1U << KEY_TYPE_indirect_inline_data),
-	[BKEY_TYPE_subvolumes] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_subvolume),
-	[BKEY_TYPE_snapshots] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_snapshot),
-	[BKEY_TYPE_lru] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_set),
-	[BKEY_TYPE_freespace] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_set),
-	[BKEY_TYPE_need_discard] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_set),
-	[BKEY_TYPE_backpointers] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_backpointer),
-	[BKEY_TYPE_bucket_gens] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_bucket_gens),
-	[BKEY_TYPE_snapshot_trees] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_snapshot_tree),
+static u64 bch2_key_types_allowed[] = {
+#define x(name, nr, flags, keys) [BKEY_TYPE_##name] = BIT_ULL(KEY_TYPE_deleted)|keys,
+	BCH_BTREE_IDS()
+#undef x
 	[BKEY_TYPE_btree] =
-		(1U << KEY_TYPE_deleted)|
-		(1U << KEY_TYPE_btree_ptr)|
-		(1U << KEY_TYPE_btree_ptr_v2),
+		BIT_ULL(KEY_TYPE_deleted)|
+		BIT_ULL(KEY_TYPE_btree_ptr)|
+		BIT_ULL(KEY_TYPE_btree_ptr_v2),
 };
 
 int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
@@ -225,7 +161,7 @@ int __bch2_bkey_invalid(struct bch_fs *c, struct bkey_s_c k,
 	}
 
 	if (flags & BKEY_INVALID_COMMIT &&
-	    !(bch2_key_types_allowed[type] & (1U << k.k->type))) {
+	    !(bch2_key_types_allowed[type] & BIT_ULL(k.k->type))) {
 		prt_printf(err, "invalid key type for btree %s (%s)",
 			   bch2_btree_ids[type], bch2_bkey_types[k.k->type]);
 		return -BCH_ERR_invalid_bkey;
@@ -55,11 +55,12 @@ static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type)
 		: &bch2_bkey_null_ops;
 }
 
-int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c, unsigned, struct printbuf *);
-int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c,
-			enum btree_node_type, unsigned, struct printbuf *);
-int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c,
-		      enum btree_node_type, unsigned, struct printbuf *);
+int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c,
+			  enum bkey_invalid_flags, struct printbuf *);
+int __bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
+			enum bkey_invalid_flags, struct printbuf *);
+int bch2_bkey_invalid(struct bch_fs *, struct bkey_s_c, enum btree_node_type,
+		      enum bkey_invalid_flags, struct printbuf *);
 int bch2_bkey_in_btree_node(struct btree *, struct bkey_s_c, struct printbuf *);
 
 void bch2_bpos_to_text(struct printbuf *, struct bpos);
@@ -51,7 +51,7 @@ static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
 static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
 {
 	switch (id) {
-#define x(name, v) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
+#define x(name, v, ...) case BTREE_ID_##name: return GC_PHASE_BTREE_##name;
 	BCH_BTREE_IDS()
 #undef x
 	default:
@@ -35,18 +35,6 @@ static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
 
 static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
 
-/*
- * Unlocks before scheduling
- * Note: does not revalidate iterator
- */
-static inline int bch2_trans_cond_resched(struct btree_trans *trans)
-{
-	if (need_resched() || race_fault())
-		return drop_locks_do(trans, (schedule(), 0));
-	else
-		return 0;
-}
-
 static inline int __btree_path_cmp(const struct btree_path *l,
 				   enum btree_id r_btree_id,
 				   bool r_cached,
@@ -2732,16 +2720,6 @@ void bch2_trans_iter_exit(struct btree_trans *trans, struct btree_iter *iter)
 	iter->key_cache_path = NULL;
 }
 
-static inline void bch2_trans_iter_init_inlined(struct btree_trans *trans,
-					struct btree_iter *iter,
-					unsigned btree_id, struct bpos pos,
-					unsigned flags)
-{
-	bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
-				    bch2_btree_iter_flags(trans, btree_id, flags),
-				    _RET_IP_);
-}
-
 void bch2_trans_iter_init_outlined(struct btree_trans *trans,
 				   struct btree_iter *iter,
 				   unsigned btree_id, struct bpos pos,
@@ -387,7 +387,7 @@ int __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree_path *p
 	six_lock_readers_add(&b->lock, readers);
 
 	if (ret)
-		mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_intent);
+		mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_INTENT_LOCKED);
 
 	return ret;
 }
@@ -551,7 +551,7 @@ bool bch2_btree_node_upgrade(struct btree_trans *trans,
 	trace_and_count(trans->c, btree_path_upgrade_fail, trans, _RET_IP_, path, level);
 	return false;
 success:
-	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
 	return true;
 }
 
@@ -666,7 +666,7 @@ void __bch2_btree_path_downgrade(struct btree_trans *trans,
 		} else {
 			if (btree_node_intent_locked(path, l)) {
 				six_lock_downgrade(&path->l[l].b->c.lock);
-				mark_btree_node_locked_noreset(path, l, SIX_LOCK_read);
+				mark_btree_node_locked_noreset(path, l, BTREE_NODE_READ_LOCKED);
 			}
 			break;
 		}
@@ -181,7 +181,7 @@ bch2_btree_node_unlock_write_inlined(struct btree_trans *trans, struct btree_pat
 	EBUG_ON(path->l[b->c.level].lock_seq != six_lock_seq(&b->c.lock));
 	EBUG_ON(btree_node_locked_type(path, b->c.level) != SIX_LOCK_write);
 
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 
 	trans_for_each_path_with_node(trans, b, linked)
 		linked->l[b->c.level].lock_seq++;
@@ -294,7 +294,7 @@ static inline int __btree_node_lock_write(struct btree_trans *trans,
 	 * write lock: thus, we need to tell the cycle detector we have a write
	 * lock _before_ taking the lock:
 	 */
-	mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
+	mark_btree_node_locked_noreset(path, b->level, BTREE_NODE_WRITE_LOCKED);
 
 	return likely(six_trylock_write(&b->lock))
 		? 0
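Note on the locking hunks above: mark_btree_node_locked_noreset() now takes a btree-node lock-state value rather than a six-lock type. Judging from the names in the diff, the state enum mirrors the six lock types where they overlap; a sketch of that assumed layout (the six-lock values below are stand-ins, not copied from the tree):

	enum six_lock_type { SIX_LOCK_read, SIX_LOCK_intent, SIX_LOCK_write };

	enum btree_node_locked_type {
		BTREE_NODE_UNLOCKED		= -1,
		BTREE_NODE_READ_LOCKED		= SIX_LOCK_read,
		BTREE_NODE_INTENT_LOCKED	= SIX_LOCK_intent,
		BTREE_NODE_WRITE_LOCKED		= SIX_LOCK_write,
	};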
@@ -643,7 +643,7 @@ static inline unsigned bset_byte_offset(struct btree *b, void *i)
 }
 
 enum btree_node_type {
-#define x(kwd, val) BKEY_TYPE_##kwd = val,
+#define x(kwd, val, ...) BKEY_TYPE_##kwd = val,
 	BCH_BTREE_IDS()
 #undef x
 	BKEY_TYPE_btree,
@@ -662,31 +662,37 @@ static inline enum btree_node_type btree_node_type(struct btree *b)
 }
 
 #define BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS	\
-	((1U << BKEY_TYPE_extents)|		\
-	 (1U << BKEY_TYPE_alloc)|		\
-	 (1U << BKEY_TYPE_inodes)|		\
-	 (1U << BKEY_TYPE_stripes)|		\
-	 (1U << BKEY_TYPE_reflink)|		\
-	 (1U << BKEY_TYPE_btree))
+	(BIT(BKEY_TYPE_extents)|		\
+	 BIT(BKEY_TYPE_alloc)|			\
+	 BIT(BKEY_TYPE_inodes)|			\
+	 BIT(BKEY_TYPE_stripes)|		\
+	 BIT(BKEY_TYPE_reflink)|		\
+	 BIT(BKEY_TYPE_btree))
 
 #define BTREE_NODE_TYPE_HAS_MEM_TRIGGERS	\
-	((1U << BKEY_TYPE_alloc)|		\
-	 (1U << BKEY_TYPE_inodes)|		\
-	 (1U << BKEY_TYPE_stripes)|		\
-	 (1U << BKEY_TYPE_snapshots))
+	(BIT(BKEY_TYPE_alloc)|			\
+	 BIT(BKEY_TYPE_inodes)|			\
+	 BIT(BKEY_TYPE_stripes)|		\
+	 BIT(BKEY_TYPE_snapshots))
 
 #define BTREE_NODE_TYPE_HAS_TRIGGERS		\
 	(BTREE_NODE_TYPE_HAS_TRANS_TRIGGERS|	\
 	 BTREE_NODE_TYPE_HAS_MEM_TRIGGERS)
 
-#define BTREE_ID_IS_EXTENTS			\
-	((1U << BTREE_ID_extents)|		\
-	 (1U << BTREE_ID_reflink)|		\
-	 (1U << BTREE_ID_freespace))
+static inline bool btree_node_type_needs_gc(enum btree_node_type type)
+{
+	return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
+}
 
 static inline bool btree_node_type_is_extents(enum btree_node_type type)
 {
-	return (1U << type) & BTREE_ID_IS_EXTENTS;
+	const unsigned mask = 0
+#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_EXTENTS)) << nr)
+	BCH_BTREE_IDS()
+#undef x
+	;
+
+	return (1U << type) & mask;
 }
 
 static inline bool btree_id_is_extents(enum btree_id btree)
@@ -694,29 +700,26 @@ static inline bool btree_id_is_extents(enum btree_id btree)
 	return btree_node_type_is_extents((enum btree_node_type) btree);
 }
 
-#define BTREE_ID_HAS_SNAPSHOTS			\
-	((1U << BTREE_ID_extents)|		\
-	 (1U << BTREE_ID_inodes)|		\
-	 (1U << BTREE_ID_dirents)|		\
-	 (1U << BTREE_ID_xattrs))
-
-#define BTREE_ID_HAS_PTRS			\
-	((1U << BTREE_ID_extents)|		\
-	 (1U << BTREE_ID_reflink))
-
 static inline bool btree_type_has_snapshots(enum btree_id id)
 {
-	return (1 << id) & BTREE_ID_HAS_SNAPSHOTS;
+	const unsigned mask = 0
+#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_SNAPSHOTS)) << nr)
+	BCH_BTREE_IDS()
+#undef x
+	;
+
+	return (1U << id) & mask;
 }
 
 static inline bool btree_type_has_ptrs(enum btree_id id)
 {
-	return (1 << id) & BTREE_ID_HAS_PTRS;
-}
-
-static inline bool btree_node_type_needs_gc(enum btree_node_type type)
-{
-	return BTREE_NODE_TYPE_HAS_TRIGGERS & (1U << type);
+	const unsigned mask = 0
+#define x(name, nr, flags, ...)	|((!!((flags) & BTREE_ID_DATA)) << nr)
+	BCH_BTREE_IDS()
+#undef x
+	;
+
+	return (1U << id) & mask;
 }
 
 struct btree_root {
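Note on the btree_types.h hunks above: the hand-maintained BTREE_ID_HAS_* masks are gone; each predicate now folds the per-btree flags into a mask at compile time by re-expanding BCH_BTREE_IDS(). A self-contained sketch of the trick with simplified names:

	#define MY_ID_SNAPSHOTS	(1U << 1)

	#define MY_IDS()				\
		x(extents,	0, MY_ID_SNAPSHOTS, 0)	\
		x(alloc,	1, 0,               0)

	enum my_id {
	#define x(name, nr, ...) MY_ID_##name = nr,
		MY_IDS()
	#undef x
	};

	static inline _Bool my_type_has_snapshots(enum my_id id)
	{
		/* constant-folds to a literal bitmask at compile time */
		const unsigned mask = 0
	#define x(name, nr, flags, ...) |((!!((flags) & MY_ID_SNAPSHOTS)) << nr)
		MY_IDS()
	#undef x
		;

		return (1U << id) & mask;
	}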
@@ -74,15 +74,6 @@ int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
 
 int bch2_btree_bit_mod(struct btree_trans *, enum btree_id, struct bpos, bool);
 
-int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
-			    struct btree *, unsigned);
-void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
-int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
-			       struct btree *, struct bkey_i *,
-			       unsigned, bool);
-int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
-					struct bkey_i *, unsigned, bool);
-
 int __bch2_insert_snapshot_whiteouts(struct btree_trans *, enum btree_id,
 				     struct bpos, struct bpos);
 
@@ -105,8 +96,9 @@ static inline int bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
 	return __bch2_insert_snapshot_whiteouts(trans, btree, old_pos, new_pos);
 }
 
-int bch2_trans_update_extent(struct btree_trans *, struct btree_iter *,
-			     struct bkey_i *, enum btree_update_flags);
+int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *,
+				       enum btree_update_flags,
+				       struct bkey_s_c, struct bkey_s_c);
 
 int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
 			     enum btree_id, struct bpos);
@@ -188,7 +188,7 @@ static void bch2_btree_node_free_inmem(struct btree_trans *trans,
 	bch2_btree_node_hash_remove(&c->btree_cache, b);
 	__btree_node_free(c, b);
 	six_unlock_write(&b->c.lock);
-	mark_btree_node_locked_noreset(path, level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, level, BTREE_NODE_INTENT_LOCKED);
 
 	trans_for_each_path(trans, path)
 		if (path->l[level].b == b) {
@@ -720,7 +720,7 @@ err:
 
 	mutex_unlock(&c->btree_interior_update_lock);
 
-	mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_intent);
+	mark_btree_node_locked_noreset(path, b->c.level, BTREE_NODE_INTENT_LOCKED);
 	six_unlock_write(&b->c.lock);
 
 	btree_node_write_if_need(c, b, SIX_LOCK_intent);
@@ -154,6 +154,15 @@ static inline int bch2_foreground_maybe_merge(struct btree_trans *trans,
 					 btree_next_sib);
 }
 
+int bch2_btree_node_rewrite(struct btree_trans *, struct btree_iter *,
+			    struct btree *, unsigned);
+void bch2_btree_node_rewrite_async(struct bch_fs *, struct btree *);
+int bch2_btree_node_update_key(struct btree_trans *, struct btree_iter *,
+			       struct btree *, struct bkey_i *,
+			       unsigned, bool);
+int bch2_btree_node_update_key_get_iter(struct btree_trans *, struct btree *,
+					struct bkey_i *, unsigned, bool);
+
 void bch2_btree_set_root_for_read(struct bch_fs *, struct btree *);
 void bch2_btree_root_alloc(struct bch_fs *, enum btree_id);
 
|
@ -413,7 +413,7 @@ static int run_one_mem_trigger(struct btree_trans *trans,
|
|||||||
if (unlikely(flags & BTREE_TRIGGER_NORUN))
|
if (unlikely(flags & BTREE_TRIGGER_NORUN))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (!btree_node_type_needs_gc(i->btree_id))
|
if (!btree_node_type_needs_gc((enum btree_node_type) i->btree_id))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
if (old_ops->atomic_trigger == new_ops->atomic_trigger &&
|
if (old_ops->atomic_trigger == new_ops->atomic_trigger &&
|
||||||
@ -852,12 +852,11 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
|
|||||||
{
|
{
|
||||||
struct bch_fs *c = trans->c;
|
struct bch_fs *c = trans->c;
|
||||||
struct btree_insert_entry *i;
|
struct btree_insert_entry *i;
|
||||||
int ret, u64s_delta = 0;
|
int ret = 0, u64s_delta = 0;
|
||||||
|
|
||||||
#ifdef CONFIG_BCACHEFS_DEBUG
|
#ifdef CONFIG_BCACHEFS_DEBUG
|
||||||
struct printbuf buf = PRINTBUF;
|
|
||||||
|
|
||||||
trans_for_each_update(trans, i) {
|
trans_for_each_update(trans, i) {
|
||||||
|
struct printbuf buf = PRINTBUF;
|
||||||
enum bkey_invalid_flags invalid_flags = 0;
|
enum bkey_invalid_flags invalid_flags = 0;
|
||||||
|
|
||||||
if (!(flags & BTREE_INSERT_JOURNAL_REPLAY))
|
if (!(flags & BTREE_INSERT_JOURNAL_REPLAY))
|
||||||
@ -865,10 +864,13 @@ static inline int do_bch2_trans_commit(struct btree_trans *trans, unsigned flags
|
|||||||
|
|
||||||
if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
|
if (unlikely(bch2_bkey_invalid(c, bkey_i_to_s_c(i->k),
|
||||||
i->bkey_type, invalid_flags, &buf)))
|
i->bkey_type, invalid_flags, &buf)))
|
||||||
return bch2_trans_commit_bkey_invalid(trans, flags, i, &buf);
|
ret = bch2_trans_commit_bkey_invalid(trans, flags, i, &buf);
|
||||||
btree_insert_entry_checks(trans, i);
|
btree_insert_entry_checks(trans, i);
|
||||||
}
|
|
||||||
printbuf_exit(&buf);
|
printbuf_exit(&buf);
|
||||||
|
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
trans_for_each_update(trans, i) {
|
trans_for_each_update(trans, i) {
|
||||||
@@ -1327,7 +1329,7 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
 				     struct bpos new_pos)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter old_iter, new_iter;
+	struct btree_iter old_iter, new_iter = { NULL };
 	struct bkey_s_c old_k, new_k;
 	snapshot_id_list s;
 	struct bkey_i *update;
@@ -1377,25 +1379,122 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
 		if (ret)
 			break;
 	}
+	bch2_trans_iter_exit(trans, &new_iter);
 	bch2_trans_iter_exit(trans, &old_iter);
 	darray_exit(&s);
 
 	return ret;
 }
 
-int bch2_trans_update_extent(struct btree_trans *trans,
+int bch2_trans_update_extent_overwrite(struct btree_trans *trans,
+				       struct btree_iter *iter,
+				       enum btree_update_flags flags,
+				       struct bkey_s_c old,
+				       struct bkey_s_c new)
+{
+	enum btree_id btree_id = iter->btree_id;
+	struct bkey_i *update;
+	struct bpos new_start = bkey_start_pos(new.k);
+	bool front_split = bkey_lt(bkey_start_pos(old.k), new_start);
+	bool back_split = bkey_gt(old.k->p, new.k->p);
+	int ret = 0, compressed_sectors;
+
+	/*
+	 * If we're going to be splitting a compressed extent, note it
+	 * so that __bch2_trans_commit() can increase our disk
+	 * reservation:
+	 */
+	if (((front_split && back_split) ||
+	     ((front_split || back_split) && old.k->p.snapshot != new.k->p.snapshot)) &&
+	    (compressed_sectors = bch2_bkey_sectors_compressed(old)))
+		trans->extra_journal_res += compressed_sectors;
+
+	if (front_split) {
+		update = bch2_bkey_make_mut_noupdate(trans, old);
+		if ((ret = PTR_ERR_OR_ZERO(update)))
+			return ret;
+
+		bch2_cut_back(new_start, update);
+
+		ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
+					old.k->p, update->k.p) ?:
+			bch2_btree_insert_nonextent(trans, btree_id, update,
+					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+		if (ret)
+			return ret;
+	}
+
+	/* If we're overwriting in a different snapshot - middle split: */
+	if (old.k->p.snapshot != new.k->p.snapshot &&
+	    (front_split || back_split)) {
+		update = bch2_bkey_make_mut_noupdate(trans, old);
+		if ((ret = PTR_ERR_OR_ZERO(update)))
+			return ret;
+
+		bch2_cut_front(new_start, update);
+		bch2_cut_back(new.k->p, update);
+
+		ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
+					old.k->p, update->k.p) ?:
+			bch2_btree_insert_nonextent(trans, btree_id, update,
+					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+		if (ret)
+			return ret;
+	}
+
+	if (bkey_le(old.k->p, new.k->p)) {
+		update = bch2_trans_kmalloc(trans, sizeof(*update));
+		if ((ret = PTR_ERR_OR_ZERO(update)))
+			return ret;
+
+		bkey_init(&update->k);
+		update->k.p = old.k->p;
+		update->k.p.snapshot = new.k->p.snapshot;
+
+		if (new.k->p.snapshot != old.k->p.snapshot) {
+			update->k.type = KEY_TYPE_whiteout;
+		} else if (btree_type_has_snapshots(btree_id)) {
+			ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
+			if (ret < 0)
+				return ret;
+			if (ret)
+				update->k.type = KEY_TYPE_whiteout;
+		}
+
+		ret = bch2_btree_insert_nonextent(trans, btree_id, update,
+					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+		if (ret)
+			return ret;
+	}
+
+	if (back_split) {
+		update = bch2_bkey_make_mut_noupdate(trans, old);
+		if ((ret = PTR_ERR_OR_ZERO(update)))
+			return ret;
+
+		bch2_cut_front(new.k->p, update);
+
+		ret = bch2_trans_update_by_path(trans, iter->path, update,
+					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
+					flags, _RET_IP_);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int bch2_trans_update_extent(struct btree_trans *trans,
 			     struct btree_iter *orig_iter,
 			     struct bkey_i *insert,
 			     enum btree_update_flags flags)
 {
 	struct btree_iter iter;
-	struct bpos start = bkey_start_pos(&insert->k);
-	struct bkey_i *update;
 	struct bkey_s_c k;
 	enum btree_id btree_id = orig_iter->btree_id;
-	int ret = 0, compressed_sectors;
+	int ret = 0;
 
-	bch2_trans_iter_init(trans, &iter, btree_id, start,
+	bch2_trans_iter_init(trans, &iter, btree_id, bkey_start_pos(&insert->k),
 			     BTREE_ITER_INTENT|
 			     BTREE_ITER_WITH_UPDATES|
 			     BTREE_ITER_NOT_EXTENTS);
@@ -1416,90 +1515,14 @@ int bch2_trans_update_extent(struct btree_trans *trans,
 	}
 
 	while (bkey_gt(insert->k.p, bkey_start_pos(k.k))) {
-		bool front_split = bkey_lt(bkey_start_pos(k.k), start);
-		bool back_split = bkey_gt(k.k->p, insert->k.p);
+		bool done = bkey_lt(insert->k.p, k.k->p);
 
-		/*
-		 * If we're going to be splitting a compressed extent, note it
-		 * so that __bch2_trans_commit() can increase our disk
-		 * reservation:
-		 */
-		if (((front_split && back_split) ||
-		     ((front_split || back_split) && k.k->p.snapshot != insert->k.p.snapshot)) &&
-		    (compressed_sectors = bch2_bkey_sectors_compressed(k)))
-			trans->extra_journal_res += compressed_sectors;
-
-		if (front_split) {
-			update = bch2_bkey_make_mut_noupdate(trans, k);
-			if ((ret = PTR_ERR_OR_ZERO(update)))
-				goto err;
-
-			bch2_cut_back(start, update);
-
-			ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
-						k.k->p, update->k.p) ?:
-				bch2_btree_insert_nonextent(trans, btree_id, update,
-						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
+		ret = bch2_trans_update_extent_overwrite(trans, &iter, flags, k, bkey_i_to_s_c(insert));
 		if (ret)
 			goto err;
-		}
 
-		if (k.k->p.snapshot != insert->k.p.snapshot &&
-		    (front_split || back_split)) {
-			update = bch2_bkey_make_mut_noupdate(trans, k);
-			if ((ret = PTR_ERR_OR_ZERO(update)))
-				goto err;
-
-			bch2_cut_front(start, update);
-			bch2_cut_back(insert->k.p, update);
-
-			ret = bch2_insert_snapshot_whiteouts(trans, btree_id,
-						k.k->p, update->k.p) ?:
-				bch2_btree_insert_nonextent(trans, btree_id, update,
-						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
-			if (ret)
-				goto err;
-		}
-
-		if (bkey_le(k.k->p, insert->k.p)) {
-			update = bch2_trans_kmalloc(trans, sizeof(*update));
-			if ((ret = PTR_ERR_OR_ZERO(update)))
-				goto err;
-
-			bkey_init(&update->k);
-			update->k.p = k.k->p;
-			update->k.p.snapshot = insert->k.p.snapshot;
-
-			if (insert->k.p.snapshot != k.k->p.snapshot) {
-				update->k.type = KEY_TYPE_whiteout;
-			} else if (btree_type_has_snapshots(btree_id)) {
-				ret = need_whiteout_for_snapshot(trans, btree_id, update->k.p);
-				if (ret < 0)
-					goto err;
-				if (ret)
-					update->k.type = KEY_TYPE_whiteout;
-			}
-
-			ret = bch2_btree_insert_nonextent(trans, btree_id, update,
-						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|flags);
-			if (ret)
-				goto err;
-		}
-
-		if (back_split) {
-			update = bch2_bkey_make_mut_noupdate(trans, k);
-			if ((ret = PTR_ERR_OR_ZERO(update)))
-				goto err;
-
-			bch2_cut_front(insert->k.p, update);
-
-			ret = bch2_trans_update_by_path(trans, iter.path, update,
-						  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE|
-						  flags, _RET_IP_);
-			if (ret)
-				goto err;
+		if (done)
 			goto out;
-		}
 next:
 		bch2_btree_iter_advance(&iter);
 		k = bch2_btree_iter_peek_upto(&iter, POS(insert->k.p.inode, U64_MAX));
|
|||||||
goto err;
|
goto err;
|
||||||
}
|
}
|
||||||
out:
|
out:
|
||||||
if (!bkey_deleted(&insert->k)) {
|
if (!bkey_deleted(&insert->k))
|
||||||
/*
|
ret = bch2_btree_insert_nonextent(trans, btree_id, insert, flags);
|
||||||
* Rewinding iterators is expensive: get a new one and the one
|
|
||||||
* that points to the start of insert will be cloned from:
|
|
||||||
*/
|
|
||||||
bch2_trans_iter_exit(trans, &iter);
|
|
||||||
bch2_trans_iter_init(trans, &iter, btree_id, insert->k.p,
|
|
||||||
BTREE_ITER_NOT_EXTENTS|
|
|
||||||
BTREE_ITER_INTENT);
|
|
||||||
ret = bch2_btree_iter_traverse(&iter) ?:
|
|
||||||
bch2_trans_update(trans, &iter, insert, flags);
|
|
||||||
}
|
|
||||||
err:
|
err:
|
||||||
bch2_trans_iter_exit(trans, &iter);
|
bch2_trans_iter_exit(trans, &iter);
|
||||||
|
|
||||||
|
@@ -59,13 +59,13 @@ static inline int __darray_make_room(darray_void *d, size_t t_size, size_t more,
 #define darray_first(_d)	((_d).data[0])
 #define darray_last(_d)		((_d).data[(_d).nr - 1])
 
-#define darray_insert_item(_d, _pos, _item)				\
+#define darray_insert_item(_d, pos, _item)				\
 ({									\
-	size_t pos = (_pos);						\
+	size_t _pos = (pos);						\
 	int _ret = darray_make_room((_d), 1);				\
 									\
 	if (!_ret)							\
-		array_insert_item((_d)->data, (_d)->nr, pos, (_item));	\
+		array_insert_item((_d)->data, (_d)->nr, _pos, (_item));	\
 	_ret;								\
 })
 
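Note on the darray hunk above: the rename is a macro-hygiene fix. Inside a statement expression, the temporary is in scope while the argument expression is evaluated, so the internal name must be one callers are unlikely to use. A simplified, hypothetical illustration of the hazard:

	/* Safe: the temporary has a reserved-looking name. */
	#define square(x)	({ int _x = (x); _x * _x; })

	/*
	 * Had the temporary been named plain "x" -- ({ int x = (x); x * x; })
	 * -- calling square(n + x) from a function with its own "x" would
	 * initialize the new local from itself, not from the caller's "x".
	 */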
libbcachefs/ec.c
@@ -200,19 +200,22 @@ static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
 
 static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
 {
+	if (buf->key.k.type == KEY_TYPE_stripe) {
+		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
 		unsigned i;
 
-		for (i = 0; i < buf->key.v.nr_blocks; i++) {
+		for (i = 0; i < s->v.nr_blocks; i++) {
 			kvpfree(buf->data[i], buf->size << 9);
 			buf->data[i] = NULL;
 		}
+	}
 }
 
 /* XXX: this is a non-mempoolified memory allocation: */
 static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
 			      unsigned offset, unsigned size)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned csum_granularity = 1U << v->csum_granularity_bits;
 	unsigned end = offset + size;
 	unsigned i;
@@ -228,7 +231,7 @@ static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
 
 	memset(buf->valid, 0xFF, sizeof(buf->valid));
 
-	for (i = 0; i < buf->key.v.nr_blocks; i++) {
+	for (i = 0; i < v->nr_blocks; i++) {
 		buf->data[i] = kvpmalloc(buf->size << 9, GFP_KERNEL);
 		if (!buf->data[i])
 			goto err;
@@ -245,7 +248,7 @@ err:
 static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
 					 unsigned block, unsigned offset)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned csum_granularity = 1 << v->csum_granularity_bits;
 	unsigned end = buf->offset + buf->size;
 	unsigned len = min(csum_granularity, end - offset);
@@ -264,7 +267,7 @@ static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
 
 static void ec_generate_checksums(struct ec_stripe_buf *buf)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned i, j, csums_per_device = stripe_csums_per_device(v);
 
 	if (!v->csum_type)
@@ -281,7 +284,7 @@ static void ec_generate_checksums(struct ec_stripe_buf *buf)
 
 static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned csum_granularity = 1 << v->csum_granularity_bits;
 	unsigned i;
 
@@ -304,7 +307,7 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 		if (bch2_crc_cmp(want, got)) {
 			struct printbuf buf2 = PRINTBUF;
 
-			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key.k_i));
+			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&buf->key));
 
 			bch_err_ratelimited(c,
 				"stripe checksum error for %ps at %u:%u: csum type %u, expected %llx got %llx\n%s",
@@ -324,7 +327,7 @@ static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
 
 static void ec_generate_ec(struct ec_stripe_buf *buf)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned nr_data = v->nr_blocks - v->nr_redundant;
 	unsigned bytes = le16_to_cpu(v->sectors) << 9;
 
@@ -333,13 +336,14 @@ static void ec_generate_ec(struct ec_stripe_buf *buf)
 
 static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
 {
-	return buf->key.v.nr_blocks -
-		bitmap_weight(buf->valid, buf->key.v.nr_blocks);
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
+
+	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
 }
 
 static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
 	unsigned nr_data = v->nr_blocks - v->nr_redundant;
 	unsigned bytes = buf->size << 9;
@@ -363,7 +367,7 @@ static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
 static void ec_block_endio(struct bio *bio)
 {
 	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
-	struct bch_stripe *v = &ec_bio->buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
 	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
 	struct bch_dev *ca = ec_bio->ca;
 	struct closure *cl = bio->bi_private;
@@ -388,11 +392,11 @@ static void ec_block_endio(struct bio *bio)
 static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
 			blk_opf_t opf, unsigned idx, struct closure *cl)
 {
-	struct bch_stripe *v = &buf->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
 	unsigned offset = 0, bytes = buf->size << 9;
 	struct bch_extent_ptr *ptr = &v->ptrs[idx];
 	struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
-	enum bch_data_type data_type = idx < buf->key.v.nr_blocks - buf->key.v.nr_redundant
+	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
 		? BCH_DATA_user
 		: BCH_DATA_parity;
 	int rw = op_is_write(opf);
@@ -463,7 +467,7 @@ static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
 		ret = -ENOENT;
 		goto err;
 	}
-	bkey_reassemble(&stripe->key.k_i, k);
+	bkey_reassemble(&stripe->key, k);
 err:
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
@@ -499,7 +503,7 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 		return -EIO;
 	}
 
-	v = &buf->key.v;
+	v = &bkey_i_to_stripe(&buf->key)->v;
 
 	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
 		bch_err_ratelimited(c,
@@ -875,6 +879,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
 				   struct ec_stripe_buf *s,
 				   struct bpos *bp_pos)
 {
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
 	struct bch_fs *c = trans->c;
 	struct bch_backpointer bp;
 	struct btree_iter iter;
@@ -926,7 +931,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
 	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
 		goto out;
 
-	ptr_c = bkey_matches_stripe(&s->key.v, k, &block);
+	ptr_c = bkey_matches_stripe(v, k, &block);
 	/*
 	 * It doesn't generally make sense to erasure code cached ptrs:
 	 * XXX: should we be incrementing a counter?
@@ -934,7 +939,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
 	if (!ptr_c || ptr_c->cached)
 		goto out;
 
-	dev = s->key.v.ptrs[block].dev;
+	dev = v->ptrs[block].dev;
 
 	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
 	ret = PTR_ERR_OR_ZERO(n);
@@ -950,7 +955,7 @@ static int ec_stripe_update_extent(struct btree_trans *trans,
 	stripe_ptr = (struct bch_extent_stripe_ptr) {
 		.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
 		.block		= block,
-		.redundancy	= s->key.v.nr_redundant,
+		.redundancy	= v->nr_redundant,
 		.idx		= s->key.k.p.offset,
 	};
 
@@ -968,7 +973,8 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
 				   unsigned block)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_extent_ptr bucket = s->key.v.ptrs[block];
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
+	struct bch_extent_ptr bucket = v->ptrs[block];
 	struct bpos bucket_pos = PTR_BUCKET_POS(c, &bucket);
 	struct bpos bp_pos = POS_MIN;
 	int ret = 0;
@@ -993,7 +999,7 @@ static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_b
 static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
 {
 	struct btree_trans trans;
-	struct bch_stripe *v = &s->key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
 	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
 	int ret = 0;
 
@@ -1057,7 +1063,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
 {
 	struct bch_fs *c = s->c;
 	struct open_bucket *ob;
-	struct bch_stripe *v = &s->new_stripe.key.v;
+	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
 	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
 	int ret;
 
@@ -1090,7 +1096,7 @@ static void ec_stripe_create(struct ec_stripe_new *s)
 	}
 
 	for (i = 0; i < nr_data; i++)
-		if (stripe_blockcount_get(&s->existing_stripe.key.v, i))
+		if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
 			swap(s->new_stripe.data[i],
 			     s->existing_stripe.data[i]);
 
@@ -1117,7 +1123,8 @@ static void ec_stripe_create(struct ec_stripe_new *s)
 	ret = bch2_trans_do(c, &s->res, NULL,
 			    BTREE_INSERT_NOCHECK_RW|
 			    BTREE_INSERT_NOFAIL,
-			    ec_stripe_key_update(&trans, &s->new_stripe.key,
+			    ec_stripe_key_update(&trans,
+					bkey_i_to_stripe(&s->new_stripe.key),
 					!s->have_existing_stripe));
 	if (ret) {
 		bch_err(c, "error creating stripe: error creating stripe key");
@@ -1279,14 +1286,14 @@ static bool may_create_new_stripe(struct bch_fs *c)
 }
 
 static void ec_stripe_key_init(struct bch_fs *c,
-			       struct bkey_i_stripe *s,
+			       struct bkey_i *k,
 			       unsigned nr_data,
 			       unsigned nr_parity,
 			       unsigned stripe_size)
 {
+	struct bkey_i_stripe *s = bkey_stripe_init(k);
 	unsigned u64s;
 
-	bkey_stripe_init(&s->k_i);
 	s->v.sectors = cpu_to_le16(stripe_size);
 	s->v.algorithm = 0;
 	s->v.nr_blocks = nr_data + nr_parity;
@@ -1325,8 +1332,8 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
 			BCH_BKEY_PTRS_MAX) - h->redundancy;
 	s->nr_parity = h->redundancy;
 
-	ec_stripe_key_init(c, &s->new_stripe.key, s->nr_data,
-			   s->nr_parity, h->blocksize);
+	ec_stripe_key_init(c, &s->new_stripe.key,
+			   s->nr_data, s->nr_parity, h->blocksize);
 
 	h->s = s;
 	return 0;
@@ -1429,15 +1436,16 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 	struct bch_devs_mask devs = h->devs;
 	struct open_bucket *ob;
 	struct open_buckets buckets;
+	struct bch_stripe *v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
 	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
 	bool have_cache = true;
 	int ret = 0;
 
-	BUG_ON(h->s->new_stripe.key.v.nr_blocks != h->s->nr_data + h->s->nr_parity);
-	BUG_ON(h->s->new_stripe.key.v.nr_redundant != h->s->nr_parity);
+	BUG_ON(v->nr_blocks != h->s->nr_data + h->s->nr_parity);
+	BUG_ON(v->nr_redundant != h->s->nr_parity);
 
-	for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) {
-		__clear_bit(h->s->new_stripe.key.v.ptrs[i].dev, devs.d);
+	for_each_set_bit(i, h->s->blocks_gotten, v->nr_blocks) {
+		__clear_bit(v->ptrs[i].dev, devs.d);
 		if (i < h->s->nr_data)
 			nr_have_data++;
 		else
@@ -1466,7 +1474,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 			BUG_ON(j >= h->s->nr_data + h->s->nr_parity);
 
 			h->s->blocks[j] = buckets.v[i];
-			h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
+			v->ptrs[j] = bch2_ob_ptr(c, ob);
 			__set_bit(j, h->s->blocks_gotten);
 		}
 
@@ -1492,7 +1500,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
 			BUG_ON(j >= h->s->nr_data);
 
 			h->s->blocks[j] = buckets.v[i];
-			h->s->new_stripe.key.v.ptrs[j] = bch2_ob_ptr(c, ob);
+			v->ptrs[j] = bch2_ob_ptr(c, ob);
 			__set_bit(j, h->s->blocks_gotten);
 		}
 
@@ -1542,6 +1550,8 @@ static s64 get_existing_stripe(struct bch_fs *c,
 static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h)
 {
 	struct bch_fs *c = trans->c;
+	struct bch_stripe *new_v = &bkey_i_to_stripe(&h->s->new_stripe.key)->v;
+	struct bch_stripe *existing_v;
 	unsigned i;
 	s64 idx;
 	int ret;
@@ -1562,9 +1572,11 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
 		return ret;
 	}
 
-	BUG_ON(h->s->existing_stripe.key.v.nr_redundant != h->s->nr_parity);
-	h->s->nr_data = h->s->existing_stripe.key.v.nr_blocks -
-		h->s->existing_stripe.key.v.nr_redundant;
+	existing_v = &bkey_i_to_stripe(&h->s->existing_stripe.key)->v;
+
+	BUG_ON(existing_v->nr_redundant != h->s->nr_parity);
+	h->s->nr_data = existing_v->nr_blocks -
+		existing_v->nr_redundant;
 
 	ret = ec_stripe_buf_init(&h->s->existing_stripe, 0, h->blocksize);
 	if (ret) {
@@ -1573,21 +1585,21 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
 	}
 
 	BUG_ON(h->s->existing_stripe.size != h->blocksize);
-	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(h->s->existing_stripe.key.v.sectors));
+	BUG_ON(h->s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
 
 	/*
 	 * Free buckets we initially allocated - they might conflict with
 	 * blocks from the stripe we're reusing:
 	 */
-	for_each_set_bit(i, h->s->blocks_gotten, h->s->new_stripe.key.v.nr_blocks) {
+	for_each_set_bit(i, h->s->blocks_gotten, new_v->nr_blocks) {
 		bch2_open_bucket_put(c, c->open_buckets + h->s->blocks[i]);
 		h->s->blocks[i] = 0;
 	}
 	memset(h->s->blocks_gotten, 0, sizeof(h->s->blocks_gotten));
 	memset(h->s->blocks_allocated, 0, sizeof(h->s->blocks_allocated));
 
-	for (i = 0; i < h->s->existing_stripe.key.v.nr_blocks; i++) {
-		if (stripe_blockcount_get(&h->s->existing_stripe.key.v, i)) {
+	for (i = 0; i < existing_v->nr_blocks; i++) {
+		if (stripe_blockcount_get(existing_v, i)) {
 			__set_bit(i, h->s->blocks_gotten);
 			__set_bit(i, h->s->blocks_allocated);
 		}
@@ -1595,7 +1607,7 @@ static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stri
 		ec_block_io(c, &h->s->existing_stripe, READ, i, &h->s->iodone);
 	}
 
-	bkey_copy(&h->s->new_stripe.key.k_i, &h->s->existing_stripe.key.k_i);
+	bkey_copy(&h->s->new_stripe.key, &h->s->existing_stripe.key);
 	h->s->have_existing_stripe = true;
 
 	return 0;
@@ -1764,7 +1776,7 @@ static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
 		if (!ca)
 			goto found;
 
 		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
-		for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++) {
+		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
 			if (!h->s->blocks[i])
 				continue;
 
@@ -1922,7 +1934,7 @@ void bch2_fs_ec_exit(struct bch_fs *c)
 			break;
 
 		if (h->s) {
-			for (i = 0; i < h->s->new_stripe.key.v.nr_blocks; i++)
+			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
 				BUG_ON(h->s->blocks[i]);
 
 			kfree(h->s);
@@ -138,10 +138,7 @@ struct ec_stripe_buf {
 
 	void *data[BCH_BKEY_PTRS_MAX];
 
-	union {
-		struct bkey_i_stripe	key;
-		u64			pad[255];
-	};
+	__BKEY_PADDED(key, 255);
 };
 
 struct ec_stripe_head;
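
This ec.h hunk is what drives all of the ec.c churn above: the stripe buffer's key is no longer a typed struct bkey_i_stripe union member but a generic padded key, so every user now converts with bkey_i_to_stripe(). Assuming the conventional definition of the helper (the authoritative macro lives in bcachefs_format.h), __BKEY_PADDED(key, 255) expands to roughly:

/* rough expansion, member names follow the usual convention: */
struct bkey_i	key;		/* generic key header + start of value */
u64		key_pad[255];	/* reserved space for the value body   */

The practical difference from the old union is that the struct no longer claims its storage always holds a stripe key; conversions are type-checked at each use site instead, and generic helpers like the bkey_copy() above can take &buf->key directly.
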
@@ -688,7 +688,7 @@ bool bch2_extent_normalize(struct bch_fs *, struct bkey_s);
 void bch2_bkey_ptrs_to_text(struct printbuf *, struct bch_fs *,
 			    struct bkey_s_c);
 int bch2_bkey_ptrs_invalid(const struct bch_fs *, struct bkey_s_c,
-			   unsigned, struct printbuf *);
+			   enum bkey_invalid_flags, struct printbuf *);
 
 void bch2_ptr_swab(struct bkey_s);
 
@@ -219,69 +219,6 @@ static int write_inode(struct btree_trans *trans,
 	return ret;
 }
 
-static int fsck_inode_rm(struct btree_trans *trans, u64 inum, u32 snapshot)
-{
-	struct bch_fs *c = trans->c;
-	struct btree_iter iter = { NULL };
-	struct bkey_i_inode_generation delete;
-	struct bch_inode_unpacked inode_u;
-	struct bkey_s_c k;
-	int ret;
-
-	do {
-		ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
-						    SPOS(inum, 0, snapshot),
-						    SPOS(inum, U64_MAX, snapshot),
-						    0, NULL) ?:
-		      bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
-						    SPOS(inum, 0, snapshot),
-						    SPOS(inum, U64_MAX, snapshot),
-						    0, NULL) ?:
-		      bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
-						    SPOS(inum, 0, snapshot),
-						    SPOS(inum, U64_MAX, snapshot),
-						    0, NULL);
-	} while (ret == -BCH_ERR_transaction_restart_nested);
-	if (ret)
-		goto err;
-retry:
-	bch2_trans_begin(trans);
-
-	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
-			       SPOS(0, inum, snapshot), BTREE_ITER_INTENT);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
-
-	if (!bkey_is_inode(k.k)) {
-		bch2_fs_inconsistent(c,
-				     "inode %llu:%u not found when deleting",
-				     inum, snapshot);
-		ret = -EIO;
-		goto err;
-	}
-
-	bch2_inode_unpack(k, &inode_u);
-
-	/* Subvolume root? */
-	if (inode_u.bi_subvol)
-		bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum);
-
-	bkey_inode_generation_init(&delete.k_i);
-	delete.k.p = iter.pos;
-	delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
-
-	ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
-	      bch2_trans_commit(trans, NULL, NULL,
-				BTREE_INSERT_NOFAIL);
-err:
-	bch2_trans_iter_exit(trans, &iter);
-	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
-		goto retry;
-
-	return ret ?: -BCH_ERR_transaction_restart_nested;
-}
-
 static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
 {
 	struct bch_fs *c = trans->c;
@@ -521,7 +458,6 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
 				    u32 id, u32 ancestor)
 {
 	ssize_t i;
-	u32 top = seen->ids.nr ? seen->ids.data[seen->ids.nr - 1].equiv : 0;
 
 	EBUG_ON(id > ancestor);
 	EBUG_ON(!bch2_snapshot_is_equiv(c, id));
@@ -529,7 +465,7 @@ static bool key_visible_in_snapshot(struct bch_fs *c, struct snapshots_seen *see
 
 	/* @ancestor should be the snapshot most recently added to @seen */
 	EBUG_ON(ancestor != seen->pos.snapshot);
-	EBUG_ON(ancestor != top);
+	EBUG_ON(ancestor != seen->ids.data[seen->ids.nr - 1].equiv);
 
 	if (id == ancestor)
 		return true;
@@ -930,7 +866,7 @@ static int check_inode(struct btree_trans *trans,
 		bch2_trans_unlock(trans);
 		bch2_fs_lazy_rw(c);
 
-		ret = fsck_inode_rm(trans, u.bi_inum, iter->pos.snapshot);
+		ret = bch2_inode_rm_snapshot(trans, u.bi_inum, iter->pos.snapshot);
 		if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
 			bch_err(c, "error in fsck: error while deleting inode: %s",
 				bch2_err_str(ret));
@@ -1198,19 +1134,13 @@ static int overlapping_extents_found(struct btree_trans *trans,
 
 	BUG_ON(bkey_le(pos1, bkey_start_pos(&pos2)));
 
-	prt_str(&buf, "\n  ");
-	bch2_bpos_to_text(&buf, pos1);
-	prt_str(&buf, "\n  ");
-
-	bch2_bkey_to_text(&buf, &pos2);
-	prt_str(&buf, "\n  ");
-
 	bch2_trans_iter_init(trans, &iter, btree, SPOS(pos1.inode, pos1.offset - 1, snapshot), 0);
 	k = bch2_btree_iter_peek_upto(&iter, POS(pos1.inode, U64_MAX));
 	ret = bkey_err(k);
 	if (ret)
 		goto err;
 
+	prt_str(&buf, "\n  ");
 	bch2_bkey_val_to_text(&buf, c, k);
 
 	if (!bpos_eq(pos1, k.k->p)) {
@@ -923,3 +923,66 @@ void bch2_inode_opts_get(struct bch_io_opts *opts, struct bch_fs *c,
 	if (opts->nocow)
 		opts->compression = opts->background_compression = opts->data_checksum = opts->erasure_code = 0;
 }
+
+int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
+{
+	struct bch_fs *c = trans->c;
+	struct btree_iter iter = { NULL };
+	struct bkey_i_inode_generation delete;
+	struct bch_inode_unpacked inode_u;
+	struct bkey_s_c k;
+	int ret;
+
+	do {
+		ret = bch2_btree_delete_range_trans(trans, BTREE_ID_extents,
+						    SPOS(inum, 0, snapshot),
+						    SPOS(inum, U64_MAX, snapshot),
+						    0, NULL) ?:
+		      bch2_btree_delete_range_trans(trans, BTREE_ID_dirents,
+						    SPOS(inum, 0, snapshot),
+						    SPOS(inum, U64_MAX, snapshot),
+						    0, NULL) ?:
+		      bch2_btree_delete_range_trans(trans, BTREE_ID_xattrs,
+						    SPOS(inum, 0, snapshot),
+						    SPOS(inum, U64_MAX, snapshot),
+						    0, NULL);
+	} while (ret == -BCH_ERR_transaction_restart_nested);
+	if (ret)
+		goto err;
+retry:
+	bch2_trans_begin(trans);
+
+	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
+			       SPOS(0, inum, snapshot), BTREE_ITER_INTENT);
+	ret = bkey_err(k);
+	if (ret)
+		goto err;
+
+	if (!bkey_is_inode(k.k)) {
+		bch2_fs_inconsistent(c,
+				     "inode %llu:%u not found when deleting",
+				     inum, snapshot);
+		ret = -EIO;
+		goto err;
+	}
+
+	bch2_inode_unpack(k, &inode_u);
+
+	/* Subvolume root? */
+	if (inode_u.bi_subvol)
+		bch_warn(c, "deleting inode %llu marked as unlinked, but also a subvolume root!?", inode_u.bi_inum);
+
+	bkey_inode_generation_init(&delete.k_i);
+	delete.k.p = iter.pos;
+	delete.v.bi_generation = cpu_to_le32(inode_u.bi_generation + 1);
+
+	ret = bch2_trans_update(trans, &iter, &delete.k_i, 0) ?:
+	      bch2_trans_commit(trans, NULL, NULL,
+				BTREE_INSERT_NOFAIL);
+err:
+	bch2_trans_iter_exit(trans, &iter);
+	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
+		goto retry;
+
+	return ret ?: -BCH_ERR_transaction_restart_nested;
+}
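
bch2_inode_rm_snapshot() — moved here verbatim from fsck.c — is a compact example of the btree-transaction restart idiom used throughout bcachefs: begin, do lookups and updates, commit, and jump back to retry: whenever the error says the whole transaction must be rerun. A standalone sketch of just that control flow; every name in it is a placeholder, not a bcachefs API:

#include <stdbool.h>

/* Standalone sketch of the restart idiom; all names here are made up. */
struct txn { int dummy; };

static void txn_begin(struct txn *t)	{ (void)t; /* drop locks, reset */ }
static int  txn_lookup(struct txn *t)	{ (void)t; return 0; }
static int  txn_update(struct txn *t)	{ (void)t; return 0; }
static int  txn_commit(struct txn *t)	{ (void)t; return 0; }
static bool err_is_restart(int ret)	{ return ret == -1; /* placeholder */ }

static int update_with_retry(struct txn *t)
{
	int ret;
retry:
	txn_begin(t);			/* start over with no locks held */

	ret = txn_lookup(t) ?:		/* ?: chains stop at the first error, */
	      txn_update(t) ?:		/* exactly as in the function above   */
	      txn_commit(t);

	if (err_is_restart(ret))	/* deadlock avoidance etc.: rerun all */
		goto retry;
	return ret;
}
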
@@ -198,4 +198,6 @@ struct bch_opts bch2_inode_opts_to_opts(struct bch_inode_unpacked *);
 void bch2_inode_opts_get(struct bch_io_opts *, struct bch_fs *,
 			 struct bch_inode_unpacked *);
 
+int bch2_inode_rm_snapshot(struct btree_trans *, u64, u32);
+
 #endif /* _BCACHEFS_INODE_H */
@@ -10,7 +10,7 @@
 #include "super-io.h"
 #include "util.h"
 
-#define x(t, n) [n] = #t,
+#define x(t, n, ...) [n] = #t,
 
 const char * const bch2_error_actions[] = {
 	BCH_ERROR_ACTIONS()
@@ -95,7 +95,7 @@ const char * const bch2_fs_usage_types[] = {
 
 #undef x
 
-int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
+static int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
 			      struct printbuf *err)
 {
 	if (!val) {
@@ -113,7 +113,7 @@ int bch2_opt_fix_errors_parse(struct bch_fs *c, const char *val, u64 *res,
 	return 0;
 }
 
-void bch2_opt_fix_errors_to_text(struct printbuf *out,
+static void bch2_opt_fix_errors_to_text(struct printbuf *out,
 				 struct bch_fs *c,
 				 struct bch_sb *sb,
 				 u64 v)
@@ -121,10 +121,10 @@ void bch2_opt_fix_errors_to_text(struct printbuf *out,
 	prt_str(out, bch2_fsck_fix_opts[v]);
 }
 
-static const struct bch_opt_fn bch2_opt_fix_errors = {
-	.parse		= bch2_opt_fix_errors_parse,
-	.to_text	= bch2_opt_fix_errors_to_text,
-};
+#define bch2_opt_fix_errors (struct bch_opt_fn) {	\
+	.parse		= bch2_opt_fix_errors_parse,	\
+	.to_text	= bch2_opt_fix_errors_to_text,	\
+}
 
 const char * const bch2_d_types[BCH_DT_MAX] = {
 	[DT_UNKNOWN]	= "unknown",
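
Turning the file-scope static const struct bch_opt_fn object into a macro that expands to a compound literal keeps every use site source-compatible while no named object has to exist in opts.c; given the commit subject, the likely trigger was a clang warning about the object form, though the diff itself doesn't say. The technique in isolation, with made-up names:

#include <stdio.h>

struct ops {
	int  (*parse)(const char *s);
	void (*print)(int v);
};

static int  my_parse(const char *s)	{ return s && *s; }
static void my_print(int v)		{ printf("%d\n", v); }

/* each use of MY_OPS expands to a fresh anonymous struct value, so no
 * named object needs to be defined or exported for it */
#define MY_OPS (struct ops) {		\
	.parse = my_parse,		\
	.print = my_print,		\
}

int main(void)
{
	struct ops o = MY_OPS;

	o.print(o.parse("x"));
	return 0;
}
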
@@ -83,7 +83,7 @@ static noinline struct snapshot_t *__snapshot_t_mut(struct bch_fs *c, u32 id)
 	if (!new)
 		return NULL;
 
-	old = c->snapshots;
+	old = rcu_dereference_protected(c->snapshots, true);
 	if (old)
 		memcpy(new->s,
 		       rcu_dereference_protected(c->snapshots, true)->s,
@@ -698,6 +698,11 @@ err:
 	return ret;
 }
 
+static int cmp_le32(__le32 l, __le32 r)
+{
+	return cmp_int(le32_to_cpu(l), le32_to_cpu(r));
+}
+
 static int check_snapshot(struct btree_trans *trans,
 			  struct btree_iter *iter,
 			  struct bkey_s_c k)
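
The new cmp_le32() helper replaces cmp_int() as the bubble_sort() comparator in the two hunks below because the snapshot skiplist entries are on-disk __le32 values: comparing raw representations happens to give the right order on little-endian machines but not on big-endian ones. A userspace sketch of the distinction, with cmp_int() spelled out the way the kernel macro behaves:

#include <endian.h>
#include <stdint.h>

/* the usual three-way compare: returns -1, 0 or 1 without overflow */
#define cmp_int(l, r)	(((l) > (r)) - ((l) < (r)))

/* wrong in general: sorts by the stored little-endian byte pattern */
static int cmp_raw(uint32_t l_le, uint32_t r_le)
{
	return cmp_int(l_le, r_le);
}

/* right: convert to host byte order first, as cmp_le32() does */
static int cmp_le32_sketch(uint32_t l_le, uint32_t r_le)
{
	return cmp_int(le32toh(l_le), le32toh(r_le));
}
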
@@ -830,7 +835,7 @@ static int check_snapshot(struct btree_trans *trans,
 			for (i = 0; i < ARRAY_SIZE(u->v.skip); i++)
 				u->v.skip[i] = cpu_to_le32(snapshot_skiplist_get(c, parent_id));
 
-			bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_int);
+			bubble_sort(u->v.skip, ARRAY_SIZE(u->v.skip), cmp_le32);
 			s = u->v;
 		}
 		ret = 0;
@@ -946,7 +951,7 @@ int bch2_check_subvols(struct bch_fs *c)
 
 void bch2_fs_snapshots_exit(struct bch_fs *c)
 {
-	kfree(c->snapshots);
+	kfree(rcu_dereference_protected(c->snapshots, true));
 }
 
 int bch2_snapshots_read(struct bch_fs *c)
@@ -1123,7 +1128,7 @@ static int create_snapids(struct btree_trans *trans, u32 parent, u32 tree,
 		for (j = 0; j < ARRAY_SIZE(n->v.skip); j++)
 			n->v.skip[j] = cpu_to_le32(snapshot_skiplist_get(c, parent));
 
-		bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_int);
+		bubble_sort(n->v.skip, ARRAY_SIZE(n->v.skip), cmp_le32);
 		SET_BCH_SNAPSHOT_SUBVOL(&n->v, true);
 
 		ret = bch2_mark_snapshot(trans, BTREE_ID_snapshots, 0,
@@ -261,16 +261,13 @@ struct bch_sb_field *bch2_sb_field_resize(struct bch_sb_handle *sb,
 
 /* Superblock validate: */
 
-static inline void __bch2_sb_layout_size_assert(void)
-{
-	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
-}
-
 static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out)
 {
 	u64 offset, prev_offset, max_sectors;
 	unsigned i;
 
+	BUILD_BUG_ON(sizeof(struct bch_sb_layout) != 512);
+
 	if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) &&
 	    !uuid_equal(&layout->magic, &BCHFS_MAGIC)) {
 		prt_printf(out, "Not a bcachefs superblock layout");
tools-util.c
@@ -330,21 +330,21 @@ struct fiemap_extent fiemap_iter_next(struct fiemap_iter *iter)
 {
 	struct fiemap_extent e;
 
-	BUG_ON(iter->idx > iter->f.fm_mapped_extents);
+	BUG_ON(iter->idx > iter->f->fm_mapped_extents);
 
-	if (iter->idx == iter->f.fm_mapped_extents) {
-		xioctl(iter->fd, FS_IOC_FIEMAP, &iter->f);
+	if (iter->idx == iter->f->fm_mapped_extents) {
+		xioctl(iter->fd, FS_IOC_FIEMAP, iter->f);
 
-		if (!iter->f.fm_mapped_extents)
+		if (!iter->f->fm_mapped_extents)
 			return (struct fiemap_extent) { .fe_length = 0 };
 
 		iter->idx = 0;
 	}
 
-	e = iter->f.fm_extents[iter->idx++];
+	e = iter->f->fm_extents[iter->idx++];
 	BUG_ON(!e.fe_length);
 
-	iter->f.fm_start = e.fe_logical + e.fe_length;
+	iter->f->fm_start = e.fe_logical + e.fe_length;
 
 	return e;
 }
tools-util.h
@@ -115,8 +115,7 @@ static inline struct range hole_iter_next(struct hole_iter *iter)
 #include <linux/fiemap.h>
 
 struct fiemap_iter {
-	struct fiemap		f;
-	struct fiemap_extent	fe[1024];
+	struct fiemap		*f;
 	unsigned		idx;
 	int			fd;
 };
@@ -125,11 +124,20 @@ static inline void fiemap_iter_init(struct fiemap_iter *iter, int fd)
 {
 	memset(iter, 0, sizeof(*iter));
 
-	iter->f.fm_extent_count	= ARRAY_SIZE(iter->fe);
-	iter->f.fm_length	= FIEMAP_MAX_OFFSET;
+	iter->f = xmalloc(sizeof(struct fiemap) +
+			  sizeof(struct fiemap_extent) * 1024);
+
+	iter->f->fm_extent_count	= 1024;
+	iter->f->fm_length		= FIEMAP_MAX_OFFSET;
 	iter->fd = fd;
 }
 
+static inline void fiemap_iter_exit(struct fiemap_iter *iter)
+{
+	free(iter->f);
+	memset(iter, 0, sizeof(*iter));
+}
+
 struct fiemap_extent fiemap_iter_next(struct fiemap_iter *);
 
 #define fiemap_for_each(fd, iter, extent)				\
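
The tools-util change above is the standard fix for a flexible-array-member layout problem: struct fiemap ends with an open-ended fm_extents[] array, so embedding it in struct fiemap_iter with another field (fe[1024]) placed after it relies on undefined layout, which clang flags (GCC historically accepted it as an extension). The fix allocates the header and its extent array as a single block and frees it in the new fiemap_iter_exit(). A minimal standalone sketch of the allocation pattern:

#include <stdlib.h>

struct vec {
	unsigned	count;		/* capacity of items[] */
	unsigned	used;
	int		items[];	/* flexible array member: must be last */
};

/* allocate header + n items in one block, as fiemap_iter_init() now
 * does for struct fiemap and its fm_extents[] */
static struct vec *vec_alloc(unsigned n)
{
	struct vec *v = calloc(1, sizeof(*v) + n * sizeof(v->items[0]));

	if (v)
		v->count = n;
	return v;
}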