mirror of https://github.com/koverstreet/bcachefs-tools.git
Temporary import of valgrind fixes for bcachefs branch.
commit 36e4b3147e
parent 37899250f3
@@ -152,6 +152,7 @@ void bch2_alloc_pack(struct bkey_i_alloc *dst,
 {
 	unsigned idx = 0;
 	void *d = dst->v.data;
+	unsigned bytes;
 
 	dst->v.fields = 0;
 	dst->v.gen = src.gen;
@@ -160,7 +161,9 @@ void bch2_alloc_pack(struct bkey_i_alloc *dst,
 	BCH_ALLOC_FIELDS()
 #undef x
 
-	set_bkey_val_bytes(&dst->k, (void *) d - (void *) &dst->v);
+	bytes = (void *) d - (void *) &dst->v;
+	set_bkey_val_bytes(&dst->k, bytes);
+	memset_u64s_tail(&dst->v, 0, bytes);
 }
 
 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)
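
Why there are spare bytes to zero at all: bkey values are sized in whole u64s, so when the packed alloc fields end mid-word, the tail of the last u64 is never written before the key reaches a disk image, and valgrind flags the write. A hedged sketch of the sizing mismatch (the field width is illustrative; memset_u64s_tail() itself is added in the util.h hunk at the bottom of this commit):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* Suppose packing wrote 11 bytes of alloc fields into the value. */
	unsigned bytes = 11;

	/* The key's size field counts u64s, so the on-disk value spans
	 * DIV_ROUND_UP(11, 8) == 2 u64s == 16 bytes... */
	unsigned val_bytes = DIV_ROUND_UP(bytes, sizeof(uint64_t)) * sizeof(uint64_t);

	/* ...leaving 5 bytes that nothing initialized before this fix. */
	printf("uninitialized padding: %u bytes\n", val_bytes - bytes);
	return 0;
}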
@@ -83,6 +83,9 @@ static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 	if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp))
 		goto err;
 
+	memset(&b->data->csum, 0, sizeof b->data->csum);
+	b->data->flags = 0;
+
 	bc->used++;
 	list_move(&b->list, &bc->freeable);
 	return;
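
This hunk fixes an allocation site instead of a packing site: the node buffer comes back from the allocator with undefined contents, and csum and flags live in the on-disk node header, so they are given defined values up front. A minimal sketch of the same idea, assuming an illustrative header layout (not the real struct btree_node):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative on-disk header; the real struct has many more fields. */
struct node_hdr {
	uint64_t csum[2];
	uint64_t flags;
};

static struct node_hdr *node_alloc(void)
{
	/* Like a kernel allocation without __GFP_ZERO, malloc() returns
	 * undefined bytes. */
	struct node_hdr *h = malloc(sizeof(*h));

	if (!h)
		return NULL;

	/* Define the header fields that reach the write path, mirroring
	 * the memset of csum and the flags = 0 above. */
	memset(h->csum, 0, sizeof(h->csum));
	h->flags = 0;
	return h;
}

int main(void)
{
	free(node_alloc());
	return 0;
}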
@@ -95,6 +95,7 @@ void bch2_inode_pack(struct bkey_inode_buf *packed,
 	u8 *end = (void *) &packed[1];
 	u8 *last_nonzero_field = out;
 	unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
+	unsigned bytes;
 
 	bkey_inode_init(&packed->inode.k_i);
 	packed->inode.k.p.inode = inode->bi_inum;
@@ -117,10 +118,9 @@ void bch2_inode_pack(struct bkey_inode_buf *packed,
 	out = last_nonzero_field;
 	nr_fields = last_nonzero_fieldnr;
 
-	set_bkey_val_bytes(&packed->inode.k, out - (u8 *) &packed->inode.v);
-	memset(out, 0,
-	       (u8 *) &packed->inode.v +
-	       bkey_val_bytes(&packed->inode.k) - out);
+	bytes = out - (u8 *) &packed->inode.v;
+	set_bkey_val_bytes(&packed->inode.k, bytes);
+	memset_u64s_tail(&packed->inode.v, 0, bytes);
 
 	SET_INODE_NR_FIELDS(&packed->inode.v, nr_fields);
 
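Unlike the alloc_pack case, the old inode code already zeroed the tail: assuming bkey_val_bytes() reports the u64-rounded size that set_bkey_val_bytes() stored, the removed memset ran from out to the end of the value, which is the same region memset_u64s_tail() covers. The rewrite mainly routes both packers through the shared helper and a common bytes variable.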
@@ -949,6 +949,25 @@ int bch2_fs_mark_dirty(struct bch_fs *c)
 	return ret;
 }
 
+static void
+entry_init_u64s(struct jset_entry *entry, unsigned u64s)
+{
+	memset(entry, 0, u64s * sizeof(u64));
+
+	/*
+	 * The u64s field counts from the start of data, ignoring the shared
+	 * fields.
+	 */
+	entry->u64s = u64s - 1;
+}
+
+static void
+entry_init_size(struct jset_entry *entry, size_t size)
+{
+	unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));
+	entry_init_u64s(entry, u64s);
+}
+
 struct jset_entry *
 bch2_journal_super_entries_add_common(struct bch_fs *c,
 				      struct jset_entry *entry,
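
The comment in entry_init_u64s() is the key accounting detail: a jset entry's u64s field counts only the payload u64s, excluding the one-u64 entry header, which is why the helper stores u64s - 1 after zeroing the full u64s * 8 bytes. A small sketch of that arithmetic, using an illustrative entry layout rather than the real struct jset_entry:

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Illustrative stand-in: a one-u64 header followed by payload u64s. */
struct demo_entry {
	uint16_t u64s;
	uint8_t  pad[6];
	uint64_t data[];
};

static void entry_init_u64s(struct demo_entry *entry, unsigned u64s)
{
	memset(entry, 0, u64s * sizeof(uint64_t));
	/* u64s counts from the start of data, ignoring the header u64. */
	entry->u64s = u64s - 1;
}

int main(void)
{
	uint64_t buf[4];
	struct demo_entry *e = (void *) buf;

	/* A 24-byte entry is DIV_ROUND_UP(24, 8) == 3 u64s total, so the
	 * stored count is 2 payload u64s, as entry_init_size() would do. */
	entry_init_u64s(e, DIV_ROUND_UP(24, sizeof(uint64_t)));
	assert(e->u64s == 2);
	return 0;
}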
@@ -963,7 +982,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 	     r < c->btree_roots + BTREE_ID_NR;
 	     r++)
 		if (r->alive) {
-			entry->u64s = r->key.u64s;
+			entry_init_u64s(entry, r->key.u64s + 1);
 			entry->btree_id = r - c->btree_roots;
 			entry->level = r->level;
 			entry->type = BCH_JSET_ENTRY_btree_root;
@@ -988,8 +1007,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		memset(u, 0, sizeof(*u));
-		u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		entry_init_size(entry, sizeof(*u));
 		u->entry.type = BCH_JSET_ENTRY_usage;
 		u->entry.btree_id = FS_USAGE_INODES;
 		u->v = cpu_to_le64(c->usage_base->nr_inodes);
@@ -1001,8 +1019,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		memset(u, 0, sizeof(*u));
-		u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		entry_init_size(entry, sizeof(*u));
 		u->entry.type = BCH_JSET_ENTRY_usage;
 		u->entry.btree_id = FS_USAGE_KEY_VERSION;
 		u->v = cpu_to_le64(atomic64_read(&c->key_version));
@@ -1014,8 +1031,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		memset(u, 0, sizeof(*u));
-		u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		entry_init_size(entry, sizeof(*u));
 		u->entry.type = BCH_JSET_ENTRY_usage;
 		u->entry.btree_id = FS_USAGE_RESERVED;
 		u->entry.level = i;
@@ -1030,10 +1046,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_data_usage *u =
 			container_of(entry, struct jset_entry_data_usage, entry);
 
-		int u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
-					sizeof(u64)) - 1;
-		memset(u, 0, u64s * sizeof(u64));
-		u->entry.u64s = u64s;
+		entry_init_size(entry, sizeof(*u) + e->nr_devs);
 		u->entry.type = BCH_JSET_ENTRY_data_usage;
 		u->v = cpu_to_le64(c->usage_base->replicas[i]);
 		memcpy(&u->r, e, replicas_entry_bytes(e));
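This last conversion is where the helpers fix a real gap rather than just deduplicate: the removed code computed u64s with the - 1 already applied and then zeroed only u64s * sizeof(u64) bytes, one u64 short of the full entry. Illustratively, if sizeof(*u) + e->nr_devs came to 19 bytes, DIV_ROUND_UP(19, 8) - 1 == 2, so only 16 of the entry's 24 bytes were zeroed, and whatever the memcpy of the replicas entry didn't overwrite stayed uninitialized. entry_init_size() zeroes the full rounded size before setting the count.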
@@ -628,6 +628,14 @@ static inline void memmove_u64s(void *dst, const void *src,
 		__memmove_u64s_up(dst, src, u64s);
 }
 
+/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
+static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
+{
+	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;
+
+	memset(s + bytes, c, rem);
+}
+
 void sort_cmp_size(void *base, size_t num, size_t size,
 		   int (*cmp_func)(const void *, const void *, size_t),
 		   void (*swap_func)(void *, void *, size_t));
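
A quick usage check of the new helper, assuming the kernel-style round_up() for power-of-two alignment that the surrounding header is expected to provide (the sketch casts to char * where the original relies on GCC's void * arithmetic):

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(uint64_t)) - bytes;

	memset((char *) s + bytes, c, rem);
}

int main(void)
{
	uint64_t buf[2] = { ~0ULL, ~0ULL };

	/* 13 live bytes: round_up(13, 8) == 16, so bytes 13..15 are zeroed. */
	memset_u64s_tail(buf, 0, 13);
	assert(((uint8_t *) buf)[12] == 0xff);
	assert(((uint8_t *) buf)[13] == 0 && ((uint8_t *) buf)[15] == 0);

	/* Already u64-aligned: rem == 0, nothing is written. */
	memset_u64s_tail(buf, 0, 16);
	return 0;
}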