Temporary import of valgrind fixes for bcachefs branch.

Justin Husted 2019-10-31 19:10:19 -07:00 committed by Kent Overstreet
parent 37899250f3
commit 36e4b3147e
5 changed files with 43 additions and 16 deletions
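The five changes below all address one class of valgrind complaint: on-disk buffers are sized and written in whole u64s, but only the leading bytes were initialized, so the trailing padding is flagged as uninitialized when it is written out. A minimal standalone illustration of that bug class (not code from the commit; compile with gcc and run under valgrind):

#include <stdint.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	uint64_t buf[2];	/* 16 bytes, deliberately left uninitialized */

	/* Pack 11 bytes of data; bytes 11..15 remain undefined. */
	memset(buf, 0xaa, 11);

	/*
	 * Writing the full u64-rounded buffer leaks uninitialized bytes;
	 * valgrind reports "Syscall param write(buf) points to
	 * uninitialised byte(s)" here.
	 */
	write(STDOUT_FILENO, buf, sizeof(buf));
	return 0;
}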

alloc_background.c

@@ -152,6 +152,7 @@ void bch2_alloc_pack(struct bkey_i_alloc *dst,
 {
 	unsigned idx = 0;
 	void *d = dst->v.data;
+	unsigned bytes;
 
 	dst->v.fields	= 0;
 	dst->v.gen	= src.gen;
@@ -160,7 +161,9 @@ void bch2_alloc_pack(struct bkey_i_alloc *dst,
 	BCH_ALLOC_FIELDS()
 #undef  x
 
-	set_bkey_val_bytes(&dst->k, (void *) d - (void *) &dst->v);
+	bytes = (void *) d - (void *) &dst->v;
+	set_bkey_val_bytes(&dst->k, bytes);
+	memset_u64s_tail(&dst->v, 0, bytes);
 }
 
 static unsigned bch_alloc_val_u64s(const struct bch_alloc *a)

btree_cache.c

@@ -83,6 +83,9 @@ static void btree_node_data_alloc(struct bch_fs *c, struct btree *b, gfp_t gfp)
 	if (bch2_btree_keys_alloc(b, btree_page_order(c), gfp))
 		goto err;
 
+	memset(&b->data->csum, 0, sizeof b->data->csum);
+	b->data->flags = 0;
+
 	bc->used++;
 	list_move(&b->list, &bc->freeable);
 	return;

inode.c

@@ -95,6 +95,7 @@ void bch2_inode_pack(struct bkey_inode_buf *packed,
 	u8 *end = (void *) &packed[1];
 	u8 *last_nonzero_field = out;
 	unsigned nr_fields = 0, last_nonzero_fieldnr = 0;
+	unsigned bytes;
 
 	bkey_inode_init(&packed->inode.k_i);
 	packed->inode.k.p.inode = inode->bi_inum;
@@ -117,10 +118,9 @@ void bch2_inode_pack(struct bkey_inode_buf *packed,
 	out = last_nonzero_field;
 	nr_fields = last_nonzero_fieldnr;
 
-	set_bkey_val_bytes(&packed->inode.k, out - (u8 *) &packed->inode.v);
-	memset(out, 0,
-	       (u8 *) &packed->inode.v +
-	       bkey_val_bytes(&packed->inode.k) - out);
+	bytes = out - (u8 *) &packed->inode.v;
+	set_bkey_val_bytes(&packed->inode.k, bytes);
+	memset_u64s_tail(&packed->inode.v, 0, bytes);
 
 	SET_INODE_NR_FIELDS(&packed->inode.v, nr_fields);
 

super-io.c

@@ -949,6 +949,25 @@ int bch2_fs_mark_dirty(struct bch_fs *c)
 	return ret;
 }
 
+static void
+entry_init_u64s(struct jset_entry *entry, unsigned u64s)
+{
+	memset(entry, 0, u64s * sizeof(u64));
+
+	/*
+	 * The u64s field counts from the start of data, ignoring the shared
+	 * fields.
+	 */
+	entry->u64s = u64s - 1;
+}
+
+static void
+entry_init_size(struct jset_entry *entry, size_t size)
+{
+	unsigned u64s = DIV_ROUND_UP(size, sizeof(u64));
+	entry_init_u64s(entry, u64s);
+}
+
 struct jset_entry *
 bch2_journal_super_entries_add_common(struct bch_fs *c,
 				      struct jset_entry *entry,
@@ -963,7 +982,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 	     r < c->btree_roots + BTREE_ID_NR;
 	     r++)
 		if (r->alive) {
-			entry->u64s	= r->key.u64s;
+			entry_init_u64s(entry, r->key.u64s + 1);
 			entry->btree_id = r - c->btree_roots;
 			entry->level	= r->level;
 			entry->type	= BCH_JSET_ENTRY_btree_root;
@@ -988,8 +1007,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		memset(u, 0, sizeof(*u));
-		u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		entry_init_size(entry, sizeof(*u));
 		u->entry.type	= BCH_JSET_ENTRY_usage;
 		u->entry.btree_id = FS_USAGE_INODES;
 		u->v		= cpu_to_le64(c->usage_base->nr_inodes);
@@ -1001,8 +1019,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		memset(u, 0, sizeof(*u));
-		u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		entry_init_size(entry, sizeof(*u));
 		u->entry.type	= BCH_JSET_ENTRY_usage;
 		u->entry.btree_id = FS_USAGE_KEY_VERSION;
 		u->v		= cpu_to_le64(atomic64_read(&c->key_version));
@@ -1014,8 +1031,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_usage *u =
 			container_of(entry, struct jset_entry_usage, entry);
 
-		memset(u, 0, sizeof(*u));
-		u->entry.u64s = DIV_ROUND_UP(sizeof(*u), sizeof(u64)) - 1;
+		entry_init_size(entry, sizeof(*u));
 		u->entry.type	= BCH_JSET_ENTRY_usage;
 		u->entry.btree_id = FS_USAGE_RESERVED;
 		u->entry.level	= i;
@@ -1030,10 +1046,7 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_data_usage *u =
 			container_of(entry, struct jset_entry_data_usage, entry);
-		int u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
-					sizeof(u64)) - 1;
 
-		memset(u, 0, u64s * sizeof(u64));
-		u->entry.u64s	= u64s;
+		entry_init_size(entry, sizeof(*u) + e->nr_devs);
 		u->entry.type	= BCH_JSET_ENTRY_data_usage;
 		u->v		= cpu_to_le64(c->usage_base->replicas[i]);
 		memcpy(&u->r, e, replicas_entry_bytes(e));
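The two helpers added above centralize a convention of the journal-entry format: jset_entry's u64s field counts the 64-bit words of payload following the entry's own 8-byte header, so an entry occupying n u64s in memory stores n - 1 (hence the + 1 at the btree_root call site). A hedged user-space sketch; the struct here is a simplified stand-in for the real struct jset_entry in bcachefs_format.h:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t u64;

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Simplified stand-in: one 8-byte header word, then the payload. */
struct jset_entry {
	uint16_t u64s;		/* payload size in u64s, header excluded */
	uint8_t  btree_id;
	uint8_t  level;
	uint8_t  type;
	uint8_t  pad[3];
	u64      _data[];
};

static void entry_init_u64s(struct jset_entry *entry, unsigned u64s)
{
	memset(entry, 0, u64s * sizeof(u64));
	entry->u64s = u64s - 1;	/* counts from the start of data */
}

static void entry_init_size(struct jset_entry *entry, size_t size)
{
	entry_init_u64s(entry, DIV_ROUND_UP(size, sizeof(u64)));
}

int main(void)
{
	u64 buf[4];
	struct jset_entry *entry = (struct jset_entry *) buf;

	/* A 17-byte entry rounds up to 3 u64s total, so u64s stores 2. */
	entry_init_size(entry, 17);
	printf("stored u64s = %u\n", (unsigned) entry->u64s);
	return 0;
}

Zeroing the whole rounded-up entry before filling it in is what makes these entries safe to write out, matching the memset_u64s_tail() changes elsewhere in this commit.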

util.h

@@ -628,6 +628,14 @@ static inline void memmove_u64s(void *dst, const void *src,
 		__memmove_u64s_up(dst, src, u64s);
 }
 
+/* Set the last few bytes up to a u64 boundary given an offset into a buffer. */
+static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
+{
+	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;
+
+	memset(s + bytes, c, rem);
+}
+
 void sort_cmp_size(void *base, size_t num, size_t size,
 	  int (*cmp_func)(const void *, const void *, size_t),
 	  void (*swap_func)(void *, void *, size_t));
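A short user-space demonstration of the helper above, with the kernel's round_up() macro open-coded since it is not available outside the tree. Packing 11 bytes leaves 5 bytes before the next u64 boundary, and memset_u64s_tail() zeroes exactly those:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint64_t u64;

/* Open-coded stand-in for the kernel's round_up() macro. */
#define round_up(x, y)	((((x) + (y) - 1) / (y)) * (y))

/* Same definition as added to util.h above. */
static inline void memset_u64s_tail(void *s, int c, unsigned bytes)
{
	unsigned rem = round_up(bytes, sizeof(u64)) - bytes;

	memset((char *) s + bytes, c, rem);
}

int main(void)
{
	u64 buf[2];
	unsigned bytes = 11;		/* bytes actually packed */

	memset(buf, 0xaa, bytes);	/* stand-in for real field data */
	memset_u64s_tail(buf, 0, bytes);/* zeroes bytes 11..15 */

	printf("%016llx %016llx\n",
	       (unsigned long long) buf[0],
	       (unsigned long long) buf[1]);
	return 0;
}

After the call, buf[1] holds only the three 0xaa bytes that were actually packed; everything past offset 11 reads as zero rather than as uninitialized memory, which is what silences the valgrind reports this commit targets.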