Update bcachefs sources to 07c2895cb3 bcachefs: Add a valgrind memcheck hint

Kent Overstreet 2021-10-13 11:00:02 -04:00
parent 385dcecb96
commit e489658c49
13 changed files with 139 additions and 131 deletions

@@ -1 +1 @@
-4114ced1db465b8f4e7f4d6a78aa11416a9ab5d9
+07c2895cb3372c0e7b406ab13264de80c7ff19eb

@@ -165,11 +165,20 @@ static bool bch2_btree_node_upgrade(struct btree_trans *trans,
 {
 	struct btree *b = path->l[level].b;

-	EBUG_ON(btree_lock_want(path, level) != BTREE_NODE_INTENT_LOCKED);
-
 	if (!is_btree_node(path, level))
 		return false;

+	switch (btree_lock_want(path, level)) {
+	case BTREE_NODE_UNLOCKED:
+		BUG_ON(btree_node_locked(path, level));
+		return true;
+	case BTREE_NODE_READ_LOCKED:
+		BUG_ON(btree_node_intent_locked(path, level));
+		return bch2_btree_node_relock(trans, path, level);
+	case BTREE_NODE_INTENT_LOCKED:
+		break;
+	}
+
 	if (btree_node_intent_locked(path, level))
 		return true;
@@ -364,7 +373,8 @@ static void bch2_btree_path_verify_locks(struct btree_path *path)
 	unsigned l;

 	if (!path->nodes_locked) {
-		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE);
+		BUG_ON(path->uptodate == BTREE_ITER_UPTODATE &&
+		       btree_path_node(path, path->level));
 		return;
 	}
@@ -1351,7 +1361,8 @@ retry_all:
 		EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));

-		if (path->nodes_locked)
+		if (path->nodes_locked ||
+		    !btree_path_node(path, path->level))
 			i++;
 	}
@@ -1866,13 +1877,14 @@ bch2_btree_iter_traverse(struct btree_iter *iter)
 struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 {
+	struct btree_trans *trans = iter->trans;
 	struct btree *b = NULL;
 	int ret;

 	EBUG_ON(iter->path->cached);
 	bch2_btree_iter_verify(iter);

-	ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
+	ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
 	if (ret)
 		goto out;
@@ -1884,7 +1896,11 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
 	bkey_init(&iter->k);
 	iter->k.p = iter->pos = b->key.k.p;
+
+	iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
+					iter->flags & BTREE_ITER_INTENT);
 	iter->path->should_be_locked = true;
+	BUG_ON(iter->path->uptodate);
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);
@@ -1949,7 +1965,11 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
 	bkey_init(&iter->k);
 	iter->k.p = iter->pos = b->key.k.p;
+
+	iter->path = btree_path_set_pos(trans, iter->path, b->key.k.p,
+					iter->flags & BTREE_ITER_INTENT);
 	iter->path->should_be_locked = true;
+	BUG_ON(iter->path->uptodate);
 out:
 	bch2_btree_iter_verify_entry_exit(iter);
 	bch2_btree_iter_verify(iter);

@@ -1945,9 +1945,16 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 {
 	struct bch_fs *c = trans->c;
 	struct btree *new_hash = NULL;
+	struct btree_path *path = iter->path;
 	struct closure cl;
 	int ret = 0;

+	if (!btree_node_intent_locked(path, b->c.level) &&
+	    !bch2_btree_path_upgrade(trans, path, b->c.level + 1)) {
+		btree_trans_restart(trans);
+		return -EINTR;
+	}
+
 	closure_init_stack(&cl);

 	/*
@@ -1966,8 +1973,10 @@ int bch2_btree_node_update_key(struct btree_trans *trans, struct btree_iter *ite
 		new_hash = bch2_btree_node_mem_alloc(c);
 	}

+	path->intent_ref++;
 	ret = __bch2_btree_node_update_key(trans, iter, b, new_hash,
 					   new_key, skip_triggers);
+	--path->intent_ref;

 	if (new_hash) {
 		mutex_lock(&c->btree_cache.lock);

@@ -191,34 +191,15 @@ int __bch2_dirent_read_target(struct btree_trans *trans,
 	if (likely(d.v->d_type != DT_SUBVOL)) {
 		*inum = le64_to_cpu(d.v->d_inum);
 	} else {
-		struct btree_iter iter;
-		struct bkey_s_c k;
-		struct bkey_s_c_subvolume s;
+		struct bch_subvolume s;
 		int ret;

 		*subvol = le64_to_cpu(d.v->d_inum);
-		bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
-				     POS(0, *subvol),
-				     BTREE_ITER_CACHED);
-		k = bch2_btree_iter_peek_slot(&iter);
-		ret = bkey_err(k);
-		if (ret)
-			goto err;

-		if (k.k->type != KEY_TYPE_subvolume) {
-			ret = -ENOENT;
-			goto err;
-		}
+		ret = bch2_subvolume_get(trans, *subvol, !is_fsck, BTREE_ITER_CACHED, &s);

-		s = bkey_s_c_to_subvolume(k);
-		*snapshot = le32_to_cpu(s.v->snapshot);
-		*inum = le64_to_cpu(s.v->inode);
-err:
-		if (ret == -ENOENT && !is_fsck)
-			bch2_fs_inconsistent(trans->c, "pointer to missing subvolume %u",
-					     *subvol);
-		bch2_trans_iter_exit(trans, &iter);
+		*snapshot = le32_to_cpu(s.snapshot);
+		*inum = le64_to_cpu(s.inode);
 	}

 	return ret;

@@ -67,26 +67,14 @@ int bch2_create_trans(struct btree_trans *trans,
 		if (!snapshot_src.inum) {
 			/* Inode wasn't specified, just snapshot: */
-			struct btree_iter subvol_iter;
-			struct bkey_s_c k;
-
-			bch2_trans_iter_init(trans, &subvol_iter, BTREE_ID_subvolumes,
-					     POS(0, snapshot_src.subvol), 0);
-			k = bch2_btree_iter_peek_slot(&subvol_iter);
-
-			ret = bkey_err(k);
-			if (!ret && k.k->type != KEY_TYPE_subvolume) {
-				bch_err(c, "subvolume %u not found",
-					snapshot_src.subvol);
-				ret = -ENOENT;
-			}
-
-			if (!ret)
-				snapshot_src.inum = le64_to_cpu(bkey_s_c_to_subvolume(k).v->inode);
-			bch2_trans_iter_exit(trans, &subvol_iter);
+			struct bch_subvolume s;

+			ret = bch2_subvolume_get(trans, snapshot_src.subvol, true,
+						 BTREE_ITER_CACHED, &s);
 			if (ret)
 				goto err;
+
+			snapshot_src.inum = le64_to_cpu(s.inode);
 		}

 		ret = bch2_inode_peek(trans, &inode_iter, new_inode, snapshot_src,
@@ -279,18 +267,33 @@ int bch2_unlink_trans(struct btree_trans *trans,
 	if (ret)
 		goto err;

-	if (deleting_snapshot == 1 && !inode_u->bi_subvol) {
-		ret = -ENOENT;
-		goto err;
-	}
-
 	if (deleting_snapshot <= 0 && S_ISDIR(inode_u->bi_mode)) {
 		ret = bch2_empty_dir_trans(trans, inum);
 		if (ret)
 			goto err;
 	}

-	if (inode_u->bi_subvol) {
+	if (deleting_snapshot < 0 &&
+	    inode_u->bi_subvol) {
+		struct bch_subvolume s;
+
+		ret = bch2_subvolume_get(trans, inode_u->bi_subvol, true,
+					 BTREE_ITER_CACHED|
+					 BTREE_ITER_WITH_UPDATES,
+					 &s);
+		if (ret)
+			goto err;
+
+		if (BCH_SUBVOLUME_SNAP(&s))
+			deleting_snapshot = 1;
+	}
+
+	if (deleting_snapshot == 1) {
+		if (!inode_u->bi_subvol) {
+			ret = -ENOENT;
+			goto err;
+		}
+
 		ret = bch2_subvolume_delete(trans, inode_u->bi_subvol,
 					    deleting_snapshot);
 		if (ret)
@@ -309,6 +312,8 @@ int bch2_unlink_trans(struct btree_trans *trans,
 		ret = bch2_btree_iter_traverse(&dirent_iter);
 		if (ret)
 			goto err;
+	} else {
+		bch2_inode_nlink_dec(inode_u);
 	}

 	if (inode_u->bi_dir == dirent_iter.pos.inode &&
@@ -319,7 +324,6 @@ int bch2_unlink_trans(struct btree_trans *trans,
 	dir_u->bi_mtime = dir_u->bi_ctime = inode_u->bi_ctime = now;
 	dir_u->bi_nlink -= is_subdir_for_nlink(inode_u);
-	bch2_inode_nlink_dec(inode_u);

 	ret = bch2_hash_delete_at(trans, bch2_dirent_hash_desc,
 				  &dir_hash, &dirent_iter,

@@ -103,29 +103,14 @@ static int snapshot_lookup_subvol(struct btree_trans *trans, u32 snapshot,
 static int __subvol_lookup(struct btree_trans *trans, u32 subvol,
 			   u32 *snapshot, u64 *inum)
 {
-	struct btree_iter iter;
-	struct bkey_s_c k;
+	struct bch_subvolume s;
 	int ret;

-	bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
-			     POS(0, subvol), 0);
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
+	ret = bch2_subvolume_get(trans, subvol, false, 0, &s);

-	if (k.k->type != KEY_TYPE_subvolume) {
-		bch_err(trans->c, "subvolume %u not fonud", subvol);
-		ret = -ENOENT;
-		goto err;
-	}
-
-	*snapshot = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
-	*inum = le64_to_cpu(bkey_s_c_to_subvolume(k).v->inode);
-err:
-	bch2_trans_iter_exit(trans, &iter);
+	*snapshot = le32_to_cpu(s.snapshot);
+	*inum = le64_to_cpu(s.inode);
 	return ret;
 }

 static int subvol_lookup(struct btree_trans *trans, u32 subvol,

@@ -51,7 +51,8 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
 			     BTREE_ITER_PREFETCH|
 			     BTREE_ITER_ALL_SNAPSHOTS);

-	while ((k = bch2_btree_iter_peek(&iter)).k &&
+	while ((bch2_trans_begin(&trans),
+		(k = bch2_btree_iter_peek(&iter)).k) &&
 	       !(ret = bkey_err(k))) {
 		if (!bch2_bkey_has_device(k, dev_idx)) {
 			bch2_btree_iter_advance(&iter);
@@ -72,8 +73,6 @@ static int __bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags
 		 */
 		bch2_extent_normalize(c, bkey_i_to_s(sk.k));

-		bch2_btree_iter_set_pos(&iter, bkey_start_pos(&sk.k->k));
-
 		ret   = bch2_btree_iter_traverse(&iter) ?:
 			bch2_trans_update(&trans, &iter, sk.k,
 					  BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
@@ -125,12 +124,14 @@ static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
 	closure_init_stack(&cl);

 	for (id = 0; id < BTREE_ID_NR; id++) {
-		for_each_btree_node(&trans, iter, id, POS_MIN,
-				    BTREE_ITER_PREFETCH, b) {
-retry:
+		bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0,
+					  BTREE_ITER_PREFETCH);
+
+		while (bch2_trans_begin(&trans),
+		       (b = bch2_btree_iter_peek_node(&iter))) {
 			if (!bch2_bkey_has_device(bkey_i_to_s_c(&b->key),
 						  dev_idx))
-				continue;
+				goto next;

 			bch2_bkey_buf_copy(&k, c, &b->key);
@@ -143,14 +144,16 @@ retry:
 			ret = bch2_btree_node_update_key(&trans, &iter, b, k.k, false);
 			if (ret == -EINTR) {
-				b = bch2_btree_iter_peek_node(&iter);
 				ret = 0;
-				goto retry;
+				continue;
 			}

 			if (ret) {
 				bch_err(c, "Error updating btree node key: %i", ret);
 				break;
 			}
+
+next:
+			bch2_btree_iter_next_node(&iter);
 		}

 		bch2_trans_iter_exit(&trans, &iter);

@@ -883,9 +883,11 @@ static int bch2_move_btree(struct bch_fs *c,
 	     id++) {
 		stats->btree_id = id;

-		for_each_btree_node(&trans, iter, id,
-				    id == start_btree_id ? start_pos : POS_MIN,
-				    BTREE_ITER_PREFETCH, b) {
+		bch2_trans_node_iter_init(&trans, &iter, id, POS_MIN, 0, 0,
+					  BTREE_ITER_PREFETCH);
+
+		while (bch2_trans_begin(&trans),
+		       (b = bch2_btree_iter_peek_node(&iter))) {
 			if (kthread && kthread_should_stop())
 				break;
@@ -911,6 +913,7 @@ static int bch2_move_btree(struct bch_fs *c,
 					b->data->keys.seq, 0) ?: ret;
 next:
 			bch2_trans_cond_resched(&trans);
+			bch2_btree_iter_next_node(&iter);
 		}

 		bch2_trans_iter_exit(&trans, &iter);
@@ -943,16 +946,9 @@ static enum data_cmd rereplicate_pred(struct bch_fs *c, void *arg,
 			     struct data_opts *data_opts)
 {
 	unsigned nr_good = bch2_bkey_durability(c, k);
-	unsigned replicas = 0;
-
-	switch (k.k->type) {
-	case KEY_TYPE_btree_ptr:
-		replicas = c->opts.metadata_replicas;
-		break;
-	case KEY_TYPE_extent:
-		replicas = io_opts->data_replicas;
-		break;
-	}
+	unsigned replicas = bkey_is_btree_ptr(k.k)
+		? c->opts.metadata_replicas
+		: io_opts->data_replicas;

 	if (!nr_good || nr_good >= replicas)
 		return DATA_SKIP;

@@ -89,23 +89,6 @@ int bch2_mark_snapshot(struct bch_fs *c,
 	return 0;
 }

-static int subvol_lookup(struct btree_trans *trans, unsigned id, struct bch_subvolume *s)
-{
-	struct btree_iter iter;
-	struct bkey_s_c k;
-	int ret;
-
-	bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, POS(0, id), 0);
-	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;
-
-	if (!ret)
-		*s = *bkey_s_c_to_subvolume(k).v;
-
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
-}
-
 static int snapshot_lookup(struct btree_trans *trans, u32 id,
 			   struct bch_snapshot *s)
 {
@@ -195,7 +178,7 @@ static int bch2_snapshot_check(struct btree_trans *trans,
 	int ret;

 	id = le32_to_cpu(s.v->subvol);
-	ret = lockrestart_do(trans, subvol_lookup(trans, id, &subvol));
+	ret = lockrestart_do(trans, bch2_subvolume_get(trans, id, 0, false, &subvol));
 	if (ret == -ENOENT)
 		bch_err(trans->c, "snapshot node %llu has nonexistent subvolume %u",
 			s.k->p.offset, id);
@@ -798,34 +781,44 @@ void bch2_subvolume_to_text(struct printbuf *out, struct bch_fs *c,
 		       le32_to_cpu(s.v->snapshot));
 }

-int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
-				u32 *snapid)
+int bch2_subvolume_get(struct btree_trans *trans, unsigned subvol,
+		       bool inconsistent_if_not_found,
+		       int iter_flags,
+		       struct bch_subvolume *s)
 {
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	int ret;

-	bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes,
-			     POS(0, subvol),
-			     BTREE_ITER_CACHED|
-			     BTREE_ITER_WITH_UPDATES);
+	bch2_trans_iter_init(trans, &iter, BTREE_ID_subvolumes, POS(0, subvol),
+			     iter_flags);
 	k = bch2_btree_iter_peek_slot(&iter);
-	ret = bkey_err(k);
-	if (ret)
-		goto err;
+	ret = bkey_err(k) ?: k.k->type == KEY_TYPE_subvolume ? 0 : -ENOENT;

-	if (k.k->type != KEY_TYPE_subvolume) {
+	if (ret == -ENOENT && inconsistent_if_not_found)
 		bch2_fs_inconsistent(trans->c, "missing subvolume %u", subvol);
-		ret = -EIO;
-		goto err;
-	}
+	if (!ret)
+		*s = *bkey_s_c_to_subvolume(k).v;

-	*snapid = le32_to_cpu(bkey_s_c_to_subvolume(k).v->snapshot);
-err:
 	bch2_trans_iter_exit(trans, &iter);
 	return ret;
 }

+int bch2_subvolume_get_snapshot(struct btree_trans *trans, u32 subvol,
+				u32 *snapid)
+{
+	struct bch_subvolume s;
+	int ret;
+
+	ret = bch2_subvolume_get(trans, subvol, true,
+				 BTREE_ITER_CACHED|
+				 BTREE_ITER_WITH_UPDATES,
+				 &s);
+
+	*snapid = le32_to_cpu(s.snapshot);
+	return ret;
+}
+
 /* XXX: mark snapshot id for deletion, walk btree and delete: */
 int bch2_subvolume_delete(struct btree_trans *trans, u32 subvolid,
 			  int deleting_snapshot)

@@ -104,6 +104,8 @@ void bch2_subvolume_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c)
 	.val_to_text	= bch2_subvolume_to_text,	\
 }

+int bch2_subvolume_get(struct btree_trans *, unsigned,
+		       bool, int, struct bch_subvolume *);
 int bch2_subvolume_get_snapshot(struct btree_trans *, u32, u32 *);

 int bch2_subvolume_delete(struct btree_trans *, u32, int);

@@ -1452,15 +1452,18 @@ static int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
 	bch2_trans_init(&trans, c, 0, 0);

 	for (i = 0; i < ca->mi.nbuckets; i++) {
-		ret = bch2_btree_key_cache_flush(&trans,
-				BTREE_ID_alloc, POS(ca->dev_idx, i));
+		ret = lockrestart_do(&trans,
+			bch2_btree_key_cache_flush(&trans,
+				BTREE_ID_alloc, POS(ca->dev_idx, i)));
 		if (ret)
 			break;
 	}
 	bch2_trans_exit(&trans);

-	if (ret)
+	if (ret) {
+		bch_err(c, "error %i removing dev alloc info", ret);
 		return ret;
+	}

 	return bch2_btree_delete_range(c, BTREE_ID_alloc,
 				       POS(ca->dev_idx, 0),
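
Note on the wrapper used above: lockrestart_do() is the existing btree-transaction helper that re-runs its expression after a transaction restart (signalled as -EINTR at this point in the code's history), so a restart inside bch2_btree_key_cache_flush() is retried instead of escaping from bch2_dev_remove_alloc() as an error. A rough sketch of the pattern, paraphrased for illustration rather than quoted from the tree:

/* illustrative approximation, not the exact macro from libbcachefs */
#define lockrestart_do(_trans, _do)					\
({									\
	int _ret;							\
									\
	do {								\
		bch2_trans_begin(_trans);				\
		_ret = (_do);						\
	} while (_ret == -EINTR);					\
									\
	_ret;								\
})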

@@ -887,9 +887,14 @@ void eytzinger0_find_test(void)
  */
 u64 *bch2_acc_percpu_u64s(u64 __percpu *p, unsigned nr)
 {
-	u64 *ret = this_cpu_ptr(p);
+	u64 *ret;
 	int cpu;

+	/* access to pcpu vars has to be blocked by other locking */
+	preempt_disable();
+	ret = this_cpu_ptr(p);
+	preempt_enable();
+
 	for_each_possible_cpu(cpu) {
 		u64 *i = per_cpu_ptr(p, cpu);

@@ -4,6 +4,10 @@
 #include <linux/string.h>
 #include <asm/unaligned.h>

+#ifdef CONFIG_VALGRIND
+#include <valgrind/memcheck.h>
+#endif
+
 #include "varint.h"

 /**
@@ -95,6 +99,9 @@ int bch2_varint_encode_fast(u8 *out, u64 v)
  */
 int bch2_varint_decode_fast(const u8 *in, const u8 *end, u64 *out)
 {
+#ifdef CONFIG_VALGRIND
+	VALGRIND_MAKE_MEM_DEFINED(in, 8);
+#endif
 	u64 v = get_unaligned_le64(in);
 	unsigned bytes = ffz(*in) + 1;
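
For context on the hint itself: bch2_varint_decode_fast() always loads a full 8-byte word at 'in', even when the encoded varint is shorter, and the bytes it did not need are discarded afterwards. The result never depends on the uninitialized tail, but memcheck tracks definedness conservatively through the variable shifts and masks, so it can still flag the decode; VALGRIND_MAKE_MEM_DEFINED(in, 8) tells it to treat the whole word as initialized. A standalone sketch of the same pattern (the helper name below is illustrative, not from the bcachefs tree, and it assumes a little-endian build with the valgrind headers available):

#include <stdint.h>
#include <string.h>
#ifdef CONFIG_VALGRIND
#include <valgrind/memcheck.h>
#endif

/*
 * Load an 8-byte little-endian word of which only 'valid' bytes (1..8)
 * were ever written; the remaining storage exists but is uninitialized,
 * which is exactly the situation memcheck complains about.
 */
static uint64_t load_le64_prefix(const uint8_t *in, unsigned valid)
{
	uint64_t v;

#ifdef CONFIG_VALGRIND
	/* silence the false positive before the wide load */
	VALGRIND_MAKE_MEM_DEFINED(in, 8);
#endif
	memcpy(&v, in, 8);	/* wide load that may cover uninitialized bytes */

	/* keep only the initialized prefix; the stray high bytes are masked off */
	return v & (~0ULL >> (8 * (8 - valid)));
}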