Update bcachefs sources to 4a7a003763f5 bcachefs: Opt_durability can now be set via bch2_opt_set_sb()

Kent Overstreet 2024-07-15 16:54:30 -04:00
parent 2d60567685
commit 32fbdff2a9
22 changed files with 153 additions and 125 deletions
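The headline change is the extra device argument on the option-setting helpers. A minimal sketch of the resulting call sites, assuming a struct bch_fs *c and a struct bch_dev *ca supplied by the caller (both forms appear in the sysfs diff further down):

/* Per-device option: updates ca's member entry in the superblock,
 * taking sb_lock and calling bch2_write_super() internally. */
bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_durability, 2);

/* Filesystem-wide options pass NULL for the device, as before. */
bch2_opt_set_sb(c, NULL, opt, v);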

View File

@ -1 +1 @@
4d1d53862afb768e4edcf7256f8fe0634c96b40e
4a7a003763f594fcb798c13b4644521083f885b3

View File

@ -227,7 +227,7 @@ struct bch_sb *bch2_format(struct bch_opt_strs fs_opt_strs,
? bch2_opt_get_by_id(&fs_opts, opt_id)
: bch2_opt_get_by_id(&bch2_opts_default, opt_id);
__bch2_opt_set_sb(sb.sb, &bch2_opt_table[opt_id], v);
__bch2_opt_set_sb(sb.sb, -1, &bch2_opt_table[opt_id], v);
}
struct timespec now;

View File

@ -166,7 +166,7 @@ static void write_data(struct bch_fs *c,
closure_call(&op.cl, bch2_write, NULL, NULL);
BUG_ON(!(op.flags & BCH_WRITE_DONE));
BUG_ON(!(op.flags & BCH_WRITE_SUBMITTED));
dst_inode->bi_sectors += len >> 9;
if (op.error)

View File

@ -662,8 +662,14 @@ alloc:
goto alloc;
}
err:
if (!ob)
if (!ob) {
rcu_read_lock();
struct task_struct *t = rcu_dereference(c->copygc_thread);
if (t)
wake_up_process(t);
rcu_read_unlock();
ob = ERR_PTR(-BCH_ERR_no_buckets_found);
}
if (!IS_ERR(ob))
ob->data_type = data_type;

View File

@ -981,7 +981,7 @@ struct bch_fs {
struct bch_fs_rebalance rebalance;
/* COPYGC */
struct task_struct *copygc_thread;
struct task_struct __rcu *copygc_thread;
struct write_point copygc_write_point;
s64 copygc_wait_at;
s64 copygc_wait;

View File

@ -46,8 +46,6 @@ void bch2_btree_node_io_unlock(struct btree *b)
void bch2_btree_node_io_lock(struct btree *b)
{
bch2_assert_btree_nodes_not_locked();
wait_on_bit_lock_io(&b->flags, BTREE_NODE_write_in_flight,
TASK_UNINTERRUPTIBLE);
}
@ -66,16 +64,12 @@ void __bch2_btree_node_wait_on_write(struct btree *b)
void bch2_btree_node_wait_on_read(struct btree *b)
{
bch2_assert_btree_nodes_not_locked();
wait_on_bit_io(&b->flags, BTREE_NODE_read_in_flight,
TASK_UNINTERRUPTIBLE);
}
void bch2_btree_node_wait_on_write(struct btree *b)
{
bch2_assert_btree_nodes_not_locked();
wait_on_bit_io(&b->flags, BTREE_NODE_write_in_flight,
TASK_UNINTERRUPTIBLE);
}

View File

@ -3282,7 +3282,8 @@ bool bch2_current_has_btree_trans(struct bch_fs *c)
struct btree_trans *trans;
bool ret = false;
list_for_each_entry(trans, &c->btree_trans_list, list)
if (trans->locking_wait.task == current) {
if (trans->locking_wait.task == current &&
trans->locked) {
ret = true;
break;
}

View File

@ -13,16 +13,6 @@ void bch2_btree_lock_init(struct btree_bkey_cached_common *b,
lockdep_set_notrack_class(&b->lock);
}
#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void)
{
#if 0
//Re-enable when lock_class_is_held() is merged:
BUG_ON(lock_class_is_held(&bch2_btree_node_lock_key));
#endif
}
#endif
/* Btree node locking: */
struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,

View File

@ -15,12 +15,6 @@
void bch2_btree_lock_init(struct btree_bkey_cached_common *, enum six_lock_init_flags);
#ifdef CONFIG_LOCKDEP
void bch2_assert_btree_nodes_not_locked(void);
#else
static inline void bch2_assert_btree_nodes_not_locked(void) {}
#endif
void bch2_trans_unlock_noassert(struct btree_trans *);
static inline bool is_btree_node(struct btree_path *path, unsigned l)

View File

@ -13,7 +13,8 @@ int __bch2_darray_resize(darray_char *d, size_t element_size, size_t new_size, g
if (!data)
return -ENOMEM;
memcpy(data, d->data, d->size * element_size);
if (d->size)
memcpy(data, d->data, d->size * element_size);
if (d->data != d->preallocated)
kvfree(d->data);
d->data = data;
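A note on the new guard: an empty darray may have d->data == NULL, and memcpy() with a null source pointer is undefined behavior even when the length is zero, so the copy is presumably skipped to avoid a UBSAN splat. A standalone illustration of that rule, not code from the diff:

#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *src = NULL;	/* e.g. storage that was never allocated */
	size_t n = 0;
	char *dst = malloc(16);

	if (!dst)
		return 1;
	if (n)			/* skip the call entirely when there is nothing to copy */
		memcpy(dst, src, n);
	free(dst);
	return 0;
}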

View File

@ -1147,34 +1147,27 @@ void __bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
struct btree_iter iter;
struct bkey_buf sk;
struct bkey_s_c k;
u32 snapshot;
int ret;
BUG_ON(flags & BCH_READ_NODECODE);
bch2_bkey_buf_init(&sk);
retry:
bch2_trans_begin(trans);
iter = (struct btree_iter) { NULL };
ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
if (ret)
goto err;
bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
SPOS(inum.inum, bvec_iter.bi_sector, snapshot),
POS(inum.inum, bvec_iter.bi_sector),
BTREE_ITER_slots);
while (1) {
unsigned bytes, sectors, offset_into_extent;
enum btree_id data_btree = BTREE_ID_extents;
/*
* read_extent -> io_time_reset may cause a transaction restart
* without returning an error, we need to check for that here:
*/
ret = bch2_trans_relock(trans);
bch2_trans_begin(trans);
u32 snapshot;
ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
if (ret)
break;
goto err;
bch2_btree_iter_set_snapshot(&iter, snapshot);
bch2_btree_iter_set_pos(&iter,
POS(inum.inum, bvec_iter.bi_sector));
@ -1182,7 +1175,7 @@ retry:
k = bch2_btree_iter_peek_slot(&iter);
ret = bkey_err(k);
if (ret)
break;
goto err;
offset_into_extent = iter.pos.offset -
bkey_start_offset(k.k);
@ -1193,7 +1186,7 @@ retry:
ret = bch2_read_indirect_extent(trans, &data_btree,
&offset_into_extent, &sk);
if (ret)
break;
goto err;
k = bkey_i_to_s_c(sk.k);
@ -1213,7 +1206,7 @@ retry:
data_btree, k,
offset_into_extent, failed, flags);
if (ret)
break;
goto err;
if (flags & BCH_READ_LAST_FRAGMENT)
break;
@ -1223,16 +1216,16 @@ retry:
ret = btree_trans_too_many_iters(trans);
if (ret)
goto err;
err:
if (ret &&
!bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
ret != READ_RETRY &&
ret != READ_RETRY_AVOID)
break;
}
err:
bch2_trans_iter_exit(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
ret == READ_RETRY ||
ret == READ_RETRY_AVOID)
goto retry;
bch2_trans_put(trans);
bch2_bkey_buf_exit(&sk, c);

View File

@ -554,7 +554,7 @@ out:
err:
keys->top = keys->keys;
op->error = ret;
op->flags |= BCH_WRITE_DONE;
op->flags |= BCH_WRITE_SUBMITTED;
goto out;
}
@ -589,7 +589,7 @@ static CLOSURE_CALLBACK(bch2_write_index)
struct workqueue_struct *wq = index_update_wq(op);
unsigned long flags;
if ((op->flags & BCH_WRITE_DONE) &&
if ((op->flags & BCH_WRITE_SUBMITTED) &&
(op->flags & BCH_WRITE_MOVE))
bch2_bio_free_pages_pool(op->c, &op->wbio.bio);
@ -634,7 +634,7 @@ void bch2_write_point_do_index_updates(struct work_struct *work)
__bch2_write_index(op);
if (!(op->flags & BCH_WRITE_DONE))
if (!(op->flags & BCH_WRITE_SUBMITTED))
__bch2_write(op);
else
bch2_write_done(&op->cl);
@ -1318,7 +1318,7 @@ retry:
wbio_init(bio)->put_bio = true;
bio->bi_opf = op->wbio.bio.bi_opf;
} else {
op->flags |= BCH_WRITE_DONE;
op->flags |= BCH_WRITE_SUBMITTED;
}
op->pos.offset += bio_sectors(bio);
@ -1332,7 +1332,7 @@ retry:
op->insert_keys.top, true);
bch2_keylist_push(&op->insert_keys);
if (op->flags & BCH_WRITE_DONE)
if (op->flags & BCH_WRITE_SUBMITTED)
break;
bch2_btree_iter_advance(&iter);
}
@ -1347,14 +1347,14 @@ err:
op->pos.inode, op->pos.offset << 9,
"%s: btree lookup error %s", __func__, bch2_err_str(ret));
op->error = ret;
op->flags |= BCH_WRITE_DONE;
op->flags |= BCH_WRITE_SUBMITTED;
}
bch2_trans_put(trans);
darray_exit(&buckets);
/* fallback to cow write path? */
if (!(op->flags & BCH_WRITE_DONE)) {
if (!(op->flags & BCH_WRITE_SUBMITTED)) {
closure_sync(&op->cl);
__bch2_nocow_write_done(op);
op->insert_keys.top = op->insert_keys.keys;
@ -1410,7 +1410,7 @@ static void __bch2_write(struct bch_write_op *op)
if (unlikely(op->opts.nocow && c->opts.nocow_enabled)) {
bch2_nocow_write(op);
if (op->flags & BCH_WRITE_DONE)
if (op->flags & BCH_WRITE_SUBMITTED)
goto out_nofs_restore;
}
again:
@ -1465,7 +1465,7 @@ again:
bch2_alloc_sectors_done_inlined(c, wp);
err:
if (ret <= 0) {
op->flags |= BCH_WRITE_DONE;
op->flags |= BCH_WRITE_SUBMITTED;
if (ret < 0) {
if (!(op->flags & BCH_WRITE_ALLOC_NOWAIT))
@ -1501,7 +1501,7 @@ err:
* once, as that signals backpressure to the caller.
*/
if ((op->flags & BCH_WRITE_SYNC) ||
(!(op->flags & BCH_WRITE_DONE) &&
(!(op->flags & BCH_WRITE_SUBMITTED) &&
!(op->flags & BCH_WRITE_IN_WORKER))) {
if (closure_sync_timeout(&op->cl, HZ * 10)) {
bch2_print_allocator_stuck(c);
@ -1510,7 +1510,7 @@ err:
__bch2_write_index(op);
if (!(op->flags & BCH_WRITE_DONE))
if (!(op->flags & BCH_WRITE_SUBMITTED))
goto again;
bch2_write_done(&op->cl);
} else {
@ -1532,7 +1532,7 @@ static void bch2_write_data_inline(struct bch_write_op *op, unsigned data_len)
memset(&op->failed, 0, sizeof(op->failed));
op->flags |= BCH_WRITE_WROTE_DATA_INLINE;
op->flags |= BCH_WRITE_DONE;
op->flags |= BCH_WRITE_SUBMITTED;
bch2_check_set_feature(op->c, BCH_FEATURE_inline_data);

View File

@ -33,7 +33,7 @@ void bch2_submit_wbio_replicas(struct bch_write_bio *, struct bch_fs *,
x(SYNC) \
x(MOVE) \
x(IN_WORKER) \
x(DONE) \
x(SUBMITTED) \
x(IO_ERROR) \
x(CONVERT_UNWRITTEN)

View File

@ -205,6 +205,17 @@ void bch2_journal_space_available(struct journal *j)
j->can_discard = can_discard;
if (nr_online < metadata_replicas_required(c)) {
struct printbuf buf = PRINTBUF;
prt_printf(&buf, "insufficient writeable journal devices available: have %u, need %u\n"
"rw journal devs:", nr_online, metadata_replicas_required(c));
rcu_read_lock();
for_each_member_device_rcu(c, ca, &c->rw_devs[BCH_DATA_journal])
prt_printf(&buf, " %s", ca->name);
rcu_read_unlock();
bch_err(c, "%s", buf.buf);
printbuf_exit(&buf);
ret = JOURNAL_ERR_insufficient_devices;
goto out;
}

View File

@ -111,11 +111,11 @@ static inline u128_u u128_shl(u128_u i, s8 shift)
{
u128_u r;
r.lo = i.lo << shift;
r.lo = i.lo << (shift & 63);
if (shift < 64)
r.hi = (i.hi << shift) | (i.lo >> (64 - shift));
r.hi = (i.hi << (shift & 63)) | (i.lo >> (-shift & 63));
else {
r.hi = i.lo << (shift - 64);
r.hi = i.lo << (-shift & 63);
r.lo = 0;
}
return r;
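The masking keeps every shift count in 0..63: the old code could shift a 64-bit value by 64 (i.lo >> (64 - shift) when shift is 0) or by shift itself when shift >= 64, both undefined. For shifts strictly between 0 and 64 the result is unchanged, since -shift & 63 equals 64 - shift in that range. A quick standalone check of that identity, separate from the diff:

#include <assert.h>

int main(void)
{
	for (int shift = 1; shift < 64; shift++)
		assert((-shift & 63) == 64 - shift);
	return 0;
}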

View File

@ -357,19 +357,18 @@ static int bch2_copygc_thread(void *arg)
}
last = atomic64_read(&clock->now);
wait = bch2_copygc_wait_amount(c);
wait = max_t(long, 0, bch2_copygc_wait_amount(c) - clock->max_slop);
if (wait > clock->max_slop) {
if (wait > 0) {
c->copygc_wait_at = last;
c->copygc_wait = last + wait;
move_buckets_wait(&ctxt, buckets, true);
trace_and_count(c, copygc_wait, c, wait, last + wait);
bch2_kthread_io_clock_wait(clock, last + wait,
MAX_SCHEDULE_TIMEOUT);
trace_and_count(c, copygc_wait, c, wait, c->copygc_wait);
bch2_io_clock_schedule_timeout(clock, c->copygc_wait);
continue;
}
c->copygc_wait = 0;
c->copygc_wait = c->copygc_wait_at = 0;
c->copygc_running = true;
ret = bch2_copygc(&ctxt, buckets, &did_work);
@ -401,9 +400,10 @@ static int bch2_copygc_thread(void *arg)
void bch2_copygc_stop(struct bch_fs *c)
{
if (c->copygc_thread) {
kthread_stop(c->copygc_thread);
put_task_struct(c->copygc_thread);
struct task_struct *t = rcu_dereference_protected(c->copygc_thread, true);
if (t) {
kthread_stop(t);
put_task_struct(t);
}
c->copygc_thread = NULL;
}
@ -430,8 +430,8 @@ int bch2_copygc_start(struct bch_fs *c)
get_task_struct(t);
c->copygc_thread = t;
wake_up_process(c->copygc_thread);
rcu_assign_pointer(c->copygc_thread, t);
wake_up_process(t);
return 0;
}

View File

@ -230,6 +230,8 @@ const struct bch_option bch2_opt_table[] = {
#define OPT_STR_NOLIMIT(_choices) .type = BCH_OPT_STR, \
.min = 0, .max = U64_MAX, \
.choices = _choices
#define OPT_BITFIELD(_choices) .type = BCH_OPT_BITFIELD, \
.choices = _choices
#define OPT_FN(_fn) .type = BCH_OPT_FN, .fn = _fn
#define x(_name, _bits, _flags, _type, _sb_opt, _default, _hint, _help) \
@ -376,6 +378,13 @@ int bch2_opt_parse(struct bch_fs *c,
*res = ret;
break;
case BCH_OPT_BITFIELD: {
s64 v = bch2_read_flag_list(val, opt->choices);
if (v < 0)
return v;
*res = v;
break;
}
case BCH_OPT_FN:
ret = opt->fn.parse(c, val, res, err);
@ -608,10 +617,20 @@ int bch2_opts_from_sb(struct bch_opts *opts, struct bch_sb *sb)
return 0;
}
void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
struct bch_dev_sb_opt_set {
void (*set_sb)(struct bch_member *, u64);
};
static const struct bch_dev_sb_opt_set bch2_dev_sb_opt_setters[] = {
#define x(n, set) [Opt_##n] = { .set_sb = SET_##set },
BCH_DEV_OPT_SETTERS()
#undef x
};
void __bch2_opt_set_sb(struct bch_sb *sb, int dev_idx,
const struct bch_option *opt, u64 v)
{
if (opt->set_sb == SET_BCH2_NO_SB_OPT)
return;
enum bch_opt_id id = opt - bch2_opt_table;
if (opt->flags & OPT_SB_FIELD_SECTORS)
v >>= 9;
@ -619,16 +638,35 @@ void __bch2_opt_set_sb(struct bch_sb *sb, const struct bch_option *opt, u64 v)
if (opt->flags & OPT_SB_FIELD_ILOG2)
v = ilog2(v);
opt->set_sb(sb, v);
if (opt->flags & OPT_SB_FIELD_ONE_BIAS)
v++;
if (opt->flags & OPT_FS) {
if (opt->set_sb != SET_BCH2_NO_SB_OPT)
opt->set_sb(sb, v);
}
if ((opt->flags & OPT_DEVICE) && dev_idx >= 0) {
if (WARN(!bch2_member_exists(sb, dev_idx),
"tried to set device option %s on nonexistent device %i",
opt->attr.name, dev_idx))
return;
struct bch_member *m = bch2_members_v2_get_mut(sb, dev_idx);
const struct bch_dev_sb_opt_set *set = bch2_dev_sb_opt_setters + id;
if (set->set_sb)
set->set_sb(m, v);
else
pr_err("option %s cannot be set via opt_set_sb()", opt->attr.name);
}
}
void bch2_opt_set_sb(struct bch_fs *c, const struct bch_option *opt, u64 v)
void bch2_opt_set_sb(struct bch_fs *c, struct bch_dev *ca,
const struct bch_option *opt, u64 v)
{
if (opt->set_sb == SET_BCH2_NO_SB_OPT)
return;
mutex_lock(&c->sb_lock);
__bch2_opt_set_sb(c->disk_sb.sb, opt, v);
__bch2_opt_set_sb(c->disk_sb.sb, ca ? ca->dev_idx : -1, opt, v);
bch2_write_super(c);
mutex_unlock(&c->sb_lock);
}
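For the new BCH_OPT_BITFIELD type, the value is a comma-separated list of names whose positions in opt->choices become bit positions, parsed by bch2_read_flag_list(). A rough userspace model of that mapping, purely for illustration (the helper and the name table here are made up; the real choices list for data_allowed is __bch2_data_types):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for bch2_read_flag_list(): the index of each name in the
 * NULL-terminated list becomes its bit position; unknown names fail. */
static int64_t flag_list_to_mask(const char *s, const char * const names[])
{
	char buf[128];
	int64_t mask = 0;

	snprintf(buf, sizeof(buf), "%s", s);

	for (char *tok = strtok(buf, ","); tok; tok = strtok(NULL, ",")) {
		size_t i;

		for (i = 0; names[i]; i++)
			if (!strcmp(tok, names[i]))
				break;
		if (!names[i])
			return -1;	/* unknown flag name */
		mask |= 1ULL << i;
	}
	return mask;
}

int main(void)
{
	/* Hypothetical ordering; the real bit positions come from enum bch_data_type. */
	static const char * const data_types[] = { "free", "sb", "journal", "btree", "user", NULL };

	printf("data_allowed=journal,btree -> 0x%llx\n",
	       (unsigned long long)flag_list_to_mask("journal,btree", data_types));
	return 0;
}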

View File

@ -53,23 +53,25 @@ void SET_BCH2_NO_SB_OPT(struct bch_sb *, u64);
/* When can be set: */
enum opt_flags {
OPT_FS = (1 << 0), /* Filesystem option */
OPT_DEVICE = (1 << 1), /* Device option */
OPT_INODE = (1 << 2), /* Inode option */
OPT_FORMAT = (1 << 3), /* May be specified at format time */
OPT_MOUNT = (1 << 4), /* May be specified at mount time */
OPT_RUNTIME = (1 << 5), /* May be specified at runtime */
OPT_HUMAN_READABLE = (1 << 6),
OPT_MUST_BE_POW_2 = (1 << 7), /* Must be power of 2 */
OPT_SB_FIELD_SECTORS = (1 << 8),/* Superblock field is >> 9 of actual value */
OPT_SB_FIELD_ILOG2 = (1 << 9), /* Superblock field is ilog2 of actual value */
OPT_HIDDEN = (1 << 10),
OPT_FS = BIT(0), /* Filesystem option */
OPT_DEVICE = BIT(1), /* Device option */
OPT_INODE = BIT(2), /* Inode option */
OPT_FORMAT = BIT(3), /* May be specified at format time */
OPT_MOUNT = BIT(4), /* May be specified at mount time */
OPT_RUNTIME = BIT(5), /* May be specified at runtime */
OPT_HUMAN_READABLE = BIT(6),
OPT_MUST_BE_POW_2 = BIT(7), /* Must be power of 2 */
OPT_SB_FIELD_SECTORS = BIT(8), /* Superblock field is >> 9 of actual value */
OPT_SB_FIELD_ILOG2 = BIT(9), /* Superblock field is ilog2 of actual value */
OPT_SB_FIELD_ONE_BIAS = BIT(10), /* 0 means default value */
OPT_HIDDEN = BIT(11),
};
enum opt_type {
BCH_OPT_BOOL,
BCH_OPT_UINT,
BCH_OPT_STR,
BCH_OPT_BITFIELD,
BCH_OPT_FN,
};
@ -467,11 +469,16 @@ enum fsck_err_opts {
BCH2_NO_SB_OPT, 0, \
"size", "Size of filesystem on device") \
x(durability, u8, \
OPT_DEVICE, \
OPT_DEVICE|OPT_SB_FIELD_ONE_BIAS, \
OPT_UINT(0, BCH_REPLICAS_MAX), \
BCH2_NO_SB_OPT, 1, \
"n", "Data written to this device will be considered\n"\
"to have already been replicated n times") \
x(data_allowed, u8, \
OPT_DEVICE, \
OPT_BITFIELD(__bch2_data_types), \
BCH2_NO_SB_OPT, BIT(BCH_DATA_journal)|BIT(BCH_DATA_btree)|BIT(BCH_DATA_user),\
"types", "Allowed data types for this device: journal, btree, and/or user")\
x(btree_node_prefetch, u8, \
OPT_FS|OPT_MOUNT|OPT_RUNTIME, \
OPT_BOOL(), \
@ -479,6 +486,11 @@ enum fsck_err_opts {
NULL, "BTREE_ITER_prefetch causes btree nodes to be\n"\
" prefetched sequentially")
#define BCH_DEV_OPT_SETTERS() \
x(discard, BCH_MEMBER_DISCARD) \
x(durability, BCH_MEMBER_DURABILITY) \
x(data_allowed, BCH_MEMBER_DATA_ALLOWED)
struct bch_opts {
#define x(_name, _bits, ...) unsigned _name##_defined:1;
BCH_OPTS()
@ -558,8 +570,10 @@ void bch2_opt_set_by_id(struct bch_opts *, enum bch_opt_id, u64);
u64 bch2_opt_from_sb(struct bch_sb *, enum bch_opt_id);
int bch2_opts_from_sb(struct bch_opts *, struct bch_sb *);
void __bch2_opt_set_sb(struct bch_sb *, const struct bch_option *, u64);
void bch2_opt_set_sb(struct bch_fs *, const struct bch_option *, u64);
void __bch2_opt_set_sb(struct bch_sb *, int, const struct bch_option *, u64);
struct bch_dev;
void bch2_opt_set_sb(struct bch_fs *, struct bch_dev *, const struct bch_option *, u64);
int bch2_opt_lookup(const char *);
int bch2_opt_validate(const struct bch_option *, u64, struct printbuf *);
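OPT_SB_FIELD_ONE_BIAS is what lets the sysfs code below drop its hand-written v + 1: the superblock field stores the value plus one, so a stored zero keeps meaning "never set, use the default". A tiny standalone model of the round trip (the helpers are illustrative; in the kernel the field is read back through BCH_MEMBER_DURABILITY() with the same bias removed):

#include <assert.h>

/* Illustrative only: the field stores value + 1 so that 0 stays "unset". */
static unsigned encode_one_bias(unsigned v)  { return v + 1; }
static unsigned decode_one_bias(unsigned sb) { return sb ? sb - 1 : 1; /* 1 = default durability */ }

int main(void)
{
	assert(encode_one_bias(0) == 1);	/* explicit 0 is distinguishable from unset */
	assert(encode_one_bias(1) == 2);
	assert(decode_one_bias(0) == 1);	/* unset falls back to the default of 1 */
	assert(decode_one_bias(2) == 1);	/* round-trips back to the value that was set */
	return 0;
}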

View File

@ -674,7 +674,7 @@ STORE(bch2_fs_opts_dir)
if (ret < 0)
goto err;
bch2_opt_set_sb(c, opt, v);
bch2_opt_set_sb(c, NULL, opt, v);
bch2_opt_set_by_id(&c->opts, id, v);
if (v &&
@ -823,27 +823,13 @@ STORE(bch2_dev)
if (attr == &sysfs_discard) {
bool v = strtoul_or_return(buf);
mutex_lock(&c->sb_lock);
mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
if (v != BCH_MEMBER_DISCARD(mi)) {
SET_BCH_MEMBER_DISCARD(mi, v);
bch2_write_super(c);
}
mutex_unlock(&c->sb_lock);
bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_discard, v);
}
if (attr == &sysfs_durability) {
u64 v = strtoul_or_return(buf);
mutex_lock(&c->sb_lock);
mi = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
if (v + 1 != BCH_MEMBER_DURABILITY(mi)) {
SET_BCH_MEMBER_DURABILITY(mi, v + 1);
bch2_write_super(c);
}
mutex_unlock(&c->sb_lock);
bch2_opt_set_sb(c, ca, bch2_opt_table + Opt_durability, v);
}
if (attr == &sysfs_label) {

View File

@ -204,7 +204,7 @@ STRTO_H(strtoll, long long)
STRTO_H(strtoull, unsigned long long)
STRTO_H(strtou64, u64)
u64 bch2_read_flag_list(char *opt, const char * const list[])
u64 bch2_read_flag_list(const char *opt, const char * const list[])
{
u64 ret = 0;
char *p, *s, *d = kstrdup(opt, GFP_KERNEL);

View File

@ -309,7 +309,7 @@ static inline int bch2_strtoul_h(const char *cp, long *res)
bool bch2_is_zero(const void *, size_t);
u64 bch2_read_flag_list(char *, const char * const[]);
u64 bch2_read_flag_list(const char *, const char * const[]);
void bch2_prt_u64_base2_nbits(struct printbuf *, u64, unsigned);
void bch2_prt_u64_base2(struct printbuf *, u64);

View File

@ -85,7 +85,7 @@ int bch2_varint_encode_fast(u8 *out, u64 v)
if (likely(bytes < 9)) {
v <<= bytes;
v |= ~(~0 << (bytes - 1));
v |= ~(~0U << (bytes - 1));
} else {
*out++ = 255;
bytes = 9;
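The last hunk swaps ~0 for ~0U, presumably because left-shifting the signed value -1 is undefined in C, while the unsigned form is well defined and still yields the mask of the low bytes - 1 bits. A quick standalone check, separate from the diff:

#include <assert.h>

int main(void)
{
	for (unsigned bytes = 1; bytes < 9; bytes++)
		assert(~(~0U << (bytes - 1)) == (1U << (bytes - 1)) - 1);
	return 0;
}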