Update bcachefs sources to ba398d2906 bcachefs: Fix reflink repair code

commit a1d66a2a4e (parent 9c79275419)
Author: Kent Overstreet
Date: 2022-02-13 04:07:30 -05:00

13 changed files with 857 additions and 124 deletions


@@ -1 +1 @@
-b84661c042c7d5caaab3f79661d04789070bea78
+ba398d29060ecc2e2c9d6292a94ddc181761de1a

include/linux/siphash.h (new file, 145 lines)

@@ -0,0 +1,145 @@
/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* This file is provided under a dual BSD/GPLv2 license.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
*
* This implementation is specifically for SipHash2-4 for a secure PRF
* and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
* hashtables.
*/
#ifndef _LINUX_SIPHASH_H
#define _LINUX_SIPHASH_H
#include <linux/types.h>
#include <linux/kernel.h>
#define SIPHASH_ALIGNMENT __alignof__(u64)
typedef struct {
u64 key[2];
} siphash_key_t;
static inline bool siphash_key_is_zero(const siphash_key_t *key)
{
return !(key->key[0] | key->key[1]);
}
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key);
#endif
u64 siphash_1u64(const u64 a, const siphash_key_t *key);
u64 siphash_2u64(const u64 a, const u64 b, const siphash_key_t *key);
u64 siphash_3u64(const u64 a, const u64 b, const u64 c,
const siphash_key_t *key);
u64 siphash_4u64(const u64 a, const u64 b, const u64 c, const u64 d,
const siphash_key_t *key);
u64 siphash_1u32(const u32 a, const siphash_key_t *key);
u64 siphash_3u32(const u32 a, const u32 b, const u32 c,
const siphash_key_t *key);
static inline u64 siphash_2u32(const u32 a, const u32 b,
const siphash_key_t *key)
{
return siphash_1u64((u64)b << 32 | a, key);
}
static inline u64 siphash_4u32(const u32 a, const u32 b, const u32 c,
const u32 d, const siphash_key_t *key)
{
return siphash_2u64((u64)b << 32 | a, (u64)d << 32 | c, key);
}
static inline u64 ___siphash_aligned(const __le64 *data, size_t len,
const siphash_key_t *key)
{
if (__builtin_constant_p(len) && len == 4)
return siphash_1u32(le32_to_cpup((const __le32 *)data), key);
if (__builtin_constant_p(len) && len == 8)
return siphash_1u64(le64_to_cpu(data[0]), key);
if (__builtin_constant_p(len) && len == 16)
return siphash_2u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
key);
if (__builtin_constant_p(len) && len == 24)
return siphash_3u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
le64_to_cpu(data[2]), key);
if (__builtin_constant_p(len) && len == 32)
return siphash_4u64(le64_to_cpu(data[0]), le64_to_cpu(data[1]),
le64_to_cpu(data[2]), le64_to_cpu(data[3]),
key);
return __siphash_aligned(data, len, key);
}
/**
 * siphash - compute 64-bit siphash PRF value
 * @data: buffer to hash
 * @len: size of @data
 * @key: the siphash key
 */
static inline u64 siphash(const void *data, size_t len,
const siphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (!IS_ALIGNED((unsigned long)data, SIPHASH_ALIGNMENT))
return __siphash_unaligned(data, len, key);
#endif
return ___siphash_aligned(data, len, key);
}
#define HSIPHASH_ALIGNMENT __alignof__(unsigned long)
typedef struct {
unsigned long key[2];
} hsiphash_key_t;
u32 __hsiphash_aligned(const void *data, size_t len,
const hsiphash_key_t *key);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key);
#endif
u32 hsiphash_1u32(const u32 a, const hsiphash_key_t *key);
u32 hsiphash_2u32(const u32 a, const u32 b, const hsiphash_key_t *key);
u32 hsiphash_3u32(const u32 a, const u32 b, const u32 c,
const hsiphash_key_t *key);
u32 hsiphash_4u32(const u32 a, const u32 b, const u32 c, const u32 d,
const hsiphash_key_t *key);
static inline u32 ___hsiphash_aligned(const __le32 *data, size_t len,
const hsiphash_key_t *key)
{
if (__builtin_constant_p(len) && len == 4)
return hsiphash_1u32(le32_to_cpu(data[0]), key);
if (__builtin_constant_p(len) && len == 8)
return hsiphash_2u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
key);
if (__builtin_constant_p(len) && len == 12)
return hsiphash_3u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
le32_to_cpu(data[2]), key);
if (__builtin_constant_p(len) && len == 16)
return hsiphash_4u32(le32_to_cpu(data[0]), le32_to_cpu(data[1]),
le32_to_cpu(data[2]), le32_to_cpu(data[3]),
key);
return __hsiphash_aligned(data, len, key);
}
/**
 * hsiphash - compute 32-bit hsiphash PRF value
 * @data: buffer to hash
 * @len: size of @data
 * @key: the hsiphash key
 */
static inline u32 hsiphash(const void *data, size_t len,
const hsiphash_key_t *key)
{
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
if (!IS_ALIGNED((unsigned long)data, HSIPHASH_ALIGNMENT))
return __hsiphash_unaligned(data, len, key);
#endif
return ___hsiphash_aligned(data, len, key);
}
#endif /* _LINUX_SIPHASH_H */
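
For orientation, a minimal usage sketch of this API; nothing below is part of the new file, and example_key, hash_pair() and hash_buf() are illustrative names. The key must be initialized once (e.g. with get_random_bytes()) before hashing:

#include <linux/siphash.h>

static siphash_key_t example_key;	/* fill once with get_random_bytes() */

/* Fixed-width inputs should use the specialized helpers: */
static u64 hash_pair(u64 a, u64 b)
{
	return siphash_2u64(a, b, &example_key);
}

/*
 * Arbitrary buffers go through siphash(), which falls back to
 * __siphash_unaligned() when the data isn't 8-byte aligned:
 */
static u64 hash_buf(const void *data, size_t len)
{
	return siphash(data, len, &example_key);
}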


@@ -540,7 +540,8 @@ static void find_reclaimable_buckets_lru(struct bch_fs *c, struct bch_dev *ca)
 			continue;
 
 		if (!m.data_type &&
-		    bch2_bucket_needs_journal_commit(c, last_seq_ondisk,
+		    bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+						     last_seq_ondisk,
 						     ca->dev_idx, b)) {
 			ca->buckets_waiting_on_journal++;
 			continue;


@@ -1938,30 +1938,17 @@ int bch2_gc_gens(struct bch_fs *c)
 		}
 	}
 
-	for_each_member_device(ca, c, i) {
-		for_each_btree_key(&trans, iter, BTREE_ID_alloc,
-				   POS(ca->dev_idx, ca->mi.first_bucket),
-				   BTREE_ITER_SLOTS|
-				   BTREE_ITER_PREFETCH, k, ret) {
-			if (bkey_cmp(iter.pos, POS(ca->dev_idx, ca->mi.nbuckets)) >= 0)
-				break;
-
-			ret = __bch2_trans_do(&trans, NULL, NULL,
-					      BTREE_INSERT_LAZY_RW|
-					      BTREE_INSERT_NOFAIL,
-					      bch2_alloc_write_oldest_gen(&trans, &iter));
-			if (ret) {
-				bch_err(c, "error writing oldest_gen: %i", ret);
-				break;
-			}
-		}
-		bch2_trans_iter_exit(&trans, &iter);
-
+	for_each_btree_key(&trans, iter, BTREE_ID_alloc, POS_MIN,
+			   BTREE_ITER_PREFETCH, k, ret) {
+		ret = __bch2_trans_do(&trans, NULL, NULL,
+				      BTREE_INSERT_NOFAIL,
+				      bch2_alloc_write_oldest_gen(&trans, &iter));
 		if (ret) {
-			percpu_ref_put(&ca->ref);
+			bch_err(c, "error writing oldest_gen: %i", ret);
 			break;
 		}
 	}
+	bch2_trans_iter_exit(&trans, &iter);
 
 	c->gc_gens_btree	= 0;
 	c->gc_gens_pos		= POS_MIN;


@@ -2317,6 +2317,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
 			ret = bkey_err(k2);
 			if (ret) {
 				k = k2;
+				bch2_btree_iter_set_pos(iter, iter->pos);
 				goto out;
 			}
 


@@ -399,10 +399,11 @@ static inline void do_btree_insert_one(struct btree_trans *trans,
 	}
 }
 
-static noinline void bch2_trans_mark_gc(struct btree_trans *trans)
+static noinline int bch2_trans_mark_gc(struct btree_trans *trans)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_insert_entry *i;
+	int ret = 0;
 
 	trans_for_each_update(trans, i) {
 		/*
@@ -411,10 +412,15 @@ static noinline void bch2_trans_mark_gc(struct btree_trans *trans)
 		 */
 		BUG_ON(i->cached || i->level);
 
-		if (gc_visited(c, gc_pos_btree_node(insert_l(i)->b)))
-			bch2_mark_update(trans, i->path, i->k,
-					 i->flags|BTREE_TRIGGER_GC);
+		if (gc_visited(c, gc_pos_btree_node(insert_l(i)->b))) {
+			ret = bch2_mark_update(trans, i->path, i->k,
+					       i->flags|BTREE_TRIGGER_GC);
+			if (ret)
+				break;
+		}
 	}
+
+	return ret;
 }
 
 static inline int
@@ -513,11 +519,17 @@ bch2_trans_commit_write_locked(struct btree_trans *trans,
 		return BTREE_INSERT_NEED_MARK_REPLICAS;
 
 	trans_for_each_update(trans, i)
-		if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type))
-			bch2_mark_update(trans, i->path, i->k, i->flags);
+		if (BTREE_NODE_TYPE_HAS_MEM_TRIGGERS & (1U << i->bkey_type)) {
+			ret = bch2_mark_update(trans, i->path, i->k, i->flags);
+			if (ret)
+				return ret;
+		}
 
-	if (unlikely(c->gc_pos.phase))
-		bch2_trans_mark_gc(trans);
+	if (unlikely(c->gc_pos.phase)) {
+		ret = bch2_trans_mark_gc(trans);
+		if (ret)
+			return ret;
+	}
 
 	trans_for_each_update(trans, i)
 		do_btree_insert_one(trans, i);


@@ -537,11 +537,15 @@ static int bch2_mark_alloc(struct btree_trans *trans,
 	}
 
 	if (old_u.data_type && !new_u.data_type && new_u.journal_seq) {
-		ret = bch2_set_bucket_needs_journal_commit(c,
+		ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
+				c->journal.flushed_seq_ondisk,
 				new_u.dev, new_u.bucket,
 				new_u.journal_seq);
-		if (ret)
+		if (ret) {
+			bch2_fs_fatal_error(c,
+				"error setting bucket_needs_journal_commit: %i", ret);
 			return ret;
+		}
 	}
 
 	ca = bch_dev_bkey_exists(c, new_u.dev);


@@ -2,36 +2,46 @@
 
 #include "bcachefs.h"
 #include "buckets_waiting_for_journal.h"
-#include <linux/jhash.h>
+#include <linux/random.h>
 
-static u32 hash_seeds[] = {
-	2168153708,
-	1262039142,
-	1183479835,
-};
-
-static inline unsigned bucket_hash(u64 dev_bucket, unsigned hash_seed_idx)
+static inline struct bucket_hashed *
+bucket_hash(struct buckets_waiting_for_journal_table *t,
+	    unsigned hash_seed_idx, u64 dev_bucket)
 {
-	return jhash_2words(dev_bucket << 32, dev_bucket, hash_seeds[hash_seed_idx]);
+	unsigned h = siphash_1u64(dev_bucket, &t->hash_seeds[hash_seed_idx]);
+
+	BUG_ON(!is_power_of_2(t->size));
+	return t->d + (h & (t->size - 1));
 }
 
-bool bch2_bucket_needs_journal_commit(struct bch_fs *c,
+static void bucket_table_init(struct buckets_waiting_for_journal_table *t, size_t size)
+{
+	unsigned i;
+
+	t->size = size;
+
+	for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++)
+		get_random_bytes(&t->hash_seeds[i], sizeof(t->hash_seeds[i]));
+	memset(t->d, 0, sizeof(t->d[0]) * size);
+}
+
+bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
 				      u64 flushed_seq,
 				      unsigned dev, u64 bucket)
 {
-	struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
+	struct buckets_waiting_for_journal_table *t;
 	u64 dev_bucket = (u64) dev << 56 | bucket;
 	bool ret = false;
 	unsigned i;
 
 	mutex_lock(&b->lock);
-	BUG_ON(!is_power_of_2(b->nr));
+	t = b->t;
 
-	for (i = 0; i < ARRAY_SIZE(hash_seeds); i++) {
-		u32 h = bucket_hash(dev_bucket, i) & (b->nr - 1);
+	for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
+		struct bucket_hashed *h = bucket_hash(t, i, dev_bucket);
 
-		if (b->d[h].dev_bucket == dev_bucket) {
-			ret = b->d[h].journal_seq > flushed_seq;
+		if (h->dev_bucket == dev_bucket) {
+			ret = h->journal_seq > flushed_seq;
 			break;
 		}
 	}
@@ -41,66 +51,23 @@ bool bch2_bucket_needs_journal_commit(struct bch_fs *c,
 	return ret;
 }
 
-static int bch2_buckets_waiting_for_journal_rehash(struct bch_fs *c)
+static bool bucket_table_insert(struct buckets_waiting_for_journal_table *t,
+				struct bucket_hashed *new,
+				u64 flushed_seq)
 {
-	struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
-	u64 flushed_seq = c->journal.flushed_seq_ondisk;
-	unsigned i, j, h, new_nr = b->nr * 2, elements = 0;
-	struct bucket_hashed *new_table;
-
-	new_table = kvmalloc_array(new_nr, sizeof(*new_table), __GFP_ZERO);
-	if (!new_table)
-		return -ENOMEM;
-
-	for (i = 0; i < b->nr; i++) {
-		if (b->d[i].journal_seq < flushed_seq)
-			continue;
-
-		for (j = 0; j < ARRAY_SIZE(hash_seeds); j++) {
-			h = bucket_hash(b->d[i].dev_bucket, j);
-			if ((h & (b->nr - 1)) == i)
-				break;
-		}
-
-		BUG_ON(j == ARRAY_SIZE(hash_seeds));
-		BUG_ON(new_table[h & (new_nr - 1)].dev_bucket);
-
-		new_table[h & (new_nr - 1)] = b->d[i];
-		elements++;
-	}
-
-	kvfree(b->d);
-	b->nr = new_nr;
-	b->d = new_table;
-
-	return 0;
-}
-
-int bch2_set_bucket_needs_journal_commit(struct bch_fs *c, unsigned dev, u64 bucket,
-					 u64 journal_seq)
-{
-	struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
-	struct bucket_hashed new = {
-		.dev_bucket	= (u64) dev << 56 | bucket,
-		.journal_seq	= journal_seq,
-	}, *last_evicted = NULL;
-	u64 flushed_seq = c->journal.flushed_seq_ondisk;
+	struct bucket_hashed *last_evicted = NULL;
 	unsigned tries, i;
-	int ret = 0;
 
-	mutex_lock(&b->lock);
-	BUG_ON(!is_power_of_2(b->nr));
-retry:
-	for (tries = 0; tries < 5; tries++) {
+	for (tries = 0; tries < 10; tries++) {
 		struct bucket_hashed *old, *victim = NULL;
 
-		for (i = 0; i < ARRAY_SIZE(hash_seeds); i++) {
-			old = b->d + (bucket_hash(new.dev_bucket, i) & (b->nr - 1));
+		for (i = 0; i < ARRAY_SIZE(t->hash_seeds); i++) {
+			old = bucket_hash(t, i, new->dev_bucket);
 
-			if (old->dev_bucket == new.dev_bucket ||
+			if (old->dev_bucket == new->dev_bucket ||
 			    old->journal_seq <= flushed_seq) {
-				*old = new;
-				goto out;
+				*old = *new;
+				return true;
 			}
 
 			if (last_evicted != old)
@@ -112,13 +79,64 @@ retry:
 				break;
 		}
 
 		/* Failed to find an empty slot: */
-		swap(new, *victim);
+		swap(*new, *victim);
 		last_evicted = victim;
 	}
 
-	ret = bch2_buckets_waiting_for_journal_rehash(c);
-	if (!ret)
-		goto retry;
+	return false;
+}
+
+int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *b,
+					 u64 flushed_seq,
+					 unsigned dev, u64 bucket,
+					 u64 journal_seq)
+{
+	struct buckets_waiting_for_journal_table *t, *n;
+	struct bucket_hashed tmp, new = {
+		.dev_bucket	= (u64) dev << 56 | bucket,
+		.journal_seq	= journal_seq,
+	};
+	size_t i, new_size, nr_elements = 1, nr_rehashes = 0;
+	int ret = 0;
+
+	mutex_lock(&b->lock);
+
+	if (likely(bucket_table_insert(b->t, &new, flushed_seq)))
+		goto out;
+
+	t = b->t;
+	for (i = 0; i < t->size; i++)
+		nr_elements += t->d[i].journal_seq > flushed_seq;
+
+	new_size = nr_elements < t->size / 3 ? t->size : t->size * 2;
+
+	n = kvmalloc(sizeof(*n) + sizeof(n->d[0]) * new_size, GFP_KERNEL);
+	if (!n) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+retry_rehash:
+	nr_rehashes++;
+	bucket_table_init(n, new_size);
+
+	tmp = new;
+	BUG_ON(!bucket_table_insert(n, &tmp, flushed_seq));
+
+	for (i = 0; i < t->size; i++) {
+		if (t->d[i].journal_seq <= flushed_seq)
+			continue;
+
+		tmp = t->d[i];
+		if (!bucket_table_insert(n, &tmp, flushed_seq))
+			goto retry_rehash;
+	}
+
+	b->t = n;
+	kvfree(t);
+
+	pr_debug("took %zu rehashes, table at %zu/%zu elements",
+		 nr_rehashes, nr_elements, b->t->size);
 out:
 	mutex_unlock(&b->lock);
@@ -129,19 +147,21 @@ void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *c)
 {
 	struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
 
-	kvfree(b->d);
+	kvfree(b->t);
 }
 
+#define INITIAL_TABLE_SIZE	8
+
 int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *c)
 {
 	struct buckets_waiting_for_journal *b = &c->buckets_waiting_for_journal;
 
 	mutex_init(&b->lock);
 
-	b->nr = 8;
-	b->d  = kvmalloc_array(b->nr, sizeof(*b->d), __GFP_ZERO);
-	if (!b->d)
+	b->t = kvmalloc(sizeof(*b->t) + sizeof(b->t->d[0]) * INITIAL_TABLE_SIZE, GFP_KERNEL);
+	if (!b->t)
 		return -ENOMEM;
 
+	bucket_table_init(b->t, INITIAL_TABLE_SIZE);
 	return 0;
 }
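
Net effect on callers: both entry points now take the buckets_waiting_for_journal structure and a flushed journal sequence explicitly, instead of digging them out of struct bch_fs. A hedged sketch of the new calling convention, mirroring the call sites in the hunks above (the wrapper names here are illustrative only, not part of the patch):

static int mark_bucket_empty(struct bch_fs *c, unsigned dev, u64 bucket,
			     u64 journal_seq)
{
	/* as in the bch2_mark_alloc() hunk above */
	return bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
						    c->journal.flushed_seq_ondisk,
						    dev, bucket, journal_seq);
}

static bool bucket_still_dirty(struct bch_fs *c, u64 last_seq_ondisk,
			       unsigned dev, u64 bucket)
{
	/* as in the find_reclaimable_buckets_lru() hunk above */
	return bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
						last_seq_ondisk, dev, bucket);
}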


@@ -4,8 +4,10 @@
 
 #include "buckets_waiting_for_journal_types.h"
 
-bool bch2_bucket_needs_journal_commit(struct bch_fs *, u64, unsigned, u64);
-int bch2_set_bucket_needs_journal_commit(struct bch_fs *, unsigned, u64, u64);
+bool bch2_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
+				      u64, unsigned, u64);
+int bch2_set_bucket_needs_journal_commit(struct buckets_waiting_for_journal *,
+					 u64, unsigned, u64, u64);
 
 void bch2_fs_buckets_waiting_for_journal_exit(struct bch_fs *);
 int bch2_fs_buckets_waiting_for_journal_init(struct bch_fs *);


@@ -2,15 +2,22 @@
 #ifndef _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H
 #define _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H
 
+#include <linux/siphash.h>
+
 struct bucket_hashed {
 	u64			dev_bucket;
 	u64			journal_seq;
 };
 
+struct buckets_waiting_for_journal_table {
+	size_t			size;
+	siphash_key_t		hash_seeds[3];
+	struct bucket_hashed	d[];
+};
+
 struct buckets_waiting_for_journal {
 	struct mutex		lock;
-	size_t			nr;
-	struct bucket_hashed	*d;
+	struct buckets_waiting_for_journal_table *t;
 };
 
 #endif /* _BUCKETS_WAITING_FOR_JOURNAL_TYPES_H */
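
The table header and its flexible d[] array now live in a single allocation, so resizing means allocating a fresh buckets_waiting_for_journal_table and swapping the pointer. A minimal sketch of that allocation pattern, matching bch2_fs_buckets_waiting_for_journal_init() above (alloc_table() itself is illustrative, not part of the patch):

static struct buckets_waiting_for_journal_table *alloc_table(size_t size)
{
	struct buckets_waiting_for_journal_table *t;

	/* one kvmalloc() covers the header and the flexible array: */
	t = kvmalloc(sizeof(*t) + sizeof(t->d[0]) * size, GFP_KERNEL);
	if (t)
		bucket_table_init(t, size);	/* sets size, fresh seeds, zeroed slots */
	return t;
}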


@@ -1893,9 +1893,8 @@ static void bch2_read_endio(struct bio *bio)
 		return;
 	}
 
-	if (rbio->pick.ptr.cached &&
-	    (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
-	     ptr_stale(ca, &rbio->pick.ptr))) {
+	if (((rbio->flags & BCH_READ_RETRY_IF_STALE) && race_fault()) ||
+	    ptr_stale(ca, &rbio->pick.ptr)) {
 		atomic_long_inc(&c->read_realloc_races);
 
 		if (rbio->flags & BCH_READ_RETRY_IF_STALE)


@@ -499,6 +499,17 @@ STORE(bch2_fs)
 
 	/* Debugging: */
 
+	if (!test_bit(BCH_FS_RW, &c->flags))
+		return -EROFS;
+
+	if (attr == &sysfs_prune_cache) {
+		struct shrink_control sc;
+
+		sc.gfp_mask = GFP_KERNEL;
+		sc.nr_to_scan = strtoul_or_return(buf);
+		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
+	}
+
 	if (attr == &sysfs_trigger_gc) {
 		/*
 		 * Full gc is currently incompatible with btree key cache:
@@ -512,14 +523,6 @@
 #endif
 	}
 
-	if (attr == &sysfs_prune_cache) {
-		struct shrink_control sc;
-
-		sc.gfp_mask = GFP_KERNEL;
-		sc.nr_to_scan = strtoul_or_return(buf);
-		c->btree_cache.shrink.scan_objects(&c->btree_cache.shrink, &sc);
-	}
-
 #ifdef CONFIG_BCACHEFS_TESTS
 	if (attr == &sysfs_perf_test) {
 		char *tmp = kstrdup(buf, GFP_KERNEL), *p = tmp;

linux/siphash.c (new file, 552 lines)

@@ -0,0 +1,552 @@
/* Copyright (C) 2016 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
*
* This file is provided under a dual BSD/GPLv2 license.
*
* SipHash: a fast short-input PRF
* https://131002.net/siphash/
*
* This implementation is specifically for SipHash2-4 for a secure PRF
* and HalfSipHash1-3/SipHash1-3 for an insecure PRF only suitable for
* hashtables.
*/
#include <linux/siphash.h>
#include <linux/bitops.h>
#include <asm/unaligned.h>
#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
#include <linux/dcache.h>
#include <asm/word-at-a-time.h>
#endif
#define SIPROUND \
do { \
v0 += v1; v1 = rol64(v1, 13); v1 ^= v0; v0 = rol64(v0, 32); \
v2 += v3; v3 = rol64(v3, 16); v3 ^= v2; \
v0 += v3; v3 = rol64(v3, 21); v3 ^= v0; \
v2 += v1; v1 = rol64(v1, 17); v1 ^= v2; v2 = rol64(v2, 32); \
} while (0)
#define PREAMBLE(len) \
u64 v0 = 0x736f6d6570736575ULL; \
u64 v1 = 0x646f72616e646f6dULL; \
u64 v2 = 0x6c7967656e657261ULL; \
u64 v3 = 0x7465646279746573ULL; \
u64 b = ((u64)(len)) << 56; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
v1 ^= key->key[1]; \
v0 ^= key->key[0];
#define POSTAMBLE \
v3 ^= b; \
SIPROUND; \
SIPROUND; \
v0 ^= b; \
v2 ^= 0xff; \
SIPROUND; \
SIPROUND; \
SIPROUND; \
SIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
u64 __siphash_aligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
const u8 left = len & (sizeof(u64) - 1);
u64 m;
PREAMBLE(len)
for (; data != end; data += sizeof(u64)) {
m = le64_to_cpup(data);
v3 ^= m;
SIPROUND;
SIPROUND;
v0 ^= m;
}
#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
if (left)
b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
bytemask_from_count(left)));
#else
switch (left) {
case 7: b |= ((u64)end[6]) << 48; fallthrough;
case 6: b |= ((u64)end[5]) << 40; fallthrough;
case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
#endif
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_aligned);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u64 __siphash_unaligned(const void *data, size_t len, const siphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
const u8 left = len & (sizeof(u64) - 1);
u64 m;
PREAMBLE(len)
for (; data != end; data += sizeof(u64)) {
m = get_unaligned_le64(data);
v3 ^= m;
SIPROUND;
SIPROUND;
v0 ^= m;
}
#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
if (left)
b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
bytemask_from_count(left)));
#else
switch (left) {
case 7: b |= ((u64)end[6]) << 48; fallthrough;
case 6: b |= ((u64)end[5]) << 40; fallthrough;
case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
#endif
POSTAMBLE
}
EXPORT_SYMBOL(__siphash_unaligned);
#endif
/**
* siphash_1u64 - compute 64-bit siphash PRF value of a u64
* @first: first u64
* @key: the siphash key
*/
u64 siphash_1u64(const u64 first, const siphash_key_t *key)
{
PREAMBLE(8)
v3 ^= first;
SIPROUND;
SIPROUND;
v0 ^= first;
POSTAMBLE
}
EXPORT_SYMBOL(siphash_1u64);
/**
* siphash_2u64 - compute 64-bit siphash PRF value of 2 u64
* @first: first u64
* @second: second u64
* @key: the siphash key
*/
u64 siphash_2u64(const u64 first, const u64 second, const siphash_key_t *key)
{
PREAMBLE(16)
v3 ^= first;
SIPROUND;
SIPROUND;
v0 ^= first;
v3 ^= second;
SIPROUND;
SIPROUND;
v0 ^= second;
POSTAMBLE
}
EXPORT_SYMBOL(siphash_2u64);
/**
* siphash_3u64 - compute 64-bit siphash PRF value of 3 u64
* @first: first u64
* @second: second u64
* @third: third u64
* @key: the siphash key
*/
u64 siphash_3u64(const u64 first, const u64 second, const u64 third,
const siphash_key_t *key)
{
PREAMBLE(24)
v3 ^= first;
SIPROUND;
SIPROUND;
v0 ^= first;
v3 ^= second;
SIPROUND;
SIPROUND;
v0 ^= second;
v3 ^= third;
SIPROUND;
SIPROUND;
v0 ^= third;
POSTAMBLE
}
EXPORT_SYMBOL(siphash_3u64);
/**
* siphash_4u64 - compute 64-bit siphash PRF value of 4 u64
* @first: first u64
* @second: second u64
* @third: third u64
* @forth: fourth u64
* @key: the siphash key
*/
u64 siphash_4u64(const u64 first, const u64 second, const u64 third,
const u64 forth, const siphash_key_t *key)
{
PREAMBLE(32)
v3 ^= first;
SIPROUND;
SIPROUND;
v0 ^= first;
v3 ^= second;
SIPROUND;
SIPROUND;
v0 ^= second;
v3 ^= third;
SIPROUND;
SIPROUND;
v0 ^= third;
v3 ^= forth;
SIPROUND;
SIPROUND;
v0 ^= forth;
POSTAMBLE
}
EXPORT_SYMBOL(siphash_4u64);
u64 siphash_1u32(const u32 first, const siphash_key_t *key)
{
PREAMBLE(4)
b |= first;
POSTAMBLE
}
EXPORT_SYMBOL(siphash_1u32);
u64 siphash_3u32(const u32 first, const u32 second, const u32 third,
const siphash_key_t *key)
{
u64 combined = (u64)second << 32 | first;
PREAMBLE(12)
v3 ^= combined;
SIPROUND;
SIPROUND;
v0 ^= combined;
b |= third;
POSTAMBLE
}
EXPORT_SYMBOL(siphash_3u32);
#if BITS_PER_LONG == 64
/* Note that on 64-bit, we make HalfSipHash1-3 actually be SipHash1-3, for
* performance reasons. On 32-bit, below, we actually implement HalfSipHash1-3.
*/
#define HSIPROUND SIPROUND
#define HPREAMBLE(len) PREAMBLE(len)
#define HPOSTAMBLE \
v3 ^= b; \
HSIPROUND; \
v0 ^= b; \
v2 ^= 0xff; \
HSIPROUND; \
HSIPROUND; \
HSIPROUND; \
return (v0 ^ v1) ^ (v2 ^ v3);
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
const u8 left = len & (sizeof(u64) - 1);
u64 m;
HPREAMBLE(len)
for (; data != end; data += sizeof(u64)) {
m = le64_to_cpup(data);
v3 ^= m;
HSIPROUND;
v0 ^= m;
}
#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
if (left)
b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
bytemask_from_count(left)));
#else
switch (left) {
case 7: b |= ((u64)end[6]) << 48; fallthrough;
case 6: b |= ((u64)end[5]) << 40; fallthrough;
case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= le32_to_cpup(data); break;
case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
#endif
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u64));
const u8 left = len & (sizeof(u64) - 1);
u64 m;
HPREAMBLE(len)
for (; data != end; data += sizeof(u64)) {
m = get_unaligned_le64(data);
v3 ^= m;
HSIPROUND;
v0 ^= m;
}
#if defined(CONFIG_DCACHE_WORD_ACCESS) && BITS_PER_LONG == 64
if (left)
b |= le64_to_cpu((__force __le64)(load_unaligned_zeropad(data) &
bytemask_from_count(left)));
#else
switch (left) {
case 7: b |= ((u64)end[6]) << 48; fallthrough;
case 6: b |= ((u64)end[5]) << 40; fallthrough;
case 5: b |= ((u64)end[4]) << 32; fallthrough;
case 4: b |= get_unaligned_le32(end); break;
case 3: b |= ((u64)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
#endif
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
#endif
/**
* hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
* @first: first u32
* @key: the hsiphash key
*/
u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
{
HPREAMBLE(4)
b |= first;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_1u32);
/**
* hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
* @first: first u32
* @second: second u32
* @key: the hsiphash key
*/
u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
{
u64 combined = (u64)second << 32 | first;
HPREAMBLE(8)
v3 ^= combined;
HSIPROUND;
v0 ^= combined;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_2u32);
/**
* hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
* @first: first u32
* @second: second u32
* @third: third u32
* @key: the hsiphash key
*/
u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
const hsiphash_key_t *key)
{
u64 combined = (u64)second << 32 | first;
HPREAMBLE(12)
v3 ^= combined;
HSIPROUND;
v0 ^= combined;
b |= third;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_3u32);
/**
* hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
* @first: first u32
* @second: second u32
* @third: third u32
* @forth: fourth u32
* @key: the hsiphash key
*/
u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
const u32 forth, const hsiphash_key_t *key)
{
u64 combined = (u64)second << 32 | first;
HPREAMBLE(16)
v3 ^= combined;
HSIPROUND;
v0 ^= combined;
combined = (u64)forth << 32 | third;
v3 ^= combined;
HSIPROUND;
v0 ^= combined;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_4u32);
#else
#define HSIPROUND \
do { \
v0 += v1; v1 = rol32(v1, 5); v1 ^= v0; v0 = rol32(v0, 16); \
v2 += v3; v3 = rol32(v3, 8); v3 ^= v2; \
v0 += v3; v3 = rol32(v3, 7); v3 ^= v0; \
v2 += v1; v1 = rol32(v1, 13); v1 ^= v2; v2 = rol32(v2, 16); \
} while (0)
#define HPREAMBLE(len) \
u32 v0 = 0; \
u32 v1 = 0; \
u32 v2 = 0x6c796765U; \
u32 v3 = 0x74656462U; \
u32 b = ((u32)(len)) << 24; \
v3 ^= key->key[1]; \
v2 ^= key->key[0]; \
v1 ^= key->key[1]; \
v0 ^= key->key[0];
#define HPOSTAMBLE \
v3 ^= b; \
HSIPROUND; \
v0 ^= b; \
v2 ^= 0xff; \
HSIPROUND; \
HSIPROUND; \
HSIPROUND; \
return v1 ^ v3;
u32 __hsiphash_aligned(const void *data, size_t len, const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u32));
const u8 left = len & (sizeof(u32) - 1);
u32 m;
HPREAMBLE(len)
for (; data != end; data += sizeof(u32)) {
m = le32_to_cpup(data);
v3 ^= m;
HSIPROUND;
v0 ^= m;
}
switch (left) {
case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= le16_to_cpup(data); break;
case 1: b |= end[0];
}
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_aligned);
#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
u32 __hsiphash_unaligned(const void *data, size_t len,
const hsiphash_key_t *key)
{
const u8 *end = data + len - (len % sizeof(u32));
const u8 left = len & (sizeof(u32) - 1);
u32 m;
HPREAMBLE(len)
for (; data != end; data += sizeof(u32)) {
m = get_unaligned_le32(data);
v3 ^= m;
HSIPROUND;
v0 ^= m;
}
switch (left) {
case 3: b |= ((u32)end[2]) << 16; fallthrough;
case 2: b |= get_unaligned_le16(end); break;
case 1: b |= end[0];
}
HPOSTAMBLE
}
EXPORT_SYMBOL(__hsiphash_unaligned);
#endif
/**
* hsiphash_1u32 - compute 32-bit hsiphash PRF value of a u32
* @first: first u32
* @key: the hsiphash key
*/
u32 hsiphash_1u32(const u32 first, const hsiphash_key_t *key)
{
HPREAMBLE(4)
v3 ^= first;
HSIPROUND;
v0 ^= first;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_1u32);
/**
* hsiphash_2u32 - compute 32-bit hsiphash PRF value of 2 u32
* @first: first u32
* @second: second u32
* @key: the hsiphash key
*/
u32 hsiphash_2u32(const u32 first, const u32 second, const hsiphash_key_t *key)
{
HPREAMBLE(8)
v3 ^= first;
HSIPROUND;
v0 ^= first;
v3 ^= second;
HSIPROUND;
v0 ^= second;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_2u32);
/**
* hsiphash_3u32 - compute 32-bit hsiphash PRF value of 3 u32
* @first: first u32
* @second: second u32
* @third: third u32
* @key: the hsiphash key
*/
u32 hsiphash_3u32(const u32 first, const u32 second, const u32 third,
const hsiphash_key_t *key)
{
HPREAMBLE(12)
v3 ^= first;
HSIPROUND;
v0 ^= first;
v3 ^= second;
HSIPROUND;
v0 ^= second;
v3 ^= third;
HSIPROUND;
v0 ^= third;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_3u32);
/**
* hsiphash_4u32 - compute 32-bit hsiphash PRF value of 4 u32
* @first: first u32
* @second: second u32
* @third: third u32
* @forth: fourth u32
* @key: the hsiphash key
*/
u32 hsiphash_4u32(const u32 first, const u32 second, const u32 third,
const u32 forth, const hsiphash_key_t *key)
{
HPREAMBLE(16)
v3 ^= first;
HSIPROUND;
v0 ^= first;
v3 ^= second;
HSIPROUND;
v0 ^= second;
v3 ^= third;
HSIPROUND;
v0 ^= third;
v3 ^= forth;
HSIPROUND;
v0 ^= forth;
HPOSTAMBLE
}
EXPORT_SYMBOL(hsiphash_4u32);
#endif
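
A short usage sketch for the 32-bit variant (illustrative names only; as the header comment says, hsiphash is only suitable for hashtable keying, not as a general-purpose PRF). A common pattern is masking the result down to a power-of-two table index:

#include <linux/siphash.h>

static hsiphash_key_t table_key;	/* fill once with get_random_bytes() */

static u32 bucket_index(u32 a, u32 b, u32 table_size)
{
	/* table_size must be a power of two for the mask to be valid: */
	return hsiphash_2u32(a, b, &table_key) & (table_size - 1);
}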