bcachefs-tools (mirror of https://github.com/koverstreet/bcachefs-tools.git)

commit 3588fa621c (parent ff86d47221)

    fixes for 32 bit builds/non x86 archs

@@ -1,13 +1,13 @@
 #ifndef _ASM_UNALIGNED_H
 #define _ASM_UNALIGNED_H
 
-#if defined(__LITTLE_ENDIAN)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 # include <linux/unaligned/le_struct.h>
 # include <linux/unaligned/be_byteshift.h>
 # include <linux/unaligned/generic.h>
 # define get_unaligned	__get_unaligned_le
 # define put_unaligned	__put_unaligned_le
-#elif defined(__BIG_ENDIAN)
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 # include <linux/unaligned/be_struct.h>
 # include <linux/unaligned/le_byteshift.h>
 # include <linux/unaligned/generic.h>
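
Why the endianness tests change: with glibc, <endian.h> defines both __LITTLE_ENDIAN and __BIG_ENDIAN as plain numeric constants on every architecture, so `#if defined(__LITTLE_ENDIAN)` takes the little-endian branch even on big-endian machines. The compiler-predefined __BYTE_ORDER__ / __ORDER_*_ENDIAN__ macros (GCC and Clang) compare unambiguously and need no headers. A minimal standalone sketch of the corrected test:

```c
/* Minimal sketch: compile-time endianness check via the compiler's
 * predefined macros; the #if itself needs no includes. */
#include <stdio.h>

int main(void)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	puts("little-endian target");
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	puts("big-endian target");
#else
#error "unknown byte order"
#endif
	return 0;
}
```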

@@ -4,11 +4,58 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 
-#define xchg(p, v)						\
-	__atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
+typedef struct {
+	int		counter;
+} atomic_t;
 
-#define xchg_acquire(p, v)					\
-	__atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
+typedef struct {
+	long		counter;
+} atomic_long_t;
+
+typedef struct {
+	u64		counter;
+} atomic64_t;
+
+#ifndef C11_ATOMICS
+
+#include <urcu/uatomic.h>
+
+#if (CAA_BITS_PER_LONG != 64)
+#define ATOMIC64_SPINLOCK
+#endif
+
+#define __ATOMIC_READ(p)		uatomic_read(p)
+#define __ATOMIC_SET(p, v)		uatomic_set(p, v)
+#define __ATOMIC_ADD_RETURN(v, p)	uatomic_add_return(p, v)
+#define __ATOMIC_SUB_RETURN(v, p)	uatomic_sub_return(p, v)
+#define __ATOMIC_ADD(v, p)		uatomic_add(p, v)
+#define __ATOMIC_SUB(v, p)		uatomic_sub(p, v)
+#define __ATOMIC_INC(p)			uatomic_inc(p)
+#define __ATOMIC_DEC(p)			uatomic_dec(p)
+
+#define xchg(p, v)			uatomic_xchg(p, v)
+#define xchg_acquire(p, v)		uatomic_xchg(p, v)
+#define cmpxchg(p, old, new)		uatomic_cmpxchg(p, old, new)
+#define cmpxchg_acquire(p, old, new)	uatomic_cmpxchg(p, old, new)
+
+#define smp_mb__before_atomic()		cmm_smp_mb__before_uatomic_add()
+#define smp_mb__after_atomic()		cmm_smp_mb__after_uatomic_add()
+#define smp_wmb()			cmm_smp_wmb()
+#define smp_rmb()			cmm_smp_rmb()
+#define smp_mb()			cmm_smp_mb()
+#define smp_read_barrier_depends()	cmm_smp_read_barrier_depends()
+
+#else /* C11_ATOMICS */
+
+#define __ATOMIC_READ(p)		__atomic_load_n(p, __ATOMIC_RELAXED)
+#define __ATOMIC_SET(p, v)		__atomic_store_n(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_ADD_RETURN(v, p)	__atomic_add_fetch(p, v, __ATOMIC_RELAXED)
+#define __ATOMIC_ADD_RETURN_RELEASE(v, p)			\
+					__atomic_add_fetch(p, v, __ATOMIC_RELEASE)
+#define __ATOMIC_SUB_RETURN(v, p)	__atomic_sub_fetch(p, v, __ATOMIC_RELAXED)
+
+#define xchg(p, v)			__atomic_exchange_n(p, v, __ATOMIC_SEQ_CST)
+#define xchg_acquire(p, v)		__atomic_exchange_n(p, v, __ATOMIC_ACQUIRE)
 
 #define cmpxchg(p, old, new)					\
 ({								\
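
The header now carries two interchangeable backends: liburcu's uatomic ops (with ATOMIC64_SPINLOCK set when CAA_BITS_PER_LONG != 64, i.e. 32-bit targets without native 64-bit atomics), and the GCC/Clang __atomic builtins under C11_ATOMICS. A hedged sketch of how the builtin branch maps onto the kernel-style API; the demo_* types and names are illustrative, not the real header:

```c
#include <stdio.h>

typedef struct { int counter; } demo_atomic_t;

/* __atomic_add_fetch returns the *new* value, which is exactly the
 * kernel's atomic_add_return() contract. */
static inline int demo_atomic_add_return(int i, demo_atomic_t *v)
{
	return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
}

static inline int demo_atomic_xchg(demo_atomic_t *v, int i)
{
	return __atomic_exchange_n(&v->counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
	demo_atomic_t a = { 0 };

	printf("%d\n", demo_atomic_add_return(5, &a));	/* prints 5 */
	printf("%d\n", demo_atomic_xchg(&a, 9));	/* prints 5 (old value) */
	return 0;
}
```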

@@ -37,6 +84,8 @@
 #define smp_mb()		__atomic_thread_fence(__ATOMIC_SEQ_CST)
 #define smp_read_barrier_depends()
 
+#endif
+
 #define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_load_acquire(p)					\

@@ -52,199 +101,179 @@ do {							\
 	WRITE_ONCE(*p, v);					\
 } while (0)
 
-typedef struct {
-	int		counter;
-} atomic_t;
-
-static inline int atomic_read(const atomic_t *v)
-{
-	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_set(atomic_t *v, int i)
-{
-	__atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline int atomic_add_return(int i, atomic_t *v)
-{
-	return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline int atomic_sub_return(int i, atomic_t *v)
-{
-	return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
-	return atomic_add_return(i, v) < 0;
-}
-
-static inline void atomic_add(int i, atomic_t *v)
-{
-	atomic_add_return(i, v);
-}
-
-static inline void atomic_sub(int i, atomic_t *v)
-{
-	atomic_sub_return(i, v);
-}
-
-static inline void atomic_inc(atomic_t *v)
-{
-	atomic_add(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
-	atomic_sub(1, v);
-}
-
-#define atomic_dec_return(v)		atomic_sub_return(1, (v))
-#define atomic_inc_return(v)		atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v)	(atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v)		(atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v)		(atomic_inc_return(v) == 0)
-
-#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
-
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
-{
-	int c, old;
-	c = atomic_read(v);
-	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
-		c = old;
-	return c;
-}
-
-#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)
-
-typedef struct {
-	long		counter;
-} atomic_long_t;
-
-static inline long atomic_long_read(const atomic_long_t *v)
-{
-	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_long_set(atomic_long_t *v, long i)
-{
-	__atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *v)
-{
-	return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *v)
-{
-	return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *v)
-{
-	atomic_long_add_return(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *v)
-{
-	atomic_long_sub_return(i, v);
-}
-
-static inline void atomic_long_inc(atomic_long_t *v)
-{
-	atomic_long_add(1, v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *v)
-{
-	atomic_long_sub(1, v);
-}
-
-static inline long atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
-{
-	return cmpxchg(&v->counter, old, new);
-}
-
-static inline bool atomic_long_inc_not_zero(atomic_long_t *i)
-{
-	long old, v = atomic_long_read(i);
-
-	do {
-		if (!(old = v))
-			return false;
-	} while ((v = atomic_long_cmpxchg(i, old, old + 1)) != old);
-
-	return true;
-}
-
-#define atomic_long_sub_and_test(i, v)	(atomic_long_sub_return((i), (v)) == 0)
-
-typedef struct {
-	u64		counter;
-} atomic64_t;
-
-static inline s64 atomic64_read(const atomic64_t *v)
-{
-	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
-}
-
-static inline void atomic64_set(atomic64_t *v, s64 i)
-{
-	__atomic_store_n(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline s64 atomic64_add_return(s64 i, atomic64_t *v)
-{
-	return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline s64 atomic64_sub_return(s64 i, atomic64_t *v)
-{
-	return __atomic_sub_fetch(&v->counter, i, __ATOMIC_RELAXED);
-}
-
-static inline void atomic64_add(s64 i, atomic64_t *v)
-{
-	atomic64_add_return(i, v);
-}
-
-static inline void atomic64_sub(s64 i, atomic64_t *v)
-{
-	atomic64_sub_return(i, v);
-}
-
-static inline void atomic64_inc(atomic64_t *v)
-{
-	atomic64_add(1, v);
-}
-
-static inline void atomic64_dec(atomic64_t *v)
-{
-	atomic64_sub(1, v);
-}
-
-#define atomic64_dec_return(v)		atomic64_sub_return(1, (v))
-#define atomic64_inc_return(v)		atomic64_add_return(1, (v))
-
-static inline s64 atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
-{
-	return cmpxchg(&v->counter, old, new);
-}
-
-static inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
-{
-	return cmpxchg_acquire(&v->counter, old, new);
-}
-
-static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v)
-{
-	return __atomic_add_fetch(&v->counter, i, __ATOMIC_RELEASE);
-}
+/* atomic interface: */
+
+#ifndef __ATOMIC_ADD
+#define __ATOMIC_ADD(i, v) __ATOMIC_ADD_RETURN(i, v)
+#endif
+
+#ifndef __ATOMIC_ADD_RETURN_RELEASE
+#define __ATOMIC_ADD_RETURN_RELEASE(i, v)			\
+	({ smp_mb__before_atomic(); __ATOMIC_ADD_RETURN(i, v); })
+#endif
+
+#ifndef __ATOMIC_SUB
+#define __ATOMIC_SUB(i, v) __ATOMIC_SUB_RETURN(i, v)
+#endif
+
+#ifndef __ATOMIC_INC_RETURN
+#define __ATOMIC_INC_RETURN(v) __ATOMIC_ADD_RETURN(1, v)
+#endif
+
+#ifndef __ATOMIC_DEC_RETURN
+#define __ATOMIC_DEC_RETURN(v) __ATOMIC_SUB_RETURN(1, v)
+#endif
+
+#ifndef __ATOMIC_INC
+#define __ATOMIC_INC(v) __ATOMIC_ADD(1, v)
+#endif
+
+#ifndef __ATOMIC_DEC
+#define __ATOMIC_DEC(v) __ATOMIC_SUB(1, v)
+#endif
+
+#define DEF_ATOMIC_OPS(a_type, i_type)				\
+static inline i_type a_type##_read(const a_type##_t *v)	\
+{								\
+	return __ATOMIC_READ(&v->counter);			\
+}								\
+								\
+static inline void a_type##_set(a_type##_t *v, i_type i)	\
+{								\
+	return __ATOMIC_SET(&v->counter, i);			\
+}								\
+								\
+static inline i_type a_type##_add_return(i_type i, a_type##_t *v)\
+{								\
+	return __ATOMIC_ADD_RETURN(i, &v->counter);		\
+}								\
+								\
+static inline i_type a_type##_add_return_release(i_type i, a_type##_t *v)\
+{								\
+	return __ATOMIC_ADD_RETURN_RELEASE(i, &v->counter);	\
+}								\
+								\
+static inline i_type a_type##_sub_return(i_type i, a_type##_t *v)\
+{								\
+	return __ATOMIC_SUB_RETURN(i, &v->counter);		\
+}								\
+								\
+static inline void a_type##_add(i_type i, a_type##_t *v)	\
+{								\
+	__ATOMIC_ADD(i, &v->counter);				\
+}								\
+								\
+static inline void a_type##_sub(i_type i, a_type##_t *v)	\
+{								\
+	__ATOMIC_SUB(i, &v->counter);				\
+}								\
+								\
+static inline i_type a_type##_inc_return(a_type##_t *v)	\
+{								\
+	return __ATOMIC_INC_RETURN(&v->counter);		\
+}								\
+								\
+static inline i_type a_type##_dec_return(a_type##_t *v)	\
+{								\
+	return __ATOMIC_DEC_RETURN(&v->counter);		\
+}								\
+								\
+static inline void a_type##_inc(a_type##_t *v)			\
+{								\
+	__ATOMIC_INC(&v->counter);				\
+}								\
+								\
+static inline void a_type##_dec(a_type##_t *v)			\
+{								\
+	__ATOMIC_DEC(&v->counter);				\
+}								\
+								\
+static inline bool a_type##_add_negative(i_type i, a_type##_t *v)\
+{								\
+	return __ATOMIC_ADD_RETURN(i, &v->counter) < 0;	\
+}								\
+								\
+static inline bool a_type##_sub_and_test(i_type i, a_type##_t *v)\
+{								\
+	return __ATOMIC_SUB_RETURN(i, &v->counter) == 0;	\
+}								\
+								\
+static inline bool a_type##_inc_and_test(a_type##_t *v)	\
+{								\
+	return __ATOMIC_INC_RETURN(&v->counter) == 0;		\
+}								\
+								\
+static inline bool a_type##_dec_and_test(a_type##_t *v)	\
+{								\
+	return __ATOMIC_DEC_RETURN(&v->counter) == 0;		\
+}								\
+								\
+static inline i_type a_type##_add_unless(a_type##_t *v, i_type a, i_type u)\
+{								\
+	i_type old, c = __ATOMIC_READ(&v->counter);		\
+	while (c != u && (old = cmpxchg(&v->counter, c, c + a)) != c)\
+		c = old;					\
+	return c;						\
+}								\
+								\
+static inline bool a_type##_inc_not_zero(a_type##_t *v)	\
+{								\
+	return a_type##_add_unless(v, 1, 0);			\
+}								\
+								\
+static inline i_type a_type##_xchg(a_type##_t *v, i_type i)	\
+{								\
+	return xchg(&v->counter, i);				\
+}								\
+								\
+static inline i_type a_type##_cmpxchg(a_type##_t *v, i_type old, i_type new)\
+{								\
+	return cmpxchg(&v->counter, old, new);			\
+}								\
+								\
+static inline i_type a_type##_cmpxchg_acquire(a_type##_t *v, i_type old, i_type new)\
+{								\
+	return cmpxchg_acquire(&v->counter, old, new);		\
+}
+
+DEF_ATOMIC_OPS(atomic,		int)
+DEF_ATOMIC_OPS(atomic_long,	long)
+
+#ifndef ATOMIC64_SPINLOCK
+DEF_ATOMIC_OPS(atomic64,	s64)
+#else
+s64 atomic64_read(const atomic64_t *v);
+void atomic64_set(atomic64_t *v, s64);
+
+s64 atomic64_add_return(s64, atomic64_t *);
+s64 atomic64_sub_return(s64, atomic64_t *);
+void atomic64_add(s64, atomic64_t *);
+void atomic64_sub(s64, atomic64_t *);
+
+s64 atomic64_xchg(atomic64_t *, s64);
+s64 atomic64_cmpxchg(atomic64_t *, s64, s64);
+
+#define atomic64_add_negative(a, v)	(atomic64_add_return((a), (v)) < 0)
+#define atomic64_inc(v)			atomic64_add(1LL, (v))
+#define atomic64_inc_return(v)		atomic64_add_return(1LL, (v))
+#define atomic64_inc_and_test(v)	(atomic64_inc_return(v) == 0)
+#define atomic64_sub_and_test(a, v)	(atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec(v)			atomic64_sub(1LL, (v))
+#define atomic64_dec_return(v)		atomic64_sub_return(1LL, (v))
+#define atomic64_dec_and_test(v)	(atomic64_dec_return((v)) == 0)
+#define atomic64_inc_not_zero(v)	atomic64_add_unless((v), 1LL, 0LL)
+
+static inline s64 atomic64_add_return_release(s64 i, atomic64_t *v)
+{
+	smp_mb__before_atomic();
+	return atomic64_add_return(i, v);
+}
+
+static inline s64 atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
+{
+	return atomic64_cmpxchg(v, old, new);
+}
+#endif
 
 #endif /* __TOOLS_LINUX_ATOMIC_H */
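
DEF_ATOMIC_OPS stamps out one identical API per counter type via `##` token pasting; that is what lets atomic64_t swap in out-of-line spinlocked functions on 32-bit targets while atomic_t and atomic_long_t stay inline. A reduced sketch of the pattern, with illustrative demo_* names rather than the real macro:

```c
#include <stdio.h>

/* One macro generates a typedef plus a typed read helper. */
#define DEF_READ_OPS(a_type, i_type)				\
typedef struct { i_type counter; } a_type##_t;			\
								\
static inline i_type a_type##_read(const a_type##_t *v)	\
{								\
	return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);	\
}

DEF_READ_OPS(demo_atomic, int)		/* demo_atomic_t, demo_atomic_read() */
DEF_READ_OPS(demo_atomic64, long long)	/* demo_atomic64_t, demo_atomic64_read() */

int main(void)
{
	demo_atomic_t a = { 7 };

	printf("%d\n", demo_atomic_read(&a));	/* prints 7 */
	return 0;
}
```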

@@ -1,6 +1,8 @@
 #ifndef _LINUX_BACKING_DEV_H
 #define _LINUX_BACKING_DEV_H
 
+#include <linux/list.h>
+
 typedef int (congested_fn)(void *, int);
 
 enum wb_congested_state {

@@ -1,7 +1,8 @@
 #ifndef __TOOLS_LINUX_CACHE_H
 #define __TOOLS_LINUX_CACHE_H
 
-#define L1_CACHE_BYTES		64
+#define L1_CACHE_SHIFT		6
+#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES		L1_CACHE_BYTES
 
 #define L1_CACHE_ALIGN(x)	__ALIGN_KERNEL(x, L1_CACHE_BYTES)

@@ -5,7 +5,7 @@ struct super_block;
 struct inode;
 
 /* The hash is always the low bits of hash_len */
-#ifdef __LITTLE_ENDIAN
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define HASH_LEN_DECLARE u32 hash; u32 len
 #else
 #define HASH_LEN_DECLARE u32 len; u32 hash

@@ -111,17 +111,11 @@
 #define cpu_relax()		do {} while (0)
 #define cpu_relax_lowlatency()	do {} while (0)
 
-__printf(1, 2)
-static inline void panic(const char *fmt, ...)
-{
-	va_list args;
-
-	va_start(args, fmt);
-	vprintf(fmt, args);
-	va_end(args);
-
-	BUG();
-}
+#define panic(fmt, ...)					\
+do {							\
+	printf(fmt, ##__VA_ARGS__);			\
+	BUG();						\
+} while (0)
 
 unsigned long simple_strtoul(const char *,char **,unsigned int);
 long simple_strtol(const char *,char **,unsigned int);
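
The inline panic() becomes a variadic macro; the GNU `##__VA_ARGS__` extension deletes the trailing comma when no variadic arguments are passed, so a bare `panic("oops")` still compiles. A small self-contained illustration of that comma elision:

```c
#include <stdio.h>

#define log_fail(fmt, ...)	printf("fail: " fmt "\n", ##__VA_ARGS__)

int main(void)
{
	log_fail("no args");		/* comma before __VA_ARGS__ elided */
	log_fail("code %d", 42);	/* normal variadic use */
	return 0;
}
```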

@@ -5,6 +5,13 @@
 
 struct page;
 
+#ifndef PAGE_SIZE
+
 #define PAGE_SIZE	4096UL
 #define PAGE_MASK	(~(PAGE_SIZE - 1))
 
+#endif
+
+#define virt_to_page(p)					\
+	((struct page *) (((unsigned long) (p)) & PAGE_MASK))
+#define offset_in_page(p)	((unsigned long) (p) & ~PAGE_MASK)

@@ -100,7 +100,7 @@ struct bchfs_handle bchu_fs_open_by_dev(const char *, unsigned *);
 
 static inline void bchu_disk_add(struct bchfs_handle fs, char *dev)
 {
-	struct bch_ioctl_disk i = { .dev = (__u64) dev, };
+	struct bch_ioctl_disk i = { .dev = (unsigned long) dev, };
 
 	xioctl(fs.ioctl_fd, BCH_IOCTL_DISK_ADD, &i);
 }
@@ -118,7 +118,7 @@ static inline void bchu_disk_remove(struct bchfs_handle fs, unsigned dev_idx,
 
 static inline void bchu_disk_online(struct bchfs_handle fs, char *dev)
 {
-	struct bch_ioctl_disk i = { .dev = (__u64) dev, };
+	struct bch_ioctl_disk i = { .dev = (unsigned long) dev, };
 
 	xioctl(fs.ioctl_fd, BCH_IOCTL_DISK_ONLINE, &i);
 }
@@ -173,7 +173,7 @@ static inline struct bch_sb *bchu_read_super(struct bchfs_handle fs, unsigned id
 	sb = xrealloc(sb, size);
 	struct bch_ioctl_read_super i = {
 		.size	= size,
-		.sb	= (u64) sb,
+		.sb	= (unsigned long) sb,
 	};
 
 	if (idx != -1) {
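
These cast changes are the 32-bit fix: casting a pointer directly to a 64-bit integer trips GCC's -Wpointer-to-int-cast ("cast from pointer to integer of different size") when pointers are 32 bits wide. Casting through the pointer-sized `unsigned long` is silent on both widths, and the value then widens implicitly into the __u64 ioctl field. A sketch with stand-in names (demo_ioctl_arg is not the real struct):

```c
#include <stdint.h>

struct demo_ioctl_arg {
	uint64_t dev;			/* stand-in for bch_ioctl_disk.dev */
};

static void demo_fill(struct demo_ioctl_arg *a, char *dev)
{
	/* a->dev = (uint64_t) dev;	   warns on 32-bit targets */
	a->dev = (unsigned long) dev;	/* clean on 32- and 64-bit */
}

int main(void)
{
	static char path[] = "/dev/sda";
	struct demo_ioctl_arg a;

	demo_fill(&a, path);
	return a.dev == 0;
}
```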

linux/atomic64.c (new file, 188 lines)
@@ -0,0 +1,188 @@
/*
 * Generic implementation of 64-bit atomics using spinlocks,
 * useful on processors that don't have 64-bit atomic instructions.
 *
 * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/cache.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

#ifdef ATOMIC64_SPINLOCK

/*
 * We use a hashed array of spinlocks to provide exclusive access
 * to each atomic64_t variable.  Since this is expected to be used on
 * systems with small numbers of CPUs (<= 4 or so), we use a
 * relatively small array of 16 spinlocks to avoid wasting too much
 * memory on the spinlock array.
 */
#define NR_LOCKS	16

/*
 * Ensure each lock is in a separate cacheline.
 */
static union {
	raw_spinlock_t lock;
	char pad[L1_CACHE_BYTES];
} atomic64_lock[NR_LOCKS] ____cacheline_aligned_in_smp = {
	[0 ... (NR_LOCKS - 1)] = {
		.lock = __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
	},
};

static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
{
	unsigned long addr = (unsigned long) v;

	addr >>= L1_CACHE_SHIFT;
	addr ^= (addr >> 8) ^ (addr >> 16);
	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
}

long long atomic64_read(const atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

void atomic64_set(atomic64_t *v, long long i)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);

	raw_spin_lock_irqsave(lock, flags);
	v->counter = i;
	raw_spin_unlock_irqrestore(lock, flags);
}

#define ATOMIC64_OP(op, c_op)					\
void atomic64_##op(long long a, atomic64_t *v)			\
{								\
	unsigned long flags;					\
	raw_spinlock_t *lock = lock_addr(v);			\
								\
	raw_spin_lock_irqsave(lock, flags);			\
	v->counter c_op a;					\
	raw_spin_unlock_irqrestore(lock, flags);		\
}

#define ATOMIC64_OP_RETURN(op, c_op)				\
long long atomic64_##op##_return(long long a, atomic64_t *v)	\
{								\
	unsigned long flags;					\
	raw_spinlock_t *lock = lock_addr(v);			\
	long long val;						\
								\
	raw_spin_lock_irqsave(lock, flags);			\
	val = (v->counter c_op a);				\
	raw_spin_unlock_irqrestore(lock, flags);		\
	return val;						\
}

#define ATOMIC64_FETCH_OP(op, c_op)				\
long long atomic64_fetch_##op(long long a, atomic64_t *v)	\
{								\
	unsigned long flags;					\
	raw_spinlock_t *lock = lock_addr(v);			\
	long long val;						\
								\
	raw_spin_lock_irqsave(lock, flags);			\
	val = v->counter;					\
	v->counter c_op a;					\
	raw_spin_unlock_irqrestore(lock, flags);		\
	return val;						\
}

#define ATOMIC64_OPS(op, c_op)					\
	ATOMIC64_OP(op, c_op)					\
	ATOMIC64_OP_RETURN(op, c_op)				\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(add, +=)
ATOMIC64_OPS(sub, -=)

#undef ATOMIC64_OPS
#define ATOMIC64_OPS(op, c_op)					\
	ATOMIC64_OP(op, c_op)					\
	ATOMIC64_OP_RETURN(op, c_op)				\
	ATOMIC64_FETCH_OP(op, c_op)

ATOMIC64_OPS(and, &=)
ATOMIC64_OPS(or, |=)
ATOMIC64_OPS(xor, ^=)

#undef ATOMIC64_OPS
#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP

long long atomic64_dec_if_positive(atomic64_t *v)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter - 1;
	if (val >= 0)
		v->counter = val;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	if (val == o)
		v->counter = n;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

long long atomic64_xchg(atomic64_t *v, long long new)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	long long val;

	raw_spin_lock_irqsave(lock, flags);
	val = v->counter;
	v->counter = new;
	raw_spin_unlock_irqrestore(lock, flags);
	return val;
}

int atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
	unsigned long flags;
	raw_spinlock_t *lock = lock_addr(v);
	int ret = 0;

	raw_spin_lock_irqsave(lock, flags);
	if (v->counter != u) {
		v->counter += a;
		ret = 1;
	}
	raw_spin_unlock_irqrestore(lock, flags);
	return ret;
}

#endif
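
The pattern worth noting here is lock striping: hash the variable's address to pick one of a small fixed pool of locks, so unrelated atomic64_t variables rarely contend while the lock table stays tiny. A userspace sketch of the same idea, with pthread mutexes standing in for raw spinlocks and hypothetical demo_* names:

```c
#include <pthread.h>
#include <stdint.h>

#define DEMO_NR_LOCKS 16
static pthread_mutex_t demo_locks[DEMO_NR_LOCKS] = {
	[0 ... DEMO_NR_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER,	/* GNU range init */
};

static pthread_mutex_t *demo_lock_for(const void *v)
{
	uintptr_t addr = (uintptr_t) v;

	addr >>= 6;				/* drop cacheline-offset bits */
	addr ^= (addr >> 8) ^ (addr >> 16);	/* mix higher bits in */
	return &demo_locks[addr & (DEMO_NR_LOCKS - 1)];
}

static int64_t demo_add_return(int64_t a, int64_t *v)
{
	pthread_mutex_t *lock = demo_lock_for(v);
	int64_t ret;

	pthread_mutex_lock(lock);
	ret = (*v += a);
	pthread_mutex_unlock(lock);
	return ret;
}

int main(void)
{
	int64_t x = 0;

	return (int) demo_add_return(1, &x) - 1;	/* 0 on success */
}
```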

@@ -62,9 +62,9 @@ typedef uintptr_t uptrval;
 #define LZ4_ARCH64 0
 #endif
 
-#if defined(__LITTLE_ENDIAN)
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 #define LZ4_LITTLE_ENDIAN 1
-#else
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 #define LZ4_LITTLE_ENDIAN 0
 #endif
 
@@ -3,6 +3,10 @@
 #include <string.h>
 #include <sys/mman.h>
 
+/* hack for mips: */
+#define CONFIG_RCU_HAVE_FUTEX 1
+#include <urcu/futex.h>
+
 #include <linux/math64.h>
 #include <linux/printk.h>
 #include <linux/rcupdate.h>

tools-util.c (11 lines changed)
@@ -534,8 +534,6 @@ static u32 crc32c_sse42(u32 crc, const void *buf, size_t size)
 
 static void *resolve_crc32c(void)
 {
-	__builtin_cpu_init();
-
 #ifdef __x86_64__
 	if (__builtin_cpu_supports("sse4.2"))
 		return crc32c_sse42;
@@ -548,8 +546,15 @@ static void *resolve_crc32c(void)
  */
 #ifdef HAVE_WORKING_IFUNC
 
+static void *ifunc_resolve_crc32c(void)
+{
+	__builtin_cpu_init();
+
+	return resolve_crc32c();
+}
+
 u32 crc32c(u32, const void *, size_t)
-	__attribute__((ifunc("resolve_crc32c")));
+	__attribute__((ifunc("ifunc_resolve_crc32c")));
 
 #else
 
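
The split moves the x86-only `__builtin_cpu_init()` out of the generic resolve_crc32c() and into a wrapper used only on the GNU ifunc path, so the generic resolver also builds on non-x86 architectures. For reference, a minimal ifunc sketch (ELF targets, GCC/Clang; the add/resolve_add names are illustrative):

```c
/* The resolver runs at dynamic-link time and returns the function
 * pointer that the ifunc symbol should bind to. */
#include <stdio.h>

static int add_generic(int a, int b)
{
	return a + b;
}

static void *resolve_add(void)
{
	/* A real resolver would probe CPU features here and could
	 * return a hand-optimized variant instead. */
	return add_generic;
}

int add(int a, int b) __attribute__((ifunc("resolve_add")));

int main(void)
{
	printf("%d\n", add(2, 3));	/* prints 5 */
	return 0;
}
```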