mirror of
https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-02 00:00:03 +03:00
Delete more shim layer code
This commit is contained in:
parent
94cafcb8e5
commit
825d180c07
@ -1,85 +0,0 @@
|
||||
#ifndef _LINUX_BITREV_H
|
||||
#define _LINUX_BITREV_H
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
#ifdef CONFIG_HAVE_ARCH_BITREVERSE
|
||||
#include <asm/bitrev.h>
|
||||
|
||||
#define __bitrev32 __arch_bitrev32
|
||||
#define __bitrev16 __arch_bitrev16
|
||||
#define __bitrev8 __arch_bitrev8
|
||||
|
||||
#else
|
||||
extern u8 const byte_rev_table[256];
|
||||
static inline u8 __bitrev8(u8 byte)
|
||||
{
|
||||
return byte_rev_table[byte];
|
||||
}
|
||||
|
||||
static inline u16 __bitrev16(u16 x)
|
||||
{
|
||||
return (__bitrev8(x & 0xff) << 8) | __bitrev8(x >> 8);
|
||||
}
|
||||
|
||||
static inline u32 __bitrev32(u32 x)
|
||||
{
|
||||
return (__bitrev16(x & 0xffff) << 16) | __bitrev16(x >> 16);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_HAVE_ARCH_BITREVERSE */
|
||||
|
||||
#define __constant_bitrev32(x) \
|
||||
({ \
|
||||
u32 __x = x; \
|
||||
__x = (__x >> 16) | (__x << 16); \
|
||||
__x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \
|
||||
__x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \
|
||||
__x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \
|
||||
__x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \
|
||||
__x; \
|
||||
})
|
||||
|
||||
#define __constant_bitrev16(x) \
|
||||
({ \
|
||||
u16 __x = x; \
|
||||
__x = (__x >> 8) | (__x << 8); \
|
||||
__x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \
|
||||
__x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \
|
||||
__x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \
|
||||
__x; \
|
||||
})
|
||||
|
||||
#define __constant_bitrev8(x) \
|
||||
({ \
|
||||
u8 __x = x; \
|
||||
__x = (__x >> 4) | (__x << 4); \
|
||||
__x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \
|
||||
__x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \
|
||||
__x; \
|
||||
})
|
||||
|
||||
#define bitrev32(x) \
|
||||
({ \
|
||||
u32 __x = x; \
|
||||
__builtin_constant_p(__x) ? \
|
||||
__constant_bitrev32(__x) : \
|
||||
__bitrev32(__x); \
|
||||
})
|
||||
|
||||
#define bitrev16(x) \
|
||||
({ \
|
||||
u16 __x = x; \
|
||||
__builtin_constant_p(__x) ? \
|
||||
__constant_bitrev16(__x) : \
|
||||
__bitrev16(__x); \
|
||||
})
|
||||
|
||||
#define bitrev8(x) \
|
||||
({ \
|
||||
u8 __x = x; \
|
||||
__builtin_constant_p(__x) ? \
|
||||
__constant_bitrev8(__x) : \
|
||||
__bitrev8(__x) ; \
|
||||
})
|
||||
#endif /* _LINUX_BITREV_H */
|
@ -17,34 +17,10 @@
|
||||
|
||||
#include <linux/fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
struct device;
|
||||
struct file_operations;
|
||||
struct vfsmount;
|
||||
struct srcu_struct;
|
||||
|
||||
struct debugfs_blob_wrapper {
|
||||
void *data;
|
||||
unsigned long size;
|
||||
};
|
||||
|
||||
struct debugfs_reg32 {
|
||||
char *name;
|
||||
unsigned long offset;
|
||||
};
|
||||
|
||||
struct debugfs_regset32 {
|
||||
const struct debugfs_reg32 *regs;
|
||||
int nregs;
|
||||
void __iomem *base;
|
||||
};
|
||||
|
||||
extern struct dentry *arch_debugfs_dir;
|
||||
|
||||
extern struct srcu_struct debugfs_srcu;
|
||||
|
||||
#include <linux/err.h>
|
||||
|
||||
@ -55,189 +31,16 @@ static inline struct dentry *debugfs_create_file(const char *name, umode_t mode,
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
|
||||
struct dentry *parent, void *data,
|
||||
const struct file_operations *fops,
|
||||
loff_t file_size)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_dir(const char *name,
|
||||
struct dentry *parent)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_symlink(const char *name,
|
||||
struct dentry *parent,
|
||||
const char *dest)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_automount(const char *name,
|
||||
struct dentry *parent,
|
||||
struct vfsmount *(*f)(void *),
|
||||
void *data)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline void debugfs_remove(struct dentry *dentry)
|
||||
{ }
|
||||
|
||||
static inline void debugfs_remove_recursive(struct dentry *dentry)
|
||||
{ }
|
||||
|
||||
static inline int debugfs_use_file_start(const struct dentry *dentry,
|
||||
int *srcu_idx)
|
||||
__acquires(&debugfs_srcu)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline void debugfs_use_file_finish(int srcu_idx)
|
||||
__releases(&debugfs_srcu)
|
||||
{ }
|
||||
|
||||
#define DEFINE_DEBUGFS_ATTRIBUTE(__fops, __get, __set, __fmt) \
|
||||
static const struct file_operations __fops = { 0 }
|
||||
|
||||
static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
|
||||
struct dentry *new_dir, char *new_name)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u8 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u16 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u32 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u64 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u8 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u16 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u32 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u64 *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
size_t *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
|
||||
struct dentry *parent, atomic_t *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
bool *value)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
struct debugfs_blob_wrapper *blob)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_regset32(const char *name,
|
||||
umode_t mode, struct dentry *parent,
|
||||
struct debugfs_regset32 *regset)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
|
||||
int nregs, void __iomem *base, char *prefix)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool debugfs_initialized(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
|
||||
struct dentry *parent,
|
||||
u32 *array, u32 elements)
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline struct dentry *debugfs_create_devm_seqfile(struct device *dev,
|
||||
const char *name,
|
||||
struct dentry *parent,
|
||||
int (*read_fn)(struct seq_file *s,
|
||||
void *data))
|
||||
{
|
||||
return ERR_PTR(-ENODEV);
|
||||
}
|
||||
|
||||
static inline ssize_t debugfs_read_file_bool(struct file *file,
|
||||
char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
static inline ssize_t debugfs_write_file_bool(struct file *file,
|
||||
const char __user *user_buf,
|
||||
size_t count, loff_t *ppos)
|
||||
{
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -20,10 +20,8 @@
|
||||
#include <linux/bug.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/workqueue.h>
|
||||
|
||||
struct kset;
|
||||
@ -52,7 +50,7 @@ struct kobject {
|
||||
struct kset *kset;
|
||||
struct kobj_type *ktype;
|
||||
struct kernfs_node *sd; /* sysfs directory entry */
|
||||
struct kref kref;
|
||||
atomic_t ref;
|
||||
unsigned int state_initialized:1;
|
||||
unsigned int state_in_sysfs:1;
|
||||
unsigned int state_add_uevent_sent:1;
|
||||
@ -64,18 +62,13 @@ struct kset {
|
||||
struct kobject kobj;
|
||||
};
|
||||
|
||||
static inline struct kobj_type *get_ktype(struct kobject *kobj)
|
||||
{
|
||||
return kobj->ktype;
|
||||
}
|
||||
|
||||
#define kobject_add(...) 0
|
||||
|
||||
static inline void kobject_init(struct kobject *kobj, struct kobj_type *ktype)
|
||||
{
|
||||
memset(kobj, 0, sizeof(*kobj));
|
||||
|
||||
kref_init(&kobj->kref);
|
||||
atomic_set(&kobj->ref, 1);
|
||||
kobj->ktype = ktype;
|
||||
kobj->state_initialized = 1;
|
||||
}
|
||||
@ -84,7 +77,7 @@ static inline void kobject_del(struct kobject *kobj);
|
||||
|
||||
static inline void kobject_cleanup(struct kobject *kobj)
|
||||
{
|
||||
struct kobj_type *t = get_ktype(kobj);
|
||||
struct kobj_type *t = kobj->ktype;
|
||||
|
||||
/* remove from sysfs if the caller did not do it */
|
||||
if (kobj->state_in_sysfs)
|
||||
@ -94,19 +87,13 @@ static inline void kobject_cleanup(struct kobject *kobj)
|
||||
t->release(kobj);
|
||||
}
|
||||
|
||||
static inline void kobject_release(struct kref *kref)
|
||||
{
|
||||
struct kobject *kobj = container_of(kref, struct kobject, kref);
|
||||
|
||||
kobject_cleanup(kobj);
|
||||
}
|
||||
|
||||
static inline void kobject_put(struct kobject *kobj)
|
||||
{
|
||||
BUG_ON(!kobj);
|
||||
BUG_ON(!kobj->state_initialized);
|
||||
|
||||
kref_put(&kobj->kref, kobject_release);
|
||||
if (atomic_dec_and_test(&kobj->ref))
|
||||
kobject_cleanup(kobj);
|
||||
}
|
||||
|
||||
static inline void kobject_del(struct kobject *kobj)
|
||||
@ -130,7 +117,7 @@ static inline struct kobject *kobject_get(struct kobject *kobj)
|
||||
BUG_ON(!kobj);
|
||||
BUG_ON(!kobj->state_initialized);
|
||||
|
||||
kref_get(&kobj->kref);
|
||||
atomic_inc(&kobj->ref);
|
||||
return kobj;
|
||||
}
|
||||
|
||||
|
@ -1,138 +0,0 @@
|
||||
/*
|
||||
* kref.h - library routines for handling generic reference counted objects
|
||||
*
|
||||
* Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com>
|
||||
* Copyright (C) 2004 IBM Corp.
|
||||
*
|
||||
* based on kobject.h which was:
|
||||
* Copyright (C) 2002-2003 Patrick Mochel <mochel@osdl.org>
|
||||
* Copyright (C) 2002-2003 Open Source Development Labs
|
||||
*
|
||||
* This file is released under the GPLv2.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _KREF_H_
|
||||
#define _KREF_H_
|
||||
|
||||
#include <linux/bug.h>
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mutex.h>
|
||||
|
||||
struct kref {
|
||||
atomic_t refcount;
|
||||
};
|
||||
|
||||
/**
|
||||
* kref_init - initialize object.
|
||||
* @kref: object in question.
|
||||
*/
|
||||
static inline void kref_init(struct kref *kref)
|
||||
{
|
||||
atomic_set(&kref->refcount, 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* kref_get - increment refcount for object.
|
||||
* @kref: object.
|
||||
*/
|
||||
static inline void kref_get(struct kref *kref)
|
||||
{
|
||||
/* If refcount was 0 before incrementing then we have a race
|
||||
* condition when this kref is freeing by some other thread right now.
|
||||
* In this case one should use kref_get_unless_zero()
|
||||
*/
|
||||
WARN_ON_ONCE(atomic_inc_return(&kref->refcount) < 2);
|
||||
}
|
||||
|
||||
/**
|
||||
* kref_sub - subtract a number of refcounts for object.
|
||||
* @kref: object.
|
||||
* @count: Number of recounts to subtract.
|
||||
* @release: pointer to the function that will clean up the object when the
|
||||
* last reference to the object is released.
|
||||
* This pointer is required, and it is not acceptable to pass kfree
|
||||
* in as this function. If the caller does pass kfree to this
|
||||
* function, you will be publicly mocked mercilessly by the kref
|
||||
* maintainer, and anyone else who happens to notice it. You have
|
||||
* been warned.
|
||||
*
|
||||
* Subtract @count from the refcount, and if 0, call release().
|
||||
* Return 1 if the object was removed, otherwise return 0. Beware, if this
|
||||
* function returns 0, you still can not count on the kref from remaining in
|
||||
* memory. Only use the return value if you want to see if the kref is now
|
||||
* gone, not present.
|
||||
*/
|
||||
static inline int kref_sub(struct kref *kref, unsigned int count,
|
||||
void (*release)(struct kref *kref))
|
||||
{
|
||||
WARN_ON(release == NULL);
|
||||
|
||||
if (atomic_sub_and_test((int) count, &kref->refcount)) {
|
||||
release(kref);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kref_put - decrement refcount for object.
|
||||
* @kref: object.
|
||||
* @release: pointer to the function that will clean up the object when the
|
||||
* last reference to the object is released.
|
||||
* This pointer is required, and it is not acceptable to pass kfree
|
||||
* in as this function. If the caller does pass kfree to this
|
||||
* function, you will be publicly mocked mercilessly by the kref
|
||||
* maintainer, and anyone else who happens to notice it. You have
|
||||
* been warned.
|
||||
*
|
||||
* Decrement the refcount, and if 0, call release().
|
||||
* Return 1 if the object was removed, otherwise return 0. Beware, if this
|
||||
* function returns 0, you still can not count on the kref from remaining in
|
||||
* memory. Only use the return value if you want to see if the kref is now
|
||||
* gone, not present.
|
||||
*/
|
||||
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
|
||||
{
|
||||
return kref_sub(kref, 1, release);
|
||||
}
|
||||
|
||||
static inline int kref_put_mutex(struct kref *kref,
|
||||
void (*release)(struct kref *kref),
|
||||
struct mutex *lock)
|
||||
{
|
||||
WARN_ON(release == NULL);
|
||||
if (unlikely(!atomic_add_unless(&kref->refcount, -1, 1))) {
|
||||
mutex_lock(lock);
|
||||
if (unlikely(!atomic_dec_and_test(&kref->refcount))) {
|
||||
mutex_unlock(lock);
|
||||
return 0;
|
||||
}
|
||||
release(kref);
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* kref_get_unless_zero - Increment refcount for object unless it is zero.
|
||||
* @kref: object.
|
||||
*
|
||||
* Return non-zero if the increment succeeded. Otherwise return 0.
|
||||
*
|
||||
* This function is intended to simplify locking around refcounting for
|
||||
* objects that can be looked up from a lookup structure, and which are
|
||||
* removed from that lookup structure in the object destructor.
|
||||
* Operations on such objects require at least a read lock around
|
||||
* lookup + kref_get, and a write lock around kref_put + remove from lookup
|
||||
* structure. Furthermore, RCU implementations become extremely tricky.
|
||||
* With a lookup followed by a kref_get_unless_zero *with return value check*
|
||||
* locking in the kref_put path can be deferred to the actual removal from
|
||||
* the lookup structure and RCU lookups become trivial.
|
||||
*/
|
||||
static inline int __must_check kref_get_unless_zero(struct kref *kref)
|
||||
{
|
||||
return atomic_add_unless(&kref->refcount, 1, 0);
|
||||
}
|
||||
#endif /* _KREF_H_ */
|
@ -1,18 +0,0 @@
|
||||
#ifndef __TOOLS_LINUX_LGLOCK_H
|
||||
#define __TOOLS_LINUX_LGLOCK_H
|
||||
|
||||
#include <pthread.h>
|
||||
|
||||
struct lglock {
|
||||
pthread_mutex_t lock;
|
||||
};
|
||||
|
||||
#define lg_lock_free(l) do {} while (0)
|
||||
#define lg_lock_init(l) pthread_mutex_init(&(l)->lock, NULL)
|
||||
|
||||
#define lg_local_lock(l) pthread_mutex_lock(&(l)->lock)
|
||||
#define lg_local_unlock(l) pthread_mutex_unlock(&(l)->lock)
|
||||
#define lg_global_lock(l) pthread_mutex_lock(&(l)->lock)
|
||||
#define lg_global_unlock(l) pthread_mutex_unlock(&(l)->lock)
|
||||
|
||||
#endif /* __TOOLS_LINUX_LGLOCK_H */
|
@ -1,117 +0,0 @@
|
||||
#ifndef _LINUX_LIST_NULLS_H
|
||||
#define _LINUX_LIST_NULLS_H
|
||||
|
||||
#include <linux/poison.h>
|
||||
#include <linux/const.h>
|
||||
|
||||
/*
|
||||
* Special version of lists, where end of list is not a NULL pointer,
|
||||
* but a 'nulls' marker, which can have many different values.
|
||||
* (up to 2^31 different values guaranteed on all platforms)
|
||||
*
|
||||
* In the standard hlist, termination of a list is the NULL pointer.
|
||||
* In this special 'nulls' variant, we use the fact that objects stored in
|
||||
* a list are aligned on a word (4 or 8 bytes alignment).
|
||||
* We therefore use the last significant bit of 'ptr' :
|
||||
* Set to 1 : This is a 'nulls' end-of-list marker (ptr >> 1)
|
||||
* Set to 0 : This is a pointer to some object (ptr)
|
||||
*/
|
||||
|
||||
struct hlist_nulls_head {
|
||||
struct hlist_nulls_node *first;
|
||||
};
|
||||
|
||||
struct hlist_nulls_node {
|
||||
struct hlist_nulls_node *next, **pprev;
|
||||
};
|
||||
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
|
||||
#define INIT_HLIST_NULLS_HEAD(ptr, nulls) \
|
||||
((ptr)->first = (struct hlist_nulls_node *) NULLS_MARKER(nulls))
|
||||
|
||||
#define hlist_nulls_entry(ptr, type, member) container_of(ptr,type,member)
|
||||
/**
|
||||
* ptr_is_a_nulls - Test if a ptr is a nulls
|
||||
* @ptr: ptr to be tested
|
||||
*
|
||||
*/
|
||||
static inline int is_a_nulls(const struct hlist_nulls_node *ptr)
|
||||
{
|
||||
return ((unsigned long)ptr & 1);
|
||||
}
|
||||
|
||||
/**
|
||||
* get_nulls_value - Get the 'nulls' value of the end of chain
|
||||
* @ptr: end of chain
|
||||
*
|
||||
* Should be called only if is_a_nulls(ptr);
|
||||
*/
|
||||
static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
|
||||
{
|
||||
return ((unsigned long)ptr) >> 1;
|
||||
}
|
||||
|
||||
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
|
||||
{
|
||||
return !h->pprev;
|
||||
}
|
||||
|
||||
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
|
||||
{
|
||||
return is_a_nulls(READ_ONCE(h->first));
|
||||
}
|
||||
|
||||
static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
|
||||
struct hlist_nulls_head *h)
|
||||
{
|
||||
struct hlist_nulls_node *first = h->first;
|
||||
|
||||
n->next = first;
|
||||
n->pprev = &h->first;
|
||||
h->first = n;
|
||||
if (!is_a_nulls(first))
|
||||
first->pprev = &n->next;
|
||||
}
|
||||
|
||||
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
|
||||
{
|
||||
struct hlist_nulls_node *next = n->next;
|
||||
struct hlist_nulls_node **pprev = n->pprev;
|
||||
|
||||
WRITE_ONCE(*pprev, next);
|
||||
if (!is_a_nulls(next))
|
||||
next->pprev = pprev;
|
||||
}
|
||||
|
||||
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
|
||||
{
|
||||
__hlist_nulls_del(n);
|
||||
n->pprev = LIST_POISON2;
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_nulls_for_each_entry - iterate over list of given type
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
* @pos: the &struct hlist_node to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*
|
||||
*/
|
||||
#define hlist_nulls_for_each_entry(tpos, pos, head, member) \
|
||||
for (pos = (head)->first; \
|
||||
(!is_a_nulls(pos)) && \
|
||||
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
|
||||
pos = pos->next)
|
||||
|
||||
/**
|
||||
* hlist_nulls_for_each_entry_from - iterate over a hlist continuing from current point
|
||||
* @tpos: the type * to use as a loop cursor.
|
||||
* @pos: the &struct hlist_node to use as a loop cursor.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*
|
||||
*/
|
||||
#define hlist_nulls_for_each_entry_from(tpos, pos, member) \
|
||||
for (; (!is_a_nulls(pos)) && \
|
||||
({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1;}); \
|
||||
pos = pos->next)
|
||||
|
||||
#endif
|
@ -1 +0,0 @@
|
||||
#include <linux/slab.h>
|
@ -3,7 +3,6 @@
|
||||
|
||||
#include <linux/stat.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
struct module;
|
||||
@ -43,4 +42,7 @@ static inline void module_put(struct module *module)
|
||||
{
|
||||
}
|
||||
|
||||
#define module_param_named(name, value, type, perm)
|
||||
#define MODULE_PARM_DESC(_parm, desc)
|
||||
|
||||
#endif /* _LINUX_MODULE_H */
|
||||
|
@ -1,7 +0,0 @@
|
||||
#ifndef _LINUX_MODULE_PARAMS_H
|
||||
#define _LINUX_MODULE_PARAMS_H
|
||||
|
||||
#define module_param_named(name, value, type, perm)
|
||||
#define MODULE_PARM_DESC(_parm, desc)
|
||||
|
||||
#endif /* _LINUX_MODULE_PARAMS_H */
|
@ -1,20 +0,0 @@
|
||||
#ifndef _LINUX_PATH_H
|
||||
#define _LINUX_PATH_H
|
||||
|
||||
struct dentry;
|
||||
struct vfsmount;
|
||||
|
||||
struct path {
|
||||
struct vfsmount *mnt;
|
||||
struct dentry *dentry;
|
||||
};
|
||||
|
||||
extern void path_get(const struct path *);
|
||||
extern void path_put(const struct path *);
|
||||
|
||||
static inline int path_equal(const struct path *path1, const struct path *path2)
|
||||
{
|
||||
return path1->mnt == path2->mnt && path1->dentry == path2->dentry;
|
||||
}
|
||||
|
||||
#endif /* _LINUX_PATH_H */
|
@ -1,6 +1,8 @@
|
||||
#ifndef __TOOLS_LINUX_PERCPU_H
|
||||
#define __TOOLS_LINUX_PERCPU_H
|
||||
|
||||
#include <linux/cpumask.h>
|
||||
|
||||
#define __percpu
|
||||
|
||||
#define free_percpu(percpu) free(percpu)
|
||||
|
@ -1,90 +0,0 @@
|
||||
#ifndef _LINUX_POISON_H
|
||||
#define _LINUX_POISON_H
|
||||
|
||||
/********** include/linux/list.h **********/
|
||||
|
||||
/*
|
||||
* Architectures might want to move the poison pointer offset
|
||||
* into some well-recognized area such as 0xdead000000000000,
|
||||
* that is also not mappable by user-space exploits:
|
||||
*/
|
||||
#ifdef CONFIG_ILLEGAL_POINTER_VALUE
|
||||
# define POISON_POINTER_DELTA _AC(CONFIG_ILLEGAL_POINTER_VALUE, UL)
|
||||
#else
|
||||
# define POISON_POINTER_DELTA 0
|
||||
#endif
|
||||
|
||||
/*
|
||||
* These are non-NULL pointers that will result in page faults
|
||||
* under normal circumstances, used to verify that nobody uses
|
||||
* non-initialized list entries.
|
||||
*/
|
||||
#define LIST_POISON1 ((void *) 0x100 + POISON_POINTER_DELTA)
|
||||
#define LIST_POISON2 ((void *) 0x200 + POISON_POINTER_DELTA)
|
||||
|
||||
/********** include/linux/timer.h **********/
|
||||
/*
|
||||
* Magic number "tsta" to indicate a static timer initializer
|
||||
* for the object debugging code.
|
||||
*/
|
||||
#define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA)
|
||||
|
||||
/********** mm/debug-pagealloc.c **********/
|
||||
#ifdef CONFIG_PAGE_POISONING_ZERO
|
||||
#define PAGE_POISON 0x00
|
||||
#else
|
||||
#define PAGE_POISON 0xaa
|
||||
#endif
|
||||
|
||||
/********** mm/page_alloc.c ************/
|
||||
|
||||
#define TAIL_MAPPING ((void *) 0x400 + POISON_POINTER_DELTA)
|
||||
|
||||
/********** mm/slab.c **********/
|
||||
/*
|
||||
* Magic nums for obj red zoning.
|
||||
* Placed in the first word before and the first word after an obj.
|
||||
*/
|
||||
#define RED_INACTIVE 0x09F911029D74E35BULL /* when obj is inactive */
|
||||
#define RED_ACTIVE 0xD84156C5635688C0ULL /* when obj is active */
|
||||
|
||||
#define SLUB_RED_INACTIVE 0xbb
|
||||
#define SLUB_RED_ACTIVE 0xcc
|
||||
|
||||
/* ...and for poisoning */
|
||||
#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
|
||||
#define POISON_FREE 0x6b /* for use-after-free poisoning */
|
||||
#define POISON_END 0xa5 /* end-byte of poisoning */
|
||||
|
||||
/********** arch/$ARCH/mm/init.c **********/
|
||||
#define POISON_FREE_INITMEM 0xcc
|
||||
|
||||
/********** arch/ia64/hp/common/sba_iommu.c **********/
|
||||
/*
|
||||
* arch/ia64/hp/common/sba_iommu.c uses a 16-byte poison string with a
|
||||
* value of "SBAIOMMU POISON\0" for spill-over poisoning.
|
||||
*/
|
||||
|
||||
/********** fs/jbd/journal.c **********/
|
||||
#define JBD_POISON_FREE 0x5b
|
||||
#define JBD2_POISON_FREE 0x5c
|
||||
|
||||
/********** drivers/base/dmapool.c **********/
|
||||
#define POOL_POISON_FREED 0xa7 /* !inuse */
|
||||
#define POOL_POISON_ALLOCATED 0xa9 /* !initted */
|
||||
|
||||
/********** drivers/atm/ **********/
|
||||
#define ATM_POISON_FREE 0x12
|
||||
#define ATM_POISON 0xdeadbeef
|
||||
|
||||
/********** kernel/mutexes **********/
|
||||
#define MUTEX_DEBUG_INIT 0x11
|
||||
#define MUTEX_DEBUG_FREE 0x22
|
||||
|
||||
/********** lib/flex_array.c **********/
|
||||
#define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */
|
||||
|
||||
/********** security/ **********/
|
||||
#define KEY_DESTROY 0xbd
|
||||
|
||||
#endif
|
@ -20,11 +20,9 @@
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/cpumask.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/jhash.h>
|
||||
#include <linux/list_nulls.h>
|
||||
#include <linux/workqueue.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/spinlock.h>
|
||||
@ -96,6 +94,8 @@ struct rhashtable_walker {
|
||||
struct bucket_table *tbl;
|
||||
};
|
||||
|
||||
#define NULLS_MARKER(value) (1UL | (((long)value) << 1))
|
||||
|
||||
static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
|
||||
{
|
||||
return NULLS_MARKER(ht->p.nulls_base + hash);
|
||||
@ -109,11 +109,6 @@ static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
|
||||
return ((unsigned long) ptr & 1);
|
||||
}
|
||||
|
||||
static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
|
||||
{
|
||||
return ((unsigned long) ptr) >> 1;
|
||||
}
|
||||
|
||||
static inline void *rht_obj(const struct rhashtable *ht,
|
||||
const struct rhash_head *he)
|
||||
{
|
||||
|
@ -1,10 +1,8 @@
|
||||
#ifndef _LINUX_SCATTERLIST_H
|
||||
#define _LINUX_SCATTERLIST_H
|
||||
|
||||
#include <linux/string.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
struct scatterlist {
|
||||
unsigned long page_link;
|
||||
|
@ -4,9 +4,6 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/fs.h>
|
||||
|
||||
struct seq_operations;
|
||||
struct path;
|
||||
|
||||
struct seq_file {
|
||||
char *buf;
|
||||
size_t size;
|
||||
@ -16,7 +13,6 @@ struct seq_file {
|
||||
loff_t index;
|
||||
loff_t read_pos;
|
||||
u64 version;
|
||||
const struct seq_operations *op;
|
||||
int poll_event;
|
||||
const struct file *file;
|
||||
void *private;
|
||||
|
@ -1,78 +1,18 @@
|
||||
#ifndef __LINUX_SEQLOCK_H
|
||||
#define __LINUX_SEQLOCK_H
|
||||
/*
|
||||
* Reader/writer consistent mechanism without starving writers. This type of
|
||||
* lock for data where the reader wants a consistent set of information
|
||||
* and is willing to retry if the information changes. There are two types
|
||||
* of readers:
|
||||
* 1. Sequence readers which never block a writer but they may have to retry
|
||||
* if a writer is in progress by detecting change in sequence number.
|
||||
* Writers do not wait for a sequence reader.
|
||||
* 2. Locking readers which will wait if a writer or another locking reader
|
||||
* is in progress. A locking reader in progress will also block a writer
|
||||
* from going forward. Unlike the regular rwlock, the read lock here is
|
||||
* exclusive so that only one locking reader can get it.
|
||||
*
|
||||
* This is not as cache friendly as brlock. Also, this may not work well
|
||||
* for data that contains pointers, because any writer could
|
||||
* invalidate a pointer that a reader was following.
|
||||
*
|
||||
* Expected non-blocking reader usage:
|
||||
* do {
|
||||
* seq = read_seqbegin(&foo);
|
||||
* ...
|
||||
* } while (read_seqretry(&foo, seq));
|
||||
*
|
||||
*
|
||||
* On non-SMP the spin locks disappear but the writer still needs
|
||||
* to increment the sequence variables because an interrupt routine could
|
||||
* change the state of the data.
|
||||
*
|
||||
* Based on x86_64 vsyscall gettimeofday
|
||||
* by Keith Owens and Andrea Arcangeli
|
||||
*/
|
||||
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/compiler.h>
|
||||
|
||||
/*
|
||||
* Version using sequence counter only.
|
||||
* This can be used when code has its own mutex protecting the
|
||||
* updating starting before the write_seqcountbeqin() and ending
|
||||
* after the write_seqcount_end().
|
||||
*/
|
||||
typedef struct seqcount {
|
||||
unsigned sequence;
|
||||
} seqcount_t;
|
||||
|
||||
static inline void __seqcount_init(seqcount_t *s, const char *name,
|
||||
struct lock_class_key *key)
|
||||
static inline void seqcount_init(seqcount_t *s)
|
||||
{
|
||||
s->sequence = 0;
|
||||
}
|
||||
|
||||
# define SEQCOUNT_DEP_MAP_INIT(lockname)
|
||||
# define seqcount_init(s) __seqcount_init(s, NULL, NULL)
|
||||
# define seqcount_lockdep_reader_access(x)
|
||||
|
||||
#define SEQCNT_ZERO(lockname) { .sequence = 0, SEQCOUNT_DEP_MAP_INIT(lockname)}
|
||||
|
||||
|
||||
/**
|
||||
* __read_seqcount_begin - begin a seq-read critical section (without barrier)
|
||||
* @s: pointer to seqcount_t
|
||||
* Returns: count to be passed to read_seqcount_retry
|
||||
*
|
||||
* __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb()
|
||||
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
|
||||
* provided before actually loading any of the variables that are to be
|
||||
* protected in this critical section.
|
||||
*
|
||||
* Use carefully, only in critical code, and comment how the barrier is
|
||||
* provided.
|
||||
*/
|
||||
static inline unsigned __read_seqcount_begin(const seqcount_t *s)
|
||||
static inline unsigned read_seqcount_begin(const seqcount_t *s)
|
||||
{
|
||||
unsigned ret;
|
||||
|
||||
@ -82,486 +22,26 @@ repeat:
|
||||
cpu_relax();
|
||||
goto repeat;
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* raw_read_seqcount - Read the raw seqcount
|
||||
* @s: pointer to seqcount_t
|
||||
* Returns: count to be passed to read_seqcount_retry
|
||||
*
|
||||
* raw_read_seqcount opens a read critical section of the given
|
||||
* seqcount without any lockdep checking and without checking or
|
||||
* masking the LSB. Calling code is responsible for handling that.
|
||||
*/
|
||||
static inline unsigned raw_read_seqcount(const seqcount_t *s)
|
||||
{
|
||||
unsigned ret = READ_ONCE(s->sequence);
|
||||
smp_rmb();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* raw_read_seqcount_begin - start seq-read critical section w/o lockdep
|
||||
* @s: pointer to seqcount_t
|
||||
* Returns: count to be passed to read_seqcount_retry
|
||||
*
|
||||
* raw_read_seqcount_begin opens a read critical section of the given
|
||||
* seqcount, but without any lockdep checking. Validity of the critical
|
||||
* section is tested by checking read_seqcount_retry function.
|
||||
*/
|
||||
static inline unsigned raw_read_seqcount_begin(const seqcount_t *s)
|
||||
{
|
||||
unsigned ret = __read_seqcount_begin(s);
|
||||
smp_rmb();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
* read_seqcount_begin - begin a seq-read critical section
|
||||
* @s: pointer to seqcount_t
|
||||
* Returns: count to be passed to read_seqcount_retry
|
||||
*
|
||||
* read_seqcount_begin opens a read critical section of the given seqcount.
|
||||
* Validity of the critical section is tested by checking read_seqcount_retry
|
||||
* function.
|
||||
*/
|
||||
static inline unsigned read_seqcount_begin(const seqcount_t *s)
|
||||
{
|
||||
seqcount_lockdep_reader_access(s);
|
||||
return raw_read_seqcount_begin(s);
|
||||
}
|
||||
|
||||
/**
|
||||
* raw_seqcount_begin - begin a seq-read critical section
|
||||
* @s: pointer to seqcount_t
|
||||
* Returns: count to be passed to read_seqcount_retry
|
||||
*
|
||||
* raw_seqcount_begin opens a read critical section of the given seqcount.
|
||||
* Validity of the critical section is tested by checking read_seqcount_retry
|
||||
* function.
|
||||
*
|
||||
* Unlike read_seqcount_begin(), this function will not wait for the count
|
||||
* to stabilize. If a writer is active when we begin, we will fail the
|
||||
* read_seqcount_retry() instead of stabilizing at the beginning of the
|
||||
* critical section.
|
||||
*/
|
||||
static inline unsigned raw_seqcount_begin(const seqcount_t *s)
|
||||
{
|
||||
unsigned ret = READ_ONCE(s->sequence);
|
||||
smp_rmb();
|
||||
return ret & ~1;
|
||||
}
|
||||
|
||||
/**
|
||||
* __read_seqcount_retry - end a seq-read critical section (without barrier)
|
||||
* @s: pointer to seqcount_t
|
||||
* @start: count, from read_seqcount_begin
|
||||
* Returns: 1 if retry is required, else 0
|
||||
*
|
||||
* __read_seqcount_retry is like read_seqcount_retry, but has no smp_rmb()
|
||||
* barrier. Callers should ensure that smp_rmb() or equivalent ordering is
|
||||
* provided before actually loading any of the variables that are to be
|
||||
* protected in this critical section.
|
||||
*
|
||||
* Use carefully, only in critical code, and comment how the barrier is
|
||||
* provided.
|
||||
*/
|
||||
static inline int __read_seqcount_retry(const seqcount_t *s, unsigned start)
|
||||
{
|
||||
return unlikely(s->sequence != start);
|
||||
}
|
||||
|
||||
/**
|
||||
* read_seqcount_retry - end a seq-read critical section
|
||||
* @s: pointer to seqcount_t
|
||||
* @start: count, from read_seqcount_begin
|
||||
* Returns: 1 if retry is required, else 0
|
||||
*
|
||||
* read_seqcount_retry closes a read critical section of the given seqcount.
|
||||
* If the critical section was invalid, it must be ignored (and typically
|
||||
* retried).
|
||||
*/
|
||||
static inline int read_seqcount_retry(const seqcount_t *s, unsigned start)
|
||||
{
|
||||
smp_rmb();
|
||||
return __read_seqcount_retry(s, start);
|
||||
}
|
||||
|
||||
|
||||
|
||||
static inline void raw_write_seqcount_begin(seqcount_t *s)
|
||||
{
|
||||
s->sequence++;
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
static inline void raw_write_seqcount_end(seqcount_t *s)
|
||||
{
|
||||
smp_wmb();
|
||||
s->sequence++;
|
||||
}
|
||||
|
||||
/**
|
||||
* raw_write_seqcount_barrier - do a seq write barrier
|
||||
* @s: pointer to seqcount_t
|
||||
*
|
||||
* This can be used to provide an ordering guarantee instead of the
|
||||
* usual consistency guarantee. It is one wmb cheaper, because we can
|
||||
* collapse the two back-to-back wmb()s.
|
||||
*
|
||||
* seqcount_t seq;
|
||||
* bool X = true, Y = false;
|
||||
*
|
||||
* void read(void)
|
||||
* {
|
||||
* bool x, y;
|
||||
*
|
||||
* do {
|
||||
* int s = read_seqcount_begin(&seq);
|
||||
*
|
||||
* x = X; y = Y;
|
||||
*
|
||||
* } while (read_seqcount_retry(&seq, s));
|
||||
*
|
||||
* BUG_ON(!x && !y);
|
||||
* }
|
||||
*
|
||||
* void write(void)
|
||||
* {
|
||||
* Y = true;
|
||||
*
|
||||
* raw_write_seqcount_barrier(seq);
|
||||
*
|
||||
* X = false;
|
||||
* }
|
||||
*/
|
||||
static inline void raw_write_seqcount_barrier(seqcount_t *s)
|
||||
{
|
||||
s->sequence++;
|
||||
smp_wmb();
|
||||
s->sequence++;
|
||||
}
|
||||
|
||||
static inline int raw_read_seqcount_latch(seqcount_t *s)
|
||||
{
|
||||
int seq = READ_ONCE(s->sequence);
|
||||
/* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */
|
||||
smp_read_barrier_depends();
|
||||
return seq;
|
||||
}
|
||||
|
||||
/**
|
||||
* raw_write_seqcount_latch - redirect readers to even/odd copy
|
||||
* @s: pointer to seqcount_t
|
||||
*
|
||||
* The latch technique is a multiversion concurrency control method that allows
|
||||
* queries during non-atomic modifications. If you can guarantee queries never
|
||||
* interrupt the modification -- e.g. the concurrency is strictly between CPUs
|
||||
* -- you most likely do not need this.
|
||||
*
|
||||
* Where the traditional RCU/lockless data structures rely on atomic
|
||||
* modifications to ensure queries observe either the old or the new state the
|
||||
* latch allows the same for non-atomic updates. The trade-off is doubling the
|
||||
* cost of storage; we have to maintain two copies of the entire data
|
||||
* structure.
|
||||
*
|
||||
* Very simply put: we first modify one copy and then the other. This ensures
|
||||
* there is always one copy in a stable state, ready to give us an answer.
|
||||
*
|
||||
* The basic form is a data structure like:
|
||||
*
|
||||
* struct latch_struct {
|
||||
* seqcount_t seq;
|
||||
* struct data_struct data[2];
|
||||
* };
|
||||
*
|
||||
* Where a modification, which is assumed to be externally serialized, does the
|
||||
* following:
|
||||
*
|
||||
* void latch_modify(struct latch_struct *latch, ...)
|
||||
* {
|
||||
* smp_wmb(); <- Ensure that the last data[1] update is visible
|
||||
* latch->seq++;
|
||||
* smp_wmb(); <- Ensure that the seqcount update is visible
|
||||
*
|
||||
* modify(latch->data[0], ...);
|
||||
*
|
||||
* smp_wmb(); <- Ensure that the data[0] update is visible
|
||||
* latch->seq++;
|
||||
* smp_wmb(); <- Ensure that the seqcount update is visible
|
||||
*
|
||||
* modify(latch->data[1], ...);
|
||||
* }
|
||||
*
|
||||
* The query will have a form like:
|
||||
*
|
||||
* struct entry *latch_query(struct latch_struct *latch, ...)
|
||||
* {
|
||||
* struct entry *entry;
|
||||
* unsigned seq, idx;
|
||||
*
|
||||
* do {
|
||||
* seq = raw_read_seqcount_latch(&latch->seq);
|
||||
*
|
||||
* idx = seq & 0x01;
|
||||
* entry = data_query(latch->data[idx], ...);
|
||||
*
|
||||
* smp_rmb();
|
||||
* } while (seq != latch->seq);
|
||||
*
|
||||
* return entry;
|
||||
* }
|
||||
*
|
||||
* So during the modification, queries are first redirected to data[1]. Then we
|
||||
* modify data[0]. When that is complete, we redirect queries back to data[0]
|
||||
* and we can modify data[1].
|
||||
*
|
||||
* NOTE: The non-requirement for atomic modifications does _NOT_ include
|
||||
* the publishing of new entries in the case where data is a dynamic
|
||||
* data structure.
|
||||
*
|
||||
* An iteration might start in data[0] and get suspended long enough
|
||||
* to miss an entire modification sequence, once it resumes it might
|
||||
* observe the new entry.
|
||||
*
|
||||
* NOTE: When data is a dynamic data structure; one should use regular RCU
|
||||
* patterns to manage the lifetimes of the objects within.
|
||||
*/
|
||||
static inline void raw_write_seqcount_latch(seqcount_t *s)
|
||||
{
|
||||
smp_wmb(); /* prior stores before incrementing "sequence" */
|
||||
s->sequence++;
|
||||
smp_wmb(); /* increment "sequence" before following stores */
|
||||
}
|
||||
|
||||
/*
|
||||
* Sequence counter only version assumes that callers are using their
|
||||
* own mutexing.
|
||||
*/
|
||||
static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass)
|
||||
{
|
||||
raw_write_seqcount_begin(s);
|
||||
return unlikely(s->sequence != start);
|
||||
}
|
||||
|
||||
static inline void write_seqcount_begin(seqcount_t *s)
|
||||
{
|
||||
write_seqcount_begin_nested(s, 0);
|
||||
s->sequence++;
|
||||
smp_wmb();
|
||||
}
|
||||
|
||||
static inline void write_seqcount_end(seqcount_t *s)
|
||||
{
|
||||
raw_write_seqcount_end(s);
|
||||
}
|
||||
|
||||
/**
|
||||
* write_seqcount_invalidate - invalidate in-progress read-side seq operations
|
||||
* @s: pointer to seqcount_t
|
||||
*
|
||||
* After write_seqcount_invalidate, no read-side seq operations will complete
|
||||
* successfully and see data older than this.
|
||||
*/
|
||||
static inline void write_seqcount_invalidate(seqcount_t *s)
|
||||
{
|
||||
smp_wmb();
|
||||
s->sequence+=2;
|
||||
s->sequence++;
|
||||
}
|
||||
|
||||
typedef struct {
|
||||
struct seqcount seqcount;
|
||||
spinlock_t lock;
|
||||
} seqlock_t;
|
||||
|
||||
/*
|
||||
* These macros triggered gcc-3.x compile-time problems. We think these are
|
||||
* OK now. Be cautious.
|
||||
*/
|
||||
#define __SEQLOCK_UNLOCKED(lockname) \
|
||||
{ \
|
||||
.seqcount = SEQCNT_ZERO(lockname), \
|
||||
.lock = __SPIN_LOCK_UNLOCKED(lockname) \
|
||||
}
|
||||
|
||||
#define seqlock_init(x) \
|
||||
do { \
|
||||
seqcount_init(&(x)->seqcount); \
|
||||
spin_lock_init(&(x)->lock); \
|
||||
} while (0)
|
||||
|
||||
#define DEFINE_SEQLOCK(x) \
|
||||
seqlock_t x = __SEQLOCK_UNLOCKED(x)
|
||||
|
||||
/*
|
||||
* Read side functions for starting and finalizing a read side section.
|
||||
*/
|
||||
static inline unsigned read_seqbegin(const seqlock_t *sl)
|
||||
{
|
||||
return read_seqcount_begin(&sl->seqcount);
|
||||
}
|
||||
|
||||
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
|
||||
{
|
||||
return read_seqcount_retry(&sl->seqcount, start);
|
||||
}
|
||||
|
||||
/*
|
||||
* Lock out other writers and update the count.
|
||||
* Acts like a normal spin_lock/unlock.
|
||||
* Don't need preempt_disable() because that is in the spin_lock already.
|
||||
*/
|
||||
static inline void write_seqlock(seqlock_t *sl)
|
||||
{
|
||||
spin_lock(&sl->lock);
|
||||
write_seqcount_begin(&sl->seqcount);
|
||||
}
|
||||
|
||||
static inline void write_sequnlock(seqlock_t *sl)
|
||||
{
|
||||
write_seqcount_end(&sl->seqcount);
|
||||
spin_unlock(&sl->lock);
|
||||
}
|
||||
|
||||
static inline void write_seqlock_bh(seqlock_t *sl)
|
||||
{
|
||||
spin_lock_bh(&sl->lock);
|
||||
write_seqcount_begin(&sl->seqcount);
|
||||
}
|
||||
|
||||
static inline void write_sequnlock_bh(seqlock_t *sl)
|
||||
{
|
||||
write_seqcount_end(&sl->seqcount);
|
||||
spin_unlock_bh(&sl->lock);
|
||||
}
|
||||
|
||||
static inline void write_seqlock_irq(seqlock_t *sl)
|
||||
{
|
||||
spin_lock_irq(&sl->lock);
|
||||
write_seqcount_begin(&sl->seqcount);
|
||||
}
|
||||
|
||||
static inline void write_sequnlock_irq(seqlock_t *sl)
|
||||
{
|
||||
write_seqcount_end(&sl->seqcount);
|
||||
spin_unlock_irq(&sl->lock);
|
||||
}
|
||||
|
||||
static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&sl->lock, flags);
|
||||
write_seqcount_begin(&sl->seqcount);
|
||||
return flags;
|
||||
}
|
||||
|
||||
#define write_seqlock_irqsave(lock, flags) \
|
||||
do { flags = __write_seqlock_irqsave(lock); } while (0)
|
||||
|
||||
static inline void
|
||||
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
|
||||
{
|
||||
write_seqcount_end(&sl->seqcount);
|
||||
spin_unlock_irqrestore(&sl->lock, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
* A locking reader exclusively locks out other writers and locking readers,
|
||||
* but doesn't update the sequence number. Acts like a normal spin_lock/unlock.
|
||||
* Don't need preempt_disable() because that is in the spin_lock already.
|
||||
*/
|
||||
static inline void read_seqlock_excl(seqlock_t *sl)
|
||||
{
|
||||
spin_lock(&sl->lock);
|
||||
}
|
||||
|
||||
static inline void read_sequnlock_excl(seqlock_t *sl)
|
||||
{
|
||||
spin_unlock(&sl->lock);
|
||||
}
|
||||
|
||||
/**
|
||||
* read_seqbegin_or_lock - begin a sequence number check or locking block
|
||||
* @lock: sequence lock
|
||||
* @seq : sequence number to be checked
|
||||
*
|
||||
* First try it once optimistically without taking the lock. If that fails,
|
||||
* take the lock. The sequence number is also used as a marker for deciding
|
||||
* whether to be a reader (even) or writer (odd).
|
||||
* N.B. seq must be initialized to an even number to begin with.
|
||||
*/
|
||||
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
|
||||
{
|
||||
if (!(*seq & 1)) /* Even */
|
||||
*seq = read_seqbegin(lock);
|
||||
else /* Odd */
|
||||
read_seqlock_excl(lock);
|
||||
}
|
||||
|
||||
static inline int need_seqretry(seqlock_t *lock, int seq)
|
||||
{
|
||||
return !(seq & 1) && read_seqretry(lock, seq);
|
||||
}
|
||||
|
||||
static inline void done_seqretry(seqlock_t *lock, int seq)
|
||||
{
|
||||
if (seq & 1)
|
||||
read_sequnlock_excl(lock);
|
||||
}
|
||||
|
||||
static inline void read_seqlock_excl_bh(seqlock_t *sl)
|
||||
{
|
||||
spin_lock_bh(&sl->lock);
|
||||
}
|
||||
|
||||
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
|
||||
{
|
||||
spin_unlock_bh(&sl->lock);
|
||||
}
|
||||
|
||||
static inline void read_seqlock_excl_irq(seqlock_t *sl)
|
||||
{
|
||||
spin_lock_irq(&sl->lock);
|
||||
}
|
||||
|
||||
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
|
||||
{
|
||||
spin_unlock_irq(&sl->lock);
|
||||
}
|
||||
|
||||
static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&sl->lock, flags);
|
||||
return flags;
|
||||
}
|
||||
|
||||
#define read_seqlock_excl_irqsave(lock, flags) \
|
||||
do { flags = __read_seqlock_excl_irqsave(lock); } while (0)
|
||||
|
||||
static inline void
|
||||
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
|
||||
{
|
||||
spin_unlock_irqrestore(&sl->lock, flags);
|
||||
}
|
||||
|
||||
static inline unsigned long
|
||||
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
|
||||
{
|
||||
unsigned long flags = 0;
|
||||
|
||||
if (!(*seq & 1)) /* Even */
|
||||
*seq = read_seqbegin(lock);
|
||||
else /* Odd */
|
||||
read_seqlock_excl_irqsave(lock, flags);
|
||||
|
||||
return flags;
|
||||
}
|
||||
|
||||
static inline void
|
||||
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
|
||||
{
|
||||
if (seq & 1)
|
||||
read_sequnlock_excl_irqrestore(lock, flags);
|
||||
}
|
||||
#endif /* __LINUX_SEQLOCK_H */
|
||||
|
@ -1,10 +1,13 @@
|
||||
#ifndef _LINUX_SORT_H
|
||||
#define _LINUX_SORT_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
void sort(void *base, size_t num, size_t size,
|
||||
int (*cmp)(const void *, const void *),
|
||||
void (*swap)(void *, void *, int));
|
||||
static inline void sort(void *base, size_t num, size_t size,
|
||||
int (*cmp_func)(const void *, const void *),
|
||||
void (*swap_func)(void *, void *, int size))
|
||||
{
|
||||
return qsort(base, num, size, cmp_func);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -6,7 +6,6 @@
|
||||
#include <linux/types.h> /* for size_t */
|
||||
|
||||
extern size_t strlcpy(char *dest, const char *src, size_t size);
|
||||
extern char *skip_spaces(const char *);
|
||||
extern char *strim(char *);
|
||||
extern void memzero_explicit(void *, size_t);
|
||||
int match_string(const char * const *, size_t, const char *);
|
||||
|
@ -1,12 +0,0 @@
|
||||
#ifndef __LINUX_STRINGIFY_H
|
||||
#define __LINUX_STRINGIFY_H
|
||||
|
||||
/* Indirect stringification. Doing two levels allows the parameter to be a
|
||||
* macro itself. For example, compile with -DFOO=bar, __stringify(FOO)
|
||||
* converts to "bar".
|
||||
*/
|
||||
|
||||
#define __stringify_1(x...) #x
|
||||
#define __stringify(x...) __stringify_1(x)
|
||||
|
||||
#endif /* !__LINUX_STRINGIFY_H */
|
@ -2,7 +2,6 @@
|
||||
#define _SYSFS_H_
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/stringify.h>
|
||||
|
||||
struct kobject;
|
||||
|
||||
@ -11,12 +10,6 @@ struct attribute {
|
||||
umode_t mode;
|
||||
};
|
||||
|
||||
#define __ATTR(_name, _mode, _show, _store) { \
|
||||
.attr = {.name = __stringify(_name), .mode = _mode }, \
|
||||
.show = _show, \
|
||||
.store = _store, \
|
||||
}
|
||||
|
||||
struct sysfs_ops {
|
||||
ssize_t (*show)(struct kobject *, struct attribute *, char *);
|
||||
ssize_t (*store)(struct kobject *, struct attribute *, const char *, size_t);
|
||||
|
@ -1,67 +0,0 @@
|
||||
#ifndef _LINUX_UNALIGNED_ACCESS_OK_H
|
||||
#define _LINUX_UNALIGNED_ACCESS_OK_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <asm/byteorder.h>
|
||||
|
||||
static __always_inline u16 get_unaligned_le16(const void *p)
|
||||
{
|
||||
return le16_to_cpup((__le16 *)p);
|
||||
}
|
||||
|
||||
static __always_inline u32 get_unaligned_le32(const void *p)
|
||||
{
|
||||
return le32_to_cpup((__le32 *)p);
|
||||
}
|
||||
|
||||
static __always_inline u64 get_unaligned_le64(const void *p)
|
||||
{
|
||||
return le64_to_cpup((__le64 *)p);
|
||||
}
|
||||
|
||||
static __always_inline u16 get_unaligned_be16(const void *p)
|
||||
{
|
||||
return be16_to_cpup((__be16 *)p);
|
||||
}
|
||||
|
||||
static __always_inline u32 get_unaligned_be32(const void *p)
|
||||
{
|
||||
return be32_to_cpup((__be32 *)p);
|
||||
}
|
||||
|
||||
static __always_inline u64 get_unaligned_be64(const void *p)
|
||||
{
|
||||
return be64_to_cpup((__be64 *)p);
|
||||
}
|
||||
|
||||
static __always_inline void put_unaligned_le16(u16 val, void *p)
|
||||
{
|
||||
*((__le16 *)p) = cpu_to_le16(val);
|
||||
}
|
||||
|
||||
static __always_inline void put_unaligned_le32(u32 val, void *p)
|
||||
{
|
||||
*((__le32 *)p) = cpu_to_le32(val);
|
||||
}
|
||||
|
||||
static __always_inline void put_unaligned_le64(u64 val, void *p)
|
||||
{
|
||||
*((__le64 *)p) = cpu_to_le64(val);
|
||||
}
|
||||
|
||||
static __always_inline void put_unaligned_be16(u16 val, void *p)
|
||||
{
|
||||
*((__be16 *)p) = cpu_to_be16(val);
|
||||
}
|
||||
|
||||
static __always_inline void put_unaligned_be32(u32 val, void *p)
|
||||
{
|
||||
*((__be32 *)p) = cpu_to_be32(val);
|
||||
}
|
||||
|
||||
static __always_inline void put_unaligned_be64(u64 val, void *p)
|
||||
{
|
||||
*((__be64 *)p) = cpu_to_be64(val);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_UNALIGNED_ACCESS_OK_H */
|
@ -1,36 +0,0 @@
|
||||
#ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H
|
||||
#define _LINUX_UNALIGNED_BE_MEMMOVE_H
|
||||
|
||||
#include <linux/unaligned/memmove.h>
|
||||
|
||||
static inline u16 get_unaligned_be16(const void *p)
|
||||
{
|
||||
return __get_unaligned_memmove16((const u8 *)p);
|
||||
}
|
||||
|
||||
static inline u32 get_unaligned_be32(const void *p)
|
||||
{
|
||||
return __get_unaligned_memmove32((const u8 *)p);
|
||||
}
|
||||
|
||||
static inline u64 get_unaligned_be64(const void *p)
|
||||
{
|
||||
return __get_unaligned_memmove64((const u8 *)p);
|
||||
}
|
||||
|
||||
static inline void put_unaligned_be16(u16 val, void *p)
|
||||
{
|
||||
__put_unaligned_memmove16(val, p);
|
||||
}
|
||||
|
||||
static inline void put_unaligned_be32(u32 val, void *p)
|
||||
{
|
||||
__put_unaligned_memmove32(val, p);
|
||||
}
|
||||
|
||||
static inline void put_unaligned_be64(u64 val, void *p)
|
||||
{
|
||||
__put_unaligned_memmove64(val, p);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
|
@ -1,36 +0,0 @@
|
||||
#ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H
|
||||
#define _LINUX_UNALIGNED_LE_MEMMOVE_H
|
||||
|
||||
#include <linux/unaligned/memmove.h>
|
||||
|
||||
static inline u16 get_unaligned_le16(const void *p)
|
||||
{
|
||||
return __get_unaligned_memmove16((const u8 *)p);
|
||||
}
|
||||
|
||||
static inline u32 get_unaligned_le32(const void *p)
|
||||
{
|
||||
return __get_unaligned_memmove32((const u8 *)p);
|
||||
}
|
||||
|
||||
static inline u64 get_unaligned_le64(const void *p)
|
||||
{
|
||||
return __get_unaligned_memmove64((const u8 *)p);
|
||||
}
|
||||
|
||||
static inline void put_unaligned_le16(u16 val, void *p)
|
||||
{
|
||||
__put_unaligned_memmove16(val, p);
|
||||
}
|
||||
|
||||
static inline void put_unaligned_le32(u32 val, void *p)
|
||||
{
|
||||
__put_unaligned_memmove32(val, p);
|
||||
}
|
||||
|
||||
static inline void put_unaligned_le64(u64 val, void *p)
|
||||
{
|
||||
__put_unaligned_memmove64(val, p);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */
|
@ -1,45 +0,0 @@
|
||||
#ifndef _LINUX_UNALIGNED_MEMMOVE_H
|
||||
#define _LINUX_UNALIGNED_MEMMOVE_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
/* Use memmove here, so gcc does not insert a __builtin_memcpy. */
|
||||
|
||||
static inline u16 __get_unaligned_memmove16(const void *p)
|
||||
{
|
||||
u16 tmp;
|
||||
memmove(&tmp, p, 2);
|
||||
return tmp;
|
||||
}
|
||||
|
||||
static inline u32 __get_unaligned_memmove32(const void *p)
|
||||
{
|
||||
u32 tmp;
|
||||
memmove(&tmp, p, 4);
|
||||
return tmp;
|
||||
}
|
||||
|
||||
static inline u64 __get_unaligned_memmove64(const void *p)
|
||||
{
|
||||
u64 tmp;
|
||||
memmove(&tmp, p, 8);
|
||||
return tmp;
|
||||
}
|
||||
|
||||
static inline void __put_unaligned_memmove16(u16 val, void *p)
|
||||
{
|
||||
memmove(p, &val, 2);
|
||||
}
|
||||
|
||||
static inline void __put_unaligned_memmove32(u32 val, void *p)
|
||||
{
|
||||
memmove(p, &val, 4);
|
||||
}
|
||||
|
||||
static inline void __put_unaligned_memmove64(u64 val, void *p)
|
||||
{
|
||||
memmove(p, &val, 8);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_UNALIGNED_MEMMOVE_H */
|
@ -16,8 +16,30 @@
|
||||
#ifndef _LINUX_UUID_H_
|
||||
#define _LINUX_UUID_H_
|
||||
|
||||
#include <uapi/linux/uuid.h>
|
||||
#include <string.h>
|
||||
#include <asm/types.h>
|
||||
|
||||
typedef struct {
|
||||
__u8 b[16];
|
||||
} uuid_le;
|
||||
|
||||
typedef struct {
|
||||
__u8 b[16];
|
||||
} uuid_be;
|
||||
|
||||
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
|
||||
((uuid_le) \
|
||||
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
|
||||
(b) & 0xff, ((b) >> 8) & 0xff, \
|
||||
(c) & 0xff, ((c) >> 8) & 0xff, \
|
||||
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
|
||||
|
||||
#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \
|
||||
((uuid_be) \
|
||||
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
|
||||
((b) >> 8) & 0xff, (b) & 0xff, \
|
||||
((c) >> 8) & 0xff, (c) & 0xff, \
|
||||
(d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
|
||||
|
||||
static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
|
||||
{
|
||||
|
@@ -1,108 +0,0 @@
/* zutil.h -- internal interface and configuration of the compression library
 * Copyright (C) 1995-1998 Jean-loup Gailly.
 * For conditions of distribution and use, see copyright notice in zlib.h
 */

/* WARNING: this file should *not* be used by applications. It is
   part of the implementation of the compression library and is
   subject to change. Applications should only use zlib.h.
 */

/* @(#) $Id: zutil.h,v 1.1 2000/01/01 03:32:23 davem Exp $ */

#ifndef _Z_UTIL_H
#define _Z_UTIL_H

#include <stdlib.h>
#include <string.h>
#include <linux/zlib.h>
#include <linux/string.h>
#include <linux/kernel.h>

typedef unsigned char  uch;
typedef unsigned short ush;
typedef unsigned long  ulg;

/* common constants */

#define STORED_BLOCK 0
#define STATIC_TREES 1
#define DYN_TREES    2
/* The three kinds of block type */

#define MIN_MATCH  3
#define MAX_MATCH  258
/* The minimum and maximum match lengths */

#define PRESET_DICT 0x20 /* preset dictionary flag in zlib header */

	/* target dependencies */

	/* Common defaults */

#ifndef OS_CODE
#  define OS_CODE  0x03  /* assume Unix */
#endif

	/* functions */

typedef uLong (*check_func) (uLong check, const Byte *buf,
			     uInt len);


	/* checksum functions */

#define BASE 65521L /* largest prime smaller than 65536 */
#define NMAX 5552
/* NMAX is the largest n such that 255n(n+1)/2 + (n+1)(BASE-1) <= 2^32-1 */

#define DO1(buf,i)  {s1 += buf[i]; s2 += s1;}
#define DO2(buf,i)  DO1(buf,i); DO1(buf,i+1);
#define DO4(buf,i)  DO2(buf,i); DO2(buf,i+2);
#define DO8(buf,i)  DO4(buf,i); DO4(buf,i+4);
#define DO16(buf)   DO8(buf,0); DO8(buf,8);

/* ========================================================================= */
/*
     Update a running Adler-32 checksum with the bytes buf[0..len-1] and
   return the updated checksum. If buf is NULL, this function returns
   the required initial value for the checksum.
     An Adler-32 checksum is almost as reliable as a CRC32 but can be computed
   much faster. Usage example:

     uLong adler = zlib_adler32(0L, NULL, 0);

     while (read_buffer(buffer, length) != EOF) {
       adler = zlib_adler32(adler, buffer, length);
     }
     if (adler != original_adler) error();
*/
static inline uLong zlib_adler32(uLong adler,
				 const Byte *buf,
				 uInt len)
{
	unsigned long s1 = adler & 0xffff;
	unsigned long s2 = (adler >> 16) & 0xffff;
	int k;

	if (buf == NULL) return 1L;

	while (len > 0) {
		k = len < NMAX ? len : NMAX;
		len -= k;
		while (k >= 16) {
			DO16(buf);
			buf += 16;
			k -= 16;
		}
		if (k != 0) do {
			s1 += *buf++;
			s2 += s1;
		} while (--k);
		s1 %= BASE;
		s2 %= BASE;
	}
	return (s2 << 16) | s1;
}

#endif /* _Z_UTIL_H */
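For illustration, a standalone sketch (not part of this commit) of the same Adler-32 update that zlib_adler32() above performs, without the unrolling macros; the helper name is made up, and the expected result is the well-known checksum of the string "Wikipedia".

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ADLER_BASE 65521UL	/* largest prime smaller than 65536 */

/* Plain rolling update: s1 sums the bytes, s2 sums the running s1 values. */
static uint32_t adler32_update(uint32_t adler, const unsigned char *buf, size_t len)
{
	uint32_t s1 = adler & 0xffff;
	uint32_t s2 = (adler >> 16) & 0xffff;

	while (len--) {
		s1 = (s1 + *buf++) % ADLER_BASE;
		s2 = (s2 + s1) % ADLER_BASE;
	}
	return (s2 << 16) | s1;
}

int main(void)
{
	const char *msg = "Wikipedia";
	/* 1 is the initial value, i.e. what zlib_adler32(0, NULL, 0) returns. */
	uint32_t adler = adler32_update(1, (const unsigned char *)msg, strlen(msg));

	printf("adler32(\"%s\") = 0x%08x\n", msg, (unsigned)adler);	/* expect 0x11e60398 */
	return 0;
}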
@@ -1,53 +0,0 @@
/*
 * UUID/GUID definition
 *
 * Copyright (C) 2010, Intel Corp.
 *	Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _UAPI_LINUX_UUID_H_
#define _UAPI_LINUX_UUID_H_

#include <asm/types.h>

typedef struct {
	__u8 b[16];
} uuid_le;

typedef struct {
	__u8 b[16];
} uuid_be;

#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
((uuid_le)								\
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
   (b) & 0xff, ((b) >> 8) & 0xff,					\
   (c) & 0xff, ((c) >> 8) & 0xff,					\
   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})

#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
((uuid_be)								\
{{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
   ((b) >> 8) & 0xff, (b) & 0xff,					\
   ((c) >> 8) & 0xff, (c) & 0xff,					\
   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})

#define NULL_UUID_LE							\
	UUID_LE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,	\
		0x00, 0x00, 0x00, 0x00)

#define NULL_UUID_BE							\
	UUID_BE(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00,	\
		0x00, 0x00, 0x00, 0x00)


#endif /* _UAPI_LINUX_UUID_H_ */
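For illustration, a standalone sketch (not part of this commit) showing what the UUID_LE initializer above expands to: the first three fields are stored little-endian and the trailing eight bytes are copied verbatim. The sample UUID value is arbitrary.

#include <stdio.h>

typedef unsigned char u8;

typedef struct { u8 b[16]; } uuid_le;

/* Same initializer shape as the UUID_LE macro in the deleted header. */
#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
((uuid_le)								\
{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
   (b) & 0xff, ((b) >> 8) & 0xff,					\
   (c) & 0xff, ((c) >> 8) & 0xff,					\
   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})

int main(void)
{
	/* 12345678-9abc-def0 plus eight literal tail bytes */
	uuid_le u = UUID_LE(0x12345678, 0x9abc, 0xdef0,
			    0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88);

	/* The first four bytes come out reversed: 78 56 34 12 ... */
	for (int i = 0; i < 16; i++)
		printf("%02x ", u.b[i]);
	printf("\n");
	return 0;
}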
@@ -1,37 +0,0 @@
#include <linux/types.h>
#include <linux/bitrev.h>

const u8 byte_rev_table[256] = {
	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
};
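For illustration, a standalone sketch (not part of this commit) that reverses a byte bit by bit and can be cross-checked against the first entries of byte_rev_table[] above; the helper name is made up for the example.

#include <stdint.h>
#include <stdio.h>

/* Reverse the bits of one byte the slow, obvious way. */
static uint8_t bitrev8_by_loop(uint8_t x)
{
	uint8_t r = 0;

	for (int i = 0; i < 8; i++)
		if (x & (1u << i))
			r |= 1u << (7 - i);
	return r;
}

int main(void)
{
	/* Expected from the table above: 0x00->0x00, 0x01->0x80, 0x02->0x40,
	 * 0x03->0xc0, 0xff->0xff. */
	const uint8_t samples[] = { 0x00, 0x01, 0x02, 0x03, 0xff };

	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("bitrev8(0x%02x) = 0x%02x\n",
		       samples[i], bitrev8_by_loop(samples[i]));
	return 0;
}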
@@ -12,10 +12,8 @@
 * If -E is returned, result is not touched.
 */
#include <errno.h>
#include <linux/ctype.h>
#include <ctype.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/export.h>
#include <linux/types.h>
#include "kstrtox.h"

@@ -71,7 +69,7 @@ unsigned int _parse_integer(const char *s, unsigned int base, unsigned long long
	 * it in the max base we support (16)
	 */
	if (unlikely(res & (~0ull << 60))) {
		if (res > div_u64(ULLONG_MAX - val, base))
		if (res > ULLONG_MAX - val / base)
			overflow = 1;
	}
	res = res * base + val;
@@ -126,7 +124,6 @@ int kstrtoull(const char *s, unsigned int base, unsigned long long *res)
		s++;
	return _kstrtoull(s, base, res);
}
EXPORT_SYMBOL(kstrtoull);

/**
 * kstrtoll - convert a string to a long long
@@ -166,7 +163,6 @@ int kstrtoll(const char *s, unsigned int base, long long *res)
	}
	return 0;
}
EXPORT_SYMBOL(kstrtoll);

/* Internal, do not use. */
int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
@@ -182,7 +178,6 @@ int _kstrtoul(const char *s, unsigned int base, unsigned long *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(_kstrtoul);

/* Internal, do not use. */
int _kstrtol(const char *s, unsigned int base, long *res)
@@ -198,7 +193,6 @@ int _kstrtol(const char *s, unsigned int base, long *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(_kstrtol);

/**
 * kstrtouint - convert a string to an unsigned int
@@ -229,7 +223,6 @@ int kstrtouint(const char *s, unsigned int base, unsigned int *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtouint);

/**
 * kstrtoint - convert a string to an int
@@ -260,7 +253,6 @@ int kstrtoint(const char *s, unsigned int base, int *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtoint);

int kstrtou16(const char *s, unsigned int base, u16 *res)
{
@@ -275,7 +267,6 @@ int kstrtou16(const char *s, unsigned int base, u16 *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtou16);

int kstrtos16(const char *s, unsigned int base, s16 *res)
{
@@ -290,7 +281,6 @@ int kstrtos16(const char *s, unsigned int base, s16 *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtos16);

int kstrtou8(const char *s, unsigned int base, u8 *res)
{
@@ -305,7 +295,6 @@ int kstrtou8(const char *s, unsigned int base, u8 *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtou8);

int kstrtos8(const char *s, unsigned int base, s8 *res)
{
@@ -320,7 +309,6 @@ int kstrtos8(const char *s, unsigned int base, s8 *res)
	*res = tmp;
	return 0;
}
EXPORT_SYMBOL(kstrtos8);

/**
 * kstrtobool - convert common user inputs into boolean values
@@ -367,4 +355,3 @@ int kstrtobool(const char *s, bool *res)

	return -EINVAL;
}
EXPORT_SYMBOL(kstrtobool);

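For illustration, a standalone sketch (not part of this commit) of the contract the kstrto*() helpers above follow: parse the whole string in the given base and return 0 on success or a negative errno. The wrapper is approximated with strtoull so it runs outside the shim, and its name is made up.

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int example_strtouint(const char *s, unsigned int base, unsigned int *res)
{
	char *end;
	unsigned long long tmp;

	errno = 0;
	tmp = strtoull(s, &end, base);
	if (end == s || *end != '\0')
		return -EINVAL;		/* not a number, or trailing junk */
	if (errno == ERANGE || tmp > UINT_MAX)
		return -ERANGE;		/* does not fit in unsigned int */
	*res = tmp;
	return 0;
}

int main(void)
{
	unsigned int v = 0;
	int rc;

	rc = example_strtouint("42", 10, &v);
	printf("\"42\"   -> rc=%d v=%u\n", rc, v);

	rc = example_strtouint("0x1f", 16, &v);
	printf("\"0x1f\" -> rc=%d v=%u\n", rc, v);

	rc = example_strtouint("junk", 10, &v);
	printf("\"junk\" -> rc=%d\n", rc);
	return 0;
}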
@@ -15,12 +15,12 @@
 */

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/rhashtable.h>

@@ -1,17 +1,15 @@

#include <linux/futex.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <linux/futex.h>

/* hack for mips: */
#define CONFIG_RCU_HAVE_FUTEX 1
#include <urcu/futex.h>

#include <linux/math64.h>
#include <linux/printk.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>

__thread struct task_struct *current;
@@ -83,7 +81,7 @@ long schedule_timeout(long timeout)
	 * that will tell you if something is gone wrong and where.
	 */
	if (timeout < 0) {
		printk(KERN_ERR "schedule_timeout: wrong timeout "
		fprintf(stderr, "schedule_timeout: wrong timeout "
			"value %lx\n", timeout);
		current->state = TASK_RUNNING;
		goto out;

linux/sort.c
@@ -1,143 +0,0 @@
/*
 * A fast, small, non-recursive O(nlog n) sort for the Linux kernel
 *
 * Jan 23 2005  Matt Mackall <mpm@selenic.com>
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sort.h>

static int alignment_ok(const void *base, int align)
{
	return IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ||
		((unsigned long)base & (align - 1)) == 0;
}

static void u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void u64_swap(void *a, void *b, int size)
{
	u64 t = *(u64 *)a;
	*(u64 *)a = *(u64 *)b;
	*(u64 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp_func: pointer to comparison function
 * @swap_func: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array. You may provide a
 * swap_func function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case. While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */

void sort(void *base, size_t num, size_t size,
	  int (*cmp_func)(const void *, const void *),
	  void (*swap_func)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	int i = (num/2 - 1) * size, n = num * size, c, r;

	if (!swap_func) {
		if (size == 4 && alignment_ok(base, 4))
			swap_func = u32_swap;
		else if (size == 8 && alignment_ok(base, 8))
			swap_func = u64_swap;
		else
			swap_func = generic_swap;
	}

	/* heapify */
	for ( ; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap_func(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size &&
					cmp_func(base + c, base + c + size) < 0)
				c += size;
			if (cmp_func(base + r, base + c) >= 0)
				break;
			swap_func(base + r, base + c, size);
		}
	}
}

EXPORT_SYMBOL(sort);

#if 0
#include <linux/slab.h>
/* a simple boot-time regression test */

int cmpint(const void *a, const void *b)
{
	return *(int *)a - *(int *)b;
}

static int sort_test(void)
{
	int *a, i, r = 1;

	a = kmalloc(1000 * sizeof(int), GFP_KERNEL);
	BUG_ON(!a);

	printk("testing sort()\n");

	for (i = 0; i < 1000; i++) {
		r = (r * 725861) % 6599;
		a[i] = r;
	}

	sort(a, 1000, sizeof(int), cmpint, NULL);

	for (i = 0; i < 999; i++)
		if (a[i] > a[i+1]) {
			printk("sort() failed!\n");
			break;
		}

	kfree(a);

	return 0;
}

module_init(sort_test);
#endif
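For illustration, a standalone sketch (not part of this commit) of the same byte-offset heapsort structure sort() above uses — heapify, then repeatedly move the root to the end and sift down. The helper names are made up, and generic byte swapping is used throughout for brevity.

#include <stddef.h>
#include <stdio.h>

/* Swap two elements byte by byte (the generic_swap case above). */
static void byte_swap(char *a, char *b, size_t size)
{
	while (size-- > 0) {
		char t = *a;
		*a++ = *b;
		*b++ = t;
	}
}

static void heapsort_bytes(void *base, size_t num, size_t size,
			   int (*cmp)(const void *, const void *))
{
	char *p = base;
	long sz = size, n, i, c, r;

	if (num < 2)
		return;
	n = num * sz;
	i = (num / 2 - 1) * sz;

	/* heapify: sift each internal node down into place */
	for (; i >= 0; i -= sz)
		for (r = i; r * 2 + sz < n; r = c) {
			c = r * 2 + sz;
			if (c < n - sz && cmp(p + c, p + c + sz) < 0)
				c += sz;
			if (cmp(p + r, p + c) >= 0)
				break;
			byte_swap(p + r, p + c, sz);
		}

	/* pop the max to the end, shrink the heap, sift down again */
	for (i = n - sz; i > 0; i -= sz) {
		byte_swap(p, p + i, sz);
		for (r = 0; r * 2 + sz < i; r = c) {
			c = r * 2 + sz;
			if (c < i - sz && cmp(p + c, p + c + sz) < 0)
				c += sz;
			if (cmp(p + r, p + c) >= 0)
				break;
			byte_swap(p + r, p + c, sz);
		}
	}
}

static int cmpint(const void *a, const void *b)
{
	return *(const int *)a - *(const int *)b;
}

int main(void)
{
	int a[] = { 5, 1, 4, 1, 5, 9, 2, 6 };
	size_t n = sizeof(a) / sizeof(a[0]);

	heapsort_bytes(a, n, sizeof(a[0]), cmpint);
	for (size_t i = 0; i < n; i++)
		printf("%d ", a[i]);
	printf("\n");
	return 0;
}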
@@ -19,37 +19,20 @@
 * -  Kissed strtok() goodbye
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/bug.h>
#include <linux/errno.h>

#include <ctype.h>
#include <errno.h>
#include <string.h>

/**
 * skip_spaces - Removes leading whitespace from @str.
 * @str: The string to be stripped.
 *
 * Returns a pointer to the first non-whitespace character in @str.
 */
char *skip_spaces(const char *str)
#include <linux/compiler.h>
#include <linux/string.h>

static char *skip_spaces(const char *str)
{
	while (isspace(*str))
		++str;
	return (char *)str;
}

/**
 * strim - Removes leading and trailing whitespace from @s.
 * @s: The string to be stripped.
 *
 * Note that the first trailing whitespace is replaced with a %NUL-terminator
 * in the given string @s. Returns a pointer to the first non-whitespace
 * character in @s.
 */
char *strim(char *s)
{
	size_t size;
@@ -67,17 +50,6 @@ char *strim(char *s)
	return skip_spaces(s);
}

/**
 * strlcpy - Copy a C-string into a sized buffer
 * @dest: Where to copy the string to
 * @src: Where to copy the string from
 * @size: size of destination buffer
 *
 * Compatible with *BSD: the result is always a valid
 * NUL-terminated string that fits in the buffer (unless,
 * of course, the buffer size is zero). It does not pad
 * out the result like strncpy() does.
 */
size_t strlcpy(char *dest, const char *src, size_t size)
{
	size_t ret = strlen(src);

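For illustration, a standalone sketch (not part of this commit) of the strim() and strlcpy() behaviour documented above; the example_ helper names and test strings are made up.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the strim() contract above: cut trailing whitespace by writing a
 * NUL, then return a pointer past any leading whitespace. */
static char *example_strim(char *s)
{
	size_t size = strlen(s);

	while (size && isspace((unsigned char)s[size - 1]))
		s[--size] = '\0';
	while (isspace((unsigned char)*s))
		s++;
	return s;
}

/* Mirrors the strlcpy() contract above: always NUL-terminate (if size > 0)
 * and return the source length, so callers can detect truncation. */
static size_t example_strlcpy(char *dest, const char *src, size_t size)
{
	size_t ret = strlen(src);

	if (size) {
		size_t len = (ret >= size) ? size - 1 : ret;

		memcpy(dest, src, len);
		dest[len] = '\0';
	}
	return ret;
}

int main(void)
{
	char buf[8];
	char text[] = "  hello world \n";

	printf("strim:   '%s'\n", example_strim(text));
	printf("strlcpy: %zu chars offered to a %zu-byte buffer -> '%s'\n",
	       example_strlcpy(buf, "a long source string", sizeof(buf)),
	       sizeof(buf), buf);
	return 0;
}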