Mirror of https://github.com/koverstreet/bcachefs-tools.git, synced 2025-02-02 00:00:03 +03:00.
drop dead code

commit 62e4df2a38 (parent 426e88e41c)
@@ -1,37 +1,7 @@
/*
 * Cryptographic API for algorithms (i.e., low-level API).
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */
#ifndef _CRYPTO_ALGAPI_H
#define _CRYPTO_ALGAPI_H

#include <linux/crypto.h>
#include <crypto/skcipher.h>

struct crypto_type {
	unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
	unsigned int (*extsize)(struct crypto_alg *alg);
	int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
	int (*init_tfm)(struct crypto_tfm *tfm);

	unsigned type;
	unsigned maskclear;
	unsigned maskset;
	unsigned tfmsize;
};

extern const struct crypto_type crypto_blkcipher_type;

static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

#endif /* _CRYPTO_ALGAPI_H */
@@ -14,19 +14,8 @@
#define _CRYPTO_HASH_H

#include <linux/crypto.h>
#include <linux/string.h>

struct shash_desc {
	struct crypto_shash *tfm;
	u32 flags;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
};

#define SHASH_DESC_ON_STACK(shash, ctx)				  \
	char __##shash##_desc[sizeof(struct shash_desc) +	  \
		crypto_shash_descsize(ctx)] CRYPTO_MINALIGN_ATTR; \
	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc
struct shash_desc;

struct shash_alg {
	int (*init)(struct shash_desc *desc);
@@ -42,6 +31,8 @@ struct shash_alg {
	struct crypto_alg base;
};

int crypto_register_shash(struct shash_alg *alg);

struct crypto_shash {
	unsigned		descsize;
	struct crypto_tfm	base;
@@ -50,24 +41,14 @@ struct crypto_shash {
struct crypto_shash *crypto_alloc_shash(const char *alg_name, u32 type,
					u32 mask);

static inline struct crypto_tfm *crypto_shash_tfm(struct crypto_shash *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_shash(struct crypto_shash *tfm)
{
	crypto_destroy_tfm(tfm, crypto_shash_tfm(tfm));
}

static inline struct shash_alg *__crypto_shash_alg(struct crypto_alg *alg)
{
	return container_of(alg, struct shash_alg, base);
	kfree(tfm);
}

static inline struct shash_alg *crypto_shash_alg(struct crypto_shash *tfm)
{
	return __crypto_shash_alg(crypto_shash_tfm(tfm)->__crt_alg);
	return container_of(tfm->base.alg, struct shash_alg, base);
}

static inline unsigned crypto_shash_digestsize(struct crypto_shash *tfm)
@@ -80,10 +61,17 @@ static inline unsigned crypto_shash_descsize(struct crypto_shash *tfm)
	return tfm->descsize;
}

static inline void *shash_desc_ctx(struct shash_desc *desc)
{
	return desc->__ctx;
}
struct shash_desc {
	struct crypto_shash *tfm;
	u32 flags;

	void *ctx[] CRYPTO_MINALIGN_ATTR;
};

#define SHASH_DESC_ON_STACK(shash, tfm)				  \
	char __##shash##_desc[sizeof(struct shash_desc) +	  \
		crypto_shash_descsize(tfm)] CRYPTO_MINALIGN_ATTR; \
	struct shash_desc *shash = (struct shash_desc *)__##shash##_desc

static inline int crypto_shash_init(struct shash_desc *desc)
{
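A usage sketch for the shash interface above: SHASH_DESC_ON_STACK(), crypto_shash_init() and crypto_shash_descsize() are taken from this header, while crypto_shash_update() and crypto_shash_final() are assumed to exist with their usual kernel signatures (they fall outside this hunk). The tfm is assumed to come from crypto_alloc_shash(); this is illustrative only, not part of the header.

static int example_shash_digest(struct crypto_shash *tfm,
				const void *buf, unsigned len, u8 *out)
{
	/* descriptor lives on the stack, sized by crypto_shash_descsize(tfm) */
	SHASH_DESC_ON_STACK(desc, tfm);
	int ret;

	desc->tfm = tfm;

	ret = crypto_shash_init(desc);
	if (!ret)
		ret = crypto_shash_update(desc, buf, len);	/* assumed API */
	if (!ret)
		ret = crypto_shash_final(desc, out);		/* assumed API */
	return ret;
}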
@@ -1,15 +0,0 @@
#ifndef _CRYPTO_INTERNAL_HASH_H
#define _CRYPTO_INTERNAL_HASH_H

#include <crypto/algapi.h>
#include <crypto/hash.h>

int crypto_register_shash(struct shash_alg *alg);

static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_shash, base);
}

#endif /* _CRYPTO_INTERNAL_HASH_H */
@@ -14,89 +14,49 @@
#define _CRYPTO_SKCIPHER_H

#include <linux/crypto.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct skcipher_request {
	unsigned int cryptlen;
struct crypto_skcipher;
struct skcipher_request;

	u8 *iv;

	struct scatterlist *src;
	struct scatterlist *dst;

	struct crypto_tfm *tfm;
	//struct crypto_async_request base;

	void *__ctx[] CRYPTO_MINALIGN_ATTR;
struct skcipher_alg {
	struct crypto_alg base;
};

int crypto_register_skcipher(struct skcipher_alg *alg);

struct crypto_skcipher {
	int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct skcipher_request *req);
	int (*decrypt)(struct skcipher_request *req);

	unsigned int ivsize;
	unsigned int reqsize;
	unsigned int keysize;
	unsigned ivsize;
	unsigned keysize;

	struct crypto_tfm base;
	struct crypto_tfm base;
};

struct skcipher_alg {
	int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
		      unsigned int keylen);
	int (*encrypt)(struct skcipher_request *req);
	int (*decrypt)(struct skcipher_request *req);
	int (*init)(struct crypto_skcipher *tfm);
	void (*exit)(struct crypto_skcipher *tfm);

	unsigned int min_keysize;
	unsigned int max_keysize;
	unsigned int ivsize;
	unsigned int chunksize;
	unsigned int walksize;

	struct crypto_alg base;
};

#define SKCIPHER_REQUEST_ON_STACK(name, tfm)			       \
	char __##name##_desc[sizeof(struct skcipher_request) +	       \
		crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR;    \
	struct skcipher_request *name = (void *)__##name##_desc

static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
	return crypto_tfm_ctx(&tfm->base);
}

static inline struct crypto_skcipher *__crypto_skcipher_cast(
	struct crypto_tfm *tfm)
{
	return container_of(tfm, struct crypto_skcipher, base);
}

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask);

static inline struct crypto_tfm *crypto_skcipher_tfm(
	struct crypto_skcipher *tfm)
{
	return &tfm->base;
}

static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
{
	crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
	kfree(tfm);
}

static inline struct skcipher_alg *crypto_skcipher_alg(
	struct crypto_skcipher *tfm)
{
	return container_of(crypto_skcipher_tfm(tfm)->__crt_alg,
			    struct skcipher_alg, base);
}
struct skcipher_request {
	unsigned cryptlen;
	u8 *iv;

	struct scatterlist *src;
	struct scatterlist *dst;

	struct crypto_tfm *tfm;
};

#define SKCIPHER_REQUEST_ON_STACK(name, tfm)		\
	struct skcipher_request __##name##_desc;	\
	struct skcipher_request *name = &__##name##_desc

static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
					 const u8 *key, unsigned int keylen)
@@ -107,32 +67,23 @@ static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
	struct skcipher_request *req)
{
	return __crypto_skcipher_cast(req->tfm);
	return container_of(req->tfm, struct crypto_skcipher, base);
}

static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	return tfm->encrypt(req);
	return crypto_skcipher_reqtfm(req)->encrypt(req);
}

static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);

	return tfm->decrypt(req);
}

static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
{
	return tfm->reqsize;
	return crypto_skcipher_reqtfm(req)->decrypt(req);
}

static inline void skcipher_request_set_tfm(struct skcipher_request *req,
					    struct crypto_skcipher *tfm)
{
	req->tfm = crypto_skcipher_tfm(tfm);
	req->tfm = &tfm->base;
}

static inline void skcipher_request_set_crypt(
@@ -140,10 +91,10 @@ static inline void skcipher_request_set_crypt(
	struct scatterlist *src, struct scatterlist *dst,
	unsigned int cryptlen, void *iv)
{
	req->src	= src;
	req->dst	= dst;
	req->cryptlen	= cryptlen;
	req->iv		= iv;
	req->src	= src;
	req->dst	= dst;
	req->cryptlen	= cryptlen;
	req->iv		= iv;
}

#endif /* _CRYPTO_SKCIPHER_H */
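A sketch of how the request helpers above fit together. Everything used here appears in this hunk; the tfm is assumed to have been allocated with crypto_alloc_skcipher() and keyed with crypto_skcipher_setkey() beforehand.

static int example_skcipher_encrypt(struct crypto_skcipher *tfm,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int len, void *iv)
{
	/* on-stack request, as provided by SKCIPHER_REQUEST_ON_STACK above */
	SKCIPHER_REQUEST_ON_STACK(req, tfm);

	skcipher_request_set_tfm(req, tfm);
	skcipher_request_set_crypt(req, src, dst, len, iv);

	/* dispatches to tfm->encrypt() via crypto_skcipher_reqtfm() */
	return crypto_skcipher_encrypt(req);
}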
@@ -250,16 +250,11 @@ extern void bio_advance(struct bio *, unsigned);
extern void bio_reset(struct bio *);
void bio_chain(struct bio *, struct bio *);

static inline void bio_flush_dcache_pages(struct bio *bi)
{
}

extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
			       struct bio *src, struct bvec_iter *src_iter);
extern void bio_copy_data(struct bio *dst, struct bio *src);

void bio_free_pages(struct bio *bio);
extern int bio_alloc_pages(struct bio *bio, gfp_t gfp);

void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);

@@ -17,157 +17,29 @@
#ifndef _LINUX_CRYPTO_H
#define _LINUX_CRYPTO_H

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/string.h>

#define CRYPTO_ALG_TYPE_MASK		0x0000000f
#define CRYPTO_ALG_TYPE_BLKCIPHER	0x00000004
#define CRYPTO_ALG_TYPE_SHASH		0x0000000e
#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK	0x0000000c
#define CRYPTO_ALG_ASYNC		0x00000080

#define CRYPTO_MAX_ALG_NAME		64

#define CRYPTO_MINALIGN		ARCH_KMALLOC_MINALIGN
#define CRYPTO_MINALIGN_ATTR	__attribute__ ((__aligned__(CRYPTO_MINALIGN)))

struct scatterlist;
struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;

struct blkcipher_desc {
	struct crypto_blkcipher *tfm;
	void *info;
	u32 flags;
};

struct blkcipher_alg {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned keylen);
	int (*encrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned nbytes);
	int (*decrypt)(struct blkcipher_desc *desc,
		       struct scatterlist *dst, struct scatterlist *src,
		       unsigned nbytes);
};

#define cra_blkcipher cra_u.blkcipher

struct crypto_alg {
	struct list_head	cra_list;
	struct list_head	cra_users;

	u32			cra_flags;
	unsigned		cra_ctxsize;
	char			cra_name[CRYPTO_MAX_ALG_NAME];

	const char		*cra_name;
	const struct crypto_type *cra_type;

	union {
		struct blkcipher_alg blkcipher;
	} cra_u;

	int (*cra_init)(struct crypto_tfm *tfm);
	void (*cra_exit)(struct crypto_tfm *tfm);
	void * (*alloc_tfm)(void);
} CRYPTO_MINALIGN_ATTR;

int crypto_register_alg(struct crypto_alg *alg);

struct blkcipher_tfm {
	int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
		      unsigned keylen);
	int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned nbytes);
	int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
		       struct scatterlist *src, unsigned nbytes);
};

struct crypto_tfm {
	u32 crt_flags;

	struct blkcipher_tfm crt_blkcipher;

	void (*exit)(struct crypto_tfm *tfm);

	struct crypto_alg *__crt_alg;
	void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
	struct crypto_alg *alg;
};

struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask);
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm);

static inline void crypto_free_tfm(struct crypto_tfm *tfm)
{
	return crypto_destroy_tfm(tfm, tfm);
}

static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm)
{
	return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK;
}

static inline void *crypto_tfm_ctx(struct crypto_tfm *tfm)
{
	return tfm->__crt_ctx;
}

struct crypto_blkcipher {
	struct crypto_tfm base;
};

static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	return (struct crypto_blkcipher *)tfm;
}

static inline struct crypto_blkcipher *crypto_blkcipher_cast(
	struct crypto_tfm *tfm)
{
	BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
	return __crypto_blkcipher_cast(tfm);
}

static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
	const char *alg_name, u32 type, u32 mask)
{
	type &= ~CRYPTO_ALG_TYPE_MASK;
	type |= CRYPTO_ALG_TYPE_BLKCIPHER;
	mask |= CRYPTO_ALG_TYPE_MASK;

	return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
}

static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
{
	crypto_free_tfm(&tfm->base);
}

static inline struct blkcipher_tfm *crypto_blkcipher_crt(
	struct crypto_blkcipher *tfm)
{
	return &tfm->base.crt_blkcipher;
}

static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
					  const u8 *key, unsigned keylen)
{
	return crypto_blkcipher_crt(tfm)->setkey(&tfm->base, key, keylen);
}

static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
					      struct scatterlist *dst,
					      struct scatterlist *src,
					      unsigned nbytes)
{
	return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
}

#endif /* _LINUX_CRYPTO_H */
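A sketch of how the blkcipher helpers above chain together. Only functions defined in this hunk are used; the transform is assumed to have been obtained with crypto_alloc_blkcipher(), and error handling on allocation is omitted because the shim's convention for failed lookups is not shown here.

static int example_blkcipher_encrypt(struct crypto_blkcipher *tfm,
				     const u8 *key, unsigned keylen,
				     struct scatterlist *dst,
				     struct scatterlist *src,
				     unsigned nbytes)
{
	/* the IV, when needed, would be placed in desc.info before encrypting */
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;

	ret = crypto_blkcipher_setkey(tfm, key, keylen);
	if (ret)
		return ret;

	/* dispatches through tfm->base.crt_blkcipher.encrypt */
	return crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
}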
@@ -6,111 +6,6 @@
#include <linux/typecheck.h>
#include <linux/types.h>

#define HZ 100

#define MSEC_PER_SEC	1000L
#define USEC_PER_MSEC	1000L
#define NSEC_PER_USEC	1000L
#define NSEC_PER_MSEC	1000000L
#define USEC_PER_SEC	1000000L
#define NSEC_PER_SEC	1000000000L
#define FSEC_PER_SEC	1000000000000000LL

/*
 * The following defines establish the engineering parameters of the PLL
 * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
 * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
 * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
 * nearest power of two in order to avoid hardware multiply operations.
 */
#if HZ >= 12 && HZ < 24
# define SHIFT_HZ	4
#elif HZ >= 24 && HZ < 48
# define SHIFT_HZ	5
#elif HZ >= 48 && HZ < 96
# define SHIFT_HZ	6
#elif HZ >= 96 && HZ < 192
# define SHIFT_HZ	7
#elif HZ >= 192 && HZ < 384
# define SHIFT_HZ	8
#elif HZ >= 384 && HZ < 768
# define SHIFT_HZ	9
#elif HZ >= 768 && HZ < 1536
# define SHIFT_HZ	10
#elif HZ >= 1536 && HZ < 3072
# define SHIFT_HZ	11
#elif HZ >= 3072 && HZ < 6144
# define SHIFT_HZ	12
#elif HZ >= 6144 && HZ < 12288
# define SHIFT_HZ	13
#else
# error Invalid value of HZ.
#endif

/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
 * improve accuracy by shifting LSH bits, hence calculating:
 *     (NOM << LSH) / DEN
 * This however means trouble for large NOM, because (NOM << LSH) may no
 * longer fit in 32 bits. The following way of calculating this gives us
 * some slack, under the following conditions:
 * - (NOM / DEN) fits in (32 - LSH) bits.
 * - (NOM % DEN) fits in (32 - LSH) bits.
 */
#define SH_DIV(NOM,DEN,LSH) (   (((NOM) / (DEN)) << (LSH))		\
			     + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))

/* LATCH is used in the interval timer and ftape setup. */
#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ)	/* For divider */

extern int register_refined_jiffies(long clock_tick_rate);

/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)

/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)

static inline u64 sched_clock(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);

	return ((s64) ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
}

static inline u64 local_clock(void)
{
	return sched_clock();
}

extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
extern u64 nsecs_to_jiffies64(u64 n);
extern unsigned long nsecs_to_jiffies(u64 n);

static inline u64 get_jiffies_64(void)
{
	return nsecs_to_jiffies64(sched_clock());
}

#define jiffies_64	get_jiffies_64()
#define jiffies		((unsigned long) get_jiffies_64())

/*
 * These inlines deal with timer wrapping correctly. You are
 * strongly encouraged to use them
 * 1. Because people otherwise forget
 * 2. Because if the timer wrap changes in future you won't have to
 *    alter your driver code.
 *
 * time_after(a,b) returns true if the time a is after time b.
 *
 * Do this with "<0" and ">=0" to only test the sign of the result. A
 * good compiler would generate better code (and a really good compiler
 * wouldn't care). Gcc is currently neither.
 */
#define time_after(a,b)		\
	(typecheck(unsigned long, a) && \
	 typecheck(unsigned long, b) && \
@@ -123,23 +18,14 @@ static inline u64 get_jiffies_64(void)
	 ((long)((a) - (b)) >= 0))
#define time_before_eq(a,b)	time_after_eq(b,a)

/*
 * Calculate whether a is in the range of [b, c].
 */
#define time_in_range(a,b,c) \
	(time_after_eq(a,b) && \
	 time_before_eq(a,c))

/*
 * Calculate whether a is in the range of [b, c).
 */
#define time_in_range_open(a,b,c) \
	(time_after_eq(a,b) && \
	 time_before(a,c))

/* Same as above, but does so with platform independent 64bit types.
 * These must be used when utilizing jiffies_64 (i.e. return value of
 * get_jiffies_64() */
#define time_after64(a,b)	\
	(typecheck(__u64, a) &&	\
	 typecheck(__u64, b) && \
@ -156,301 +42,42 @@ static inline u64 get_jiffies_64(void)
|
||||
(time_after_eq64(a, b) && \
|
||||
time_before_eq64(a, c))
|
||||
|
||||
/*
|
||||
* These four macros compare jiffies and 'a' for convenience.
|
||||
*/
|
||||
|
||||
/* time_is_before_jiffies(a) return true if a is before jiffies */
|
||||
#define time_is_before_jiffies(a) time_after(jiffies, a)
|
||||
|
||||
/* time_is_after_jiffies(a) return true if a is after jiffies */
|
||||
#define time_is_after_jiffies(a) time_before(jiffies, a)
|
||||
|
||||
/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
|
||||
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
|
||||
|
||||
/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
|
||||
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
|
||||
|
||||
/*
|
||||
* Have the 32 bit jiffies value wrap 5 minutes after boot
|
||||
* so jiffies wrap bugs show up earlier.
|
||||
*/
|
||||
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
|
||||
|
||||
/*
|
||||
* Change timeval to jiffies, trying to avoid the
|
||||
* most obvious overflows..
|
||||
*
|
||||
* And some not so obvious.
|
||||
*
|
||||
* Note that we don't want to return LONG_MAX, because
|
||||
* for various timeout reasons we often end up having
|
||||
* to wait "jiffies+1" in order to guarantee that we wait
|
||||
* at _least_ "jiffies" - so "jiffies+1" had better still
|
||||
* be positive.
|
||||
*/
|
||||
#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
|
||||
|
||||
extern unsigned long preset_lpj;
|
||||
|
||||
/*
|
||||
* We want to do realistic conversions of time so we need to use the same
|
||||
* values the update wall clock code uses as the jiffies size. This value
|
||||
* is: TICK_NSEC (which is defined in timex.h). This
|
||||
* is a constant and is in nanoseconds. We will use scaled math
|
||||
* with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
|
||||
* NSEC_JIFFIE_SC. Note that these defines contain nothing but
|
||||
* constants and so are computed at compile time. SHIFT_HZ (computed in
|
||||
* timex.h) adjusts the scaling for different HZ values.
|
||||
|
||||
* Scaled math??? What is that?
|
||||
*
|
||||
* Scaled math is a way to do integer math on values that would,
|
||||
* otherwise, either overflow, underflow, or cause undesired div
|
||||
* instructions to appear in the execution path. In short, we "scale"
|
||||
* up the operands so they take more bits (more precision, less
|
||||
* underflow), do the desired operation and then "scale" the result back
|
||||
* by the same amount. If we do the scaling by shifting we avoid the
|
||||
* costly mpy and the dastardly div instructions.
|
||||
|
||||
* Suppose, for example, we want to convert from seconds to jiffies
|
||||
* where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
|
||||
* simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
|
||||
* observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
|
||||
* might calculate at compile time, however, the result will only have
|
||||
* about 3-4 bits of precision (less for smaller values of HZ).
|
||||
*
|
||||
* So, we scale as follows:
|
||||
* jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
|
||||
* jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
|
||||
* Then we make SCALE a power of two so:
|
||||
* jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
|
||||
* Now we define:
|
||||
* #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
|
||||
* jiff = (sec * SEC_CONV) >> SCALE;
|
||||
*
|
||||
* Often the math we use will expand beyond 32-bits so we tell C how to
|
||||
* do this and pass the 64-bit result of the mpy through the ">> SCALE"
|
||||
* which should take the result back to 32-bits. We want this expansion
|
||||
* to capture as much precision as possible. At the same time we don't
|
||||
* want to overflow so we pick the SCALE to avoid this. In this file,
|
||||
* that means using a different scale for each range of HZ values (as
|
||||
* defined in timex.h).
|
||||
*
|
||||
* For those who want to know, gcc will give a 64-bit result from a "*"
|
||||
* operator if the result is a long long AND at least one of the
|
||||
* operands is cast to long long (usually just prior to the "*" so as
|
||||
* not to confuse it into thinking it really has a 64-bit operand,
|
||||
* which, buy the way, it can do, but it takes more code and at least 2
|
||||
* mpys).
|
||||
|
||||
* We also need to be aware that one second in nanoseconds is only a
|
||||
* couple of bits away from overflowing a 32-bit word, so we MUST use
|
||||
* 64-bits to get the full range time in nanoseconds.
|
||||
|
||||
*/
|
||||
|
||||
/*
|
||||
* Here are the scales we will use. One for seconds, nanoseconds and
|
||||
* microseconds.
|
||||
*
|
||||
* Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
|
||||
* check if the sign bit is set. If not, we bump the shift count by 1.
|
||||
* (Gets an extra bit of precision where we can use it.)
|
||||
* We know it is set for HZ = 1024 and HZ = 100 not for 1000.
|
||||
* Haven't tested others.
|
||||
|
||||
* Limits of cpp (for #if expressions) only long (no long long), but
|
||||
* then we only need the most signicant bit.
|
||||
*/
|
||||
|
||||
#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
|
||||
#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
|
||||
#undef SEC_JIFFIE_SC
|
||||
#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
|
||||
#endif
|
||||
#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
|
||||
#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
|
||||
TICK_NSEC -1) / (u64)TICK_NSEC))
|
||||
|
||||
#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
|
||||
TICK_NSEC -1) / (u64)TICK_NSEC))
|
||||
/*
|
||||
* The maximum jiffie value is (MAX_INT >> 1). Here we translate that
|
||||
* into seconds. The 64-bit case will overflow if we are not careful,
|
||||
* so use the messy SH_DIV macro to do it. Still all constants.
|
||||
*/
|
||||
#if BITS_PER_LONG < 64
|
||||
# define MAX_SEC_IN_JIFFIES \
|
||||
(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
|
||||
#else /* take care of overflow on 64 bits machines */
|
||||
# define MAX_SEC_IN_JIFFIES \
|
||||
(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Convert various time units to each other:
|
||||
*/
|
||||
extern unsigned int jiffies_to_msecs(const unsigned long j);
|
||||
extern unsigned int jiffies_to_usecs(const unsigned long j);
|
||||
#define HZ 1000
|
||||
|
||||
static inline u64 jiffies_to_nsecs(const unsigned long j)
|
||||
{
|
||||
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
|
||||
return (u64)j * NSEC_PER_MSEC;
|
||||
}
|
||||
|
||||
extern unsigned long __msecs_to_jiffies(const unsigned int m);
|
||||
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
|
||||
/*
|
||||
* HZ is equal to or smaller than 1000, and 1000 is a nice round
|
||||
* multiple of HZ, divide with the factor between them, but round
|
||||
* upwards:
|
||||
*/
|
||||
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
|
||||
static inline unsigned jiffies_to_msecs(const unsigned long j)
|
||||
{
|
||||
return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
|
||||
return j;
|
||||
}
|
||||
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
|
||||
/*
|
||||
* HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
|
||||
* simply multiply with the factor between them.
|
||||
*
|
||||
* But first make sure the multiplication result cannot overflow:
|
||||
*/
|
||||
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
|
||||
|
||||
static inline unsigned long msecs_to_jiffies(const unsigned int m)
|
||||
{
|
||||
if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
|
||||
return MAX_JIFFY_OFFSET;
|
||||
return m * (HZ / MSEC_PER_SEC);
|
||||
return m;
|
||||
}
|
||||
#else
|
||||
/*
|
||||
* Generic case - multiply, round and divide. But first check that if
|
||||
* we are doing a net multiplication, that we wouldn't overflow:
|
||||
*/
|
||||
static inline unsigned long _msecs_to_jiffies(const unsigned int m)
|
||||
|
||||
static inline unsigned long nsecs_to_jiffies(u64 n)
|
||||
{
|
||||
if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
|
||||
return MAX_JIFFY_OFFSET;
|
||||
|
||||
return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
|
||||
return n / NSEC_PER_MSEC;
|
||||
}
|
||||
#endif
|
||||
/**
|
||||
* msecs_to_jiffies: - convert milliseconds to jiffies
|
||||
* @m: time in milliseconds
|
||||
*
|
||||
* conversion is done as follows:
|
||||
*
|
||||
* - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
|
||||
*
|
||||
* - 'too large' values [that would result in larger than
|
||||
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
|
||||
*
|
||||
* - all other values are converted to jiffies by either multiplying
|
||||
* the input value by a factor or dividing it with a factor and
|
||||
* handling any 32-bit overflows.
|
||||
* for the details see __msecs_to_jiffies()
|
||||
*
|
||||
* msecs_to_jiffies() checks for the passed in value being a constant
|
||||
* via __builtin_constant_p() allowing gcc to eliminate most of the
|
||||
* code, __msecs_to_jiffies() is called if the value passed does not
|
||||
* allow constant folding and the actual conversion must be done at
|
||||
* runtime.
|
||||
* the HZ range specific helpers _msecs_to_jiffies() are called both
|
||||
* directly here and from __msecs_to_jiffies() in the case where
|
||||
* constant folding is not possible.
|
||||
*/
|
||||
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
|
||||
|
||||
static inline u64 sched_clock(void)
|
||||
{
|
||||
if (__builtin_constant_p(m)) {
|
||||
if ((int)m < 0)
|
||||
return MAX_JIFFY_OFFSET;
|
||||
return _msecs_to_jiffies(m);
|
||||
} else {
|
||||
return __msecs_to_jiffies(m);
|
||||
}
|
||||
struct timespec ts;
|
||||
|
||||
clock_gettime(CLOCK_MONOTONIC, &ts);
|
||||
|
||||
return ((s64) ts.tv_sec * NSEC_PER_SEC) + ts.tv_nsec;
|
||||
}
|
||||
|
||||
extern unsigned long __usecs_to_jiffies(const unsigned int u);
|
||||
#if !(USEC_PER_SEC % HZ)
|
||||
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
|
||||
static inline u64 local_clock(void)
|
||||
{
|
||||
return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
|
||||
}
|
||||
#else
|
||||
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
|
||||
{
|
||||
return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
|
||||
>> USEC_TO_HZ_SHR32;
|
||||
}
|
||||
#endif
|
||||
|
||||
/**
|
||||
* usecs_to_jiffies: - convert microseconds to jiffies
|
||||
* @u: time in microseconds
|
||||
*
|
||||
* conversion is done as follows:
|
||||
*
|
||||
* - 'too large' values [that would result in larger than
|
||||
* MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
|
||||
*
|
||||
* - all other values are converted to jiffies by either multiplying
|
||||
* the input value by a factor or dividing it with a factor and
|
||||
* handling any 32-bit overflows as for msecs_to_jiffies.
|
||||
*
|
||||
* usecs_to_jiffies() checks for the passed in value being a constant
|
||||
* via __builtin_constant_p() allowing gcc to eliminate most of the
|
||||
* code, __usecs_to_jiffies() is called if the value passed does not
|
||||
* allow constant folding and the actual conversion must be done at
|
||||
* runtime.
|
||||
* the HZ range specific helpers _usecs_to_jiffies() are called both
|
||||
* directly here and from __msecs_to_jiffies() in the case where
|
||||
* constant folding is not possible.
|
||||
*/
|
||||
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
|
||||
{
|
||||
if (__builtin_constant_p(u)) {
|
||||
if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
|
||||
return MAX_JIFFY_OFFSET;
|
||||
return _usecs_to_jiffies(u);
|
||||
} else {
|
||||
return __usecs_to_jiffies(u);
|
||||
}
|
||||
return sched_clock();
|
||||
}
|
||||
|
||||
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
|
||||
|
||||
extern void jiffies_to_timespec64(const unsigned long,
|
||||
struct timespec64 *value);
|
||||
static inline unsigned long timespec_to_jiffies(const struct timespec *value)
|
||||
{
|
||||
struct timespec64 ts = timespec_to_timespec64(*value);
|
||||
|
||||
return timespec64_to_jiffies(&ts);
|
||||
}
|
||||
|
||||
static inline void jiffies_to_timespec(const unsigned long j,
|
||||
struct timespec *value)
|
||||
{
|
||||
struct timespec64 ts;
|
||||
|
||||
jiffies_to_timespec64(j, &ts);
|
||||
*value = timespec64_to_timespec(ts);
|
||||
}
|
||||
|
||||
extern unsigned long timeval_to_jiffies(const struct timeval *value);
|
||||
extern void jiffies_to_timeval(const unsigned long j,
|
||||
struct timeval *value);
|
||||
|
||||
extern clock_t jiffies_to_clock_t(unsigned long x);
|
||||
static inline clock_t jiffies_delta_to_clock_t(long delta)
|
||||
{
|
||||
return jiffies_to_clock_t(max(0L, delta));
|
||||
}
|
||||
|
||||
#define TIMESTAMP_SIZE 30
|
||||
#define jiffies nsecs_to_jiffies(sched_clock())
|
||||
|
||||
#endif
|
||||
|
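Later in this header the userspace shim defines HZ as 1000 and derives jiffies from CLOCK_MONOTONIC via sched_clock(), so one jiffy is one millisecond and the msecs/jiffies conversions collapse to identities. A small illustrative sketch of what that means for the helpers shown above (not part of the header; assumes <assert.h>):

static void example_jiffies(void)
{
	/* with HZ == 1000, jiffies and milliseconds are the same unit */
	assert(msecs_to_jiffies(250) == 250);
	assert(jiffies_to_msecs(250) == 250);

	/* nanoseconds round down to whole milliseconds/jiffies */
	assert(nsecs_to_jiffies(1500000) == 1);

	/* "jiffies" itself expands to nsecs_to_jiffies(sched_clock()) */
	unsigned long now = jiffies;

	/* the wrap-safe comparison macros still apply */
	assert(time_after(now + 1, now));
}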
@@ -2,17 +2,9 @@
#define _LINUX_KEY_H

#include <linux/types.h>
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/sysctl.h>
#include <linux/rwsem.h>
#include <linux/atomic.h>

#include <keyutils.h>

struct key;

struct user_key_payload {
	size_t		datalen;	/* length of this data */
	char		data[0];	/* actual data */
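user_key_payload ends in a zero-length array, so header and data are allocated as one block with the bytes trailing the struct. A minimal userspace sketch of that layout (malloc-based since this header wraps keyutils; assumes <stdlib.h> and <string.h>):

static struct user_key_payload *example_payload(const void *data, size_t len)
{
	struct user_key_payload *p;

	p = malloc(sizeof(*p) + len);	/* header plus trailing data[] */
	if (!p)
		return NULL;

	p->datalen = len;
	memcpy(p->data, data, len);
	return p;
}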
@@ -1,771 +1,64 @@
#ifndef __TOOLS_LINUX_LIST_H
#define __TOOLS_LINUX_LIST_H
#ifndef _LINUX_LIST_H
#define _LINUX_LIST_H

#include <linux/types.h>
#include <linux/poison.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
#include <urcu/list.h>

/*
 * Simple doubly linked list implementation.
 *
 * Some of the internal functions ("__xxx") are useful when
 * manipulating whole lists rather than single entries, as
 * sometimes we already know the next/prev entries and we can
 * generate better code by using them directly rather than
 * using the generic single-entry routines.
 */
#define list_head			cds_list_head
#define LIST_HEAD_INIT(l)		CDS_LIST_HEAD_INIT(l)
#define LIST_HEAD(l)			CDS_LIST_HEAD(l)
#define INIT_LIST_HEAD(l)		CDS_INIT_LIST_HEAD(l)
#define list_add(n, h)			cds_list_add(n, h)
#define list_add_tail(n, h)		cds_list_add_tail(n, h)
#define __list_del_entry(l)		cds_list_del(l)
#define list_del(l)			cds_list_del(l)
#define list_del_init(l)		cds_list_del_init(l)
#define list_replace(o, n)		cds_list_replace(o, n)
#define list_replace_init(o, n)		cds_list_replace_init(o, n)
#define list_move(l, h)			cds_list_move(l, h)
#define list_empty(l)			cds_list_empty(l)
#define list_splice(l, h)		cds_list_splice(l, h)
#define list_entry(p, t, m)		cds_list_entry(p, t, m)
#define list_first_entry(p, t, m)	cds_list_first_entry(p, t, m)
#define list_for_each(p, h)		cds_list_for_each(p, h)
#define list_for_each_prev(p, h)	cds_list_for_each_prev(p, h)
#define list_for_each_safe(p, n, h)	cds_list_for_each_safe(p, n, h)
#define list_for_each_prev_safe(p, n, h) cds_list_for_each_prev_safe(p, n, h)
#define list_for_each_entry(p, h, m)	cds_list_for_each_entry(p, h, m)
#define list_for_each_entry_reverse(p, h, m) cds_list_for_each_entry_reverse(p, h, m)
#define list_for_each_entry_safe(p, n, h, m) cds_list_for_each_entry_safe(p, n, h, m)
#define list_for_each_entry_safe_reverse(p, n, h, m) cds_list_for_each_entry_safe_reverse(p, n, h, m)

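After this change the list API is a thin mapping onto liburcu's cds_list implementation, while callers keep the familiar kernel spelling. A short usage sketch with a hypothetical item type (assumes <stdlib.h> for malloc/free):

struct example_item {
	struct list_head	list;
	int			val;
};

static void example_list(void)
{
	LIST_HEAD(items);
	struct example_item *i, *tmp;

	/* list_add_tail() and the iteration macros all expand to cds_list_*() */
	for (int n = 0; n < 4; n++) {
		i = malloc(sizeof(*i));
		i->val = n;
		list_add_tail(&i->list, &items);
	}

	/* safe iteration allows deleting the current entry as we go */
	list_for_each_entry_safe(i, tmp, &items, list) {
		list_del(&i->list);
		free(i);
	}
}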
#define LIST_HEAD_INIT(name) { &(name), &(name) }
|
||||
|
||||
#define LIST_HEAD(name) \
|
||||
struct list_head name = LIST_HEAD_INIT(name)
|
||||
|
||||
static inline void INIT_LIST_HEAD(struct list_head *list)
|
||||
{
|
||||
list->next = list;
|
||||
list->prev = list;
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert a new entry between two known consecutive entries.
|
||||
*
|
||||
* This is only for internal list manipulation where we know
|
||||
* the prev/next entries already!
|
||||
*/
|
||||
#ifndef CONFIG_DEBUG_LIST
|
||||
static inline void __list_add(struct list_head *new,
|
||||
struct list_head *prev,
|
||||
struct list_head *next)
|
||||
{
|
||||
next->prev = new;
|
||||
new->next = next;
|
||||
new->prev = prev;
|
||||
prev->next = new;
|
||||
}
|
||||
#else
|
||||
extern void __list_add(struct list_head *new,
|
||||
struct list_head *prev,
|
||||
struct list_head *next);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* list_add - add a new entry
|
||||
* @new: new entry to be added
|
||||
* @head: list head to add it after
|
||||
*
|
||||
* Insert a new entry after the specified head.
|
||||
* This is good for implementing stacks.
|
||||
*/
|
||||
static inline void list_add(struct list_head *new, struct list_head *head)
|
||||
{
|
||||
__list_add(new, head, head->next);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* list_add_tail - add a new entry
|
||||
* @new: new entry to be added
|
||||
* @head: list head to add it before
|
||||
*
|
||||
* Insert a new entry before the specified head.
|
||||
* This is useful for implementing queues.
|
||||
*/
|
||||
static inline void list_add_tail(struct list_head *new, struct list_head *head)
|
||||
{
|
||||
__list_add(new, head->prev, head);
|
||||
}
|
||||
|
||||
/*
|
||||
* Delete a list entry by making the prev/next entries
|
||||
* point to each other.
|
||||
*
|
||||
* This is only for internal list manipulation where we know
|
||||
* the prev/next entries already!
|
||||
*/
|
||||
static inline void __list_del(struct list_head * prev, struct list_head * next)
|
||||
{
|
||||
next->prev = prev;
|
||||
WRITE_ONCE(prev->next, next);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_del - deletes entry from list.
|
||||
* @entry: the element to delete from the list.
|
||||
* Note: list_empty() on entry does not return true after this, the entry is
|
||||
* in an undefined state.
|
||||
*/
|
||||
#ifndef CONFIG_DEBUG_LIST
|
||||
static inline void __list_del_entry(struct list_head *entry)
|
||||
{
|
||||
__list_del(entry->prev, entry->next);
|
||||
}
|
||||
|
||||
static inline void list_del(struct list_head *entry)
|
||||
{
|
||||
__list_del(entry->prev, entry->next);
|
||||
entry->next = LIST_POISON1;
|
||||
entry->prev = LIST_POISON2;
|
||||
}
|
||||
#else
|
||||
extern void __list_del_entry(struct list_head *entry);
|
||||
extern void list_del(struct list_head *entry);
|
||||
#endif
|
||||
|
||||
/**
|
||||
* list_replace - replace old entry by new one
|
||||
* @old : the element to be replaced
|
||||
* @new : the new element to insert
|
||||
*
|
||||
* If @old was empty, it will be overwritten.
|
||||
*/
|
||||
static inline void list_replace(struct list_head *old,
|
||||
struct list_head *new)
|
||||
{
|
||||
new->next = old->next;
|
||||
new->next->prev = new;
|
||||
new->prev = old->prev;
|
||||
new->prev->next = new;
|
||||
}
|
||||
|
||||
static inline void list_replace_init(struct list_head *old,
|
||||
struct list_head *new)
|
||||
{
|
||||
list_replace(old, new);
|
||||
INIT_LIST_HEAD(old);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_del_init - deletes entry from list and reinitialize it.
|
||||
* @entry: the element to delete from the list.
|
||||
*/
|
||||
static inline void list_del_init(struct list_head *entry)
|
||||
{
|
||||
__list_del_entry(entry);
|
||||
INIT_LIST_HEAD(entry);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_move - delete from one list and add as another's head
|
||||
* @list: the entry to move
|
||||
* @head: the head that will precede our entry
|
||||
*/
|
||||
static inline void list_move(struct list_head *list, struct list_head *head)
|
||||
{
|
||||
__list_del_entry(list);
|
||||
list_add(list, head);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_move_tail - delete from one list and add as another's tail
|
||||
* @list: the entry to move
|
||||
* @head: the head that will follow our entry
|
||||
*/
|
||||
static inline void list_move_tail(struct list_head *list,
|
||||
struct list_head *head)
|
||||
{
|
||||
__list_del_entry(list);
|
||||
list_add_tail(list, head);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_is_last - tests whether @list is the last entry in list @head
|
||||
* @list: the entry to test
|
||||
* @head: the head of the list
|
||||
*/
|
||||
static inline int list_is_last(const struct list_head *list,
|
||||
const struct list_head *head)
|
||||
{
|
||||
return list->next == head;
|
||||
}
|
||||
|
||||
/**
|
||||
* list_empty - tests whether a list is empty
|
||||
* @head: the list to test.
|
||||
*/
|
||||
static inline int list_empty(const struct list_head *head)
|
||||
{
|
||||
return head->next == head;
|
||||
}
|
||||
|
||||
/**
|
||||
* list_empty_careful - tests whether a list is empty and not being modified
|
||||
* @head: the list to test
|
||||
*
|
||||
* Description:
|
||||
* tests whether a list is empty _and_ checks that no other CPU might be
|
||||
* in the process of modifying either member (next or prev)
|
||||
*
|
||||
* NOTE: using list_empty_careful() without synchronization
|
||||
* can only be safe if the only activity that can happen
|
||||
* to the list entry is list_del_init(). Eg. it cannot be used
|
||||
* if another CPU could re-list_add() it.
|
||||
*/
|
||||
static inline int list_empty_careful(const struct list_head *head)
|
||||
{
|
||||
struct list_head *next = head->next;
|
||||
return (next == head) && (next == head->prev);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_rotate_left - rotate the list to the left
|
||||
* @head: the head of the list
|
||||
*/
|
||||
static inline void list_rotate_left(struct list_head *head)
|
||||
static inline void list_move_tail(struct list_head *list,
|
||||
struct list_head *head)
|
||||
{
|
||||
struct list_head *first;
|
||||
|
||||
if (!list_empty(head)) {
|
||||
first = head->next;
|
||||
list_move_tail(first, head);
|
||||
}
|
||||
list_del(list);
|
||||
list_add_tail(list, head);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_is_singular - tests whether a list has just one entry.
|
||||
* @head: the list to test.
|
||||
*/
|
||||
static inline int list_is_singular(const struct list_head *head)
|
||||
{
|
||||
return !list_empty(head) && (head->next == head->prev);
|
||||
}
|
||||
|
||||
static inline void __list_cut_position(struct list_head *list,
|
||||
struct list_head *head, struct list_head *entry)
|
||||
{
|
||||
struct list_head *new_first = entry->next;
|
||||
list->next = head->next;
|
||||
list->next->prev = list;
|
||||
list->prev = entry;
|
||||
entry->next = list;
|
||||
head->next = new_first;
|
||||
new_first->prev = head;
|
||||
}
|
||||
|
||||
/**
|
||||
* list_cut_position - cut a list into two
|
||||
* @list: a new list to add all removed entries
|
||||
* @head: a list with entries
|
||||
* @entry: an entry within head, could be the head itself
|
||||
* and if so we won't cut the list
|
||||
*
|
||||
* This helper moves the initial part of @head, up to and
|
||||
* including @entry, from @head to @list. You should
|
||||
* pass on @entry an element you know is on @head. @list
|
||||
* should be an empty list or a list you do not care about
|
||||
* losing its data.
|
||||
*
|
||||
*/
|
||||
static inline void list_cut_position(struct list_head *list,
|
||||
struct list_head *head, struct list_head *entry)
|
||||
{
|
||||
if (list_empty(head))
|
||||
return;
|
||||
if (list_is_singular(head) &&
|
||||
(head->next != entry && head != entry))
|
||||
return;
|
||||
if (entry == head)
|
||||
INIT_LIST_HEAD(list);
|
||||
else
|
||||
__list_cut_position(list, head, entry);
|
||||
}
|
||||
|
||||
static inline void __list_splice(const struct list_head *list,
|
||||
struct list_head *prev,
|
||||
struct list_head *next)
|
||||
{
|
||||
struct list_head *first = list->next;
|
||||
struct list_head *last = list->prev;
|
||||
|
||||
first->prev = prev;
|
||||
prev->next = first;
|
||||
|
||||
last->next = next;
|
||||
next->prev = last;
|
||||
}
|
||||
|
||||
/**
|
||||
* list_splice - join two lists, this is designed for stacks
|
||||
* @list: the new list to add.
|
||||
* @head: the place to add it in the first list.
|
||||
*/
|
||||
static inline void list_splice(const struct list_head *list,
|
||||
struct list_head *head)
|
||||
{
|
||||
if (!list_empty(list))
|
||||
__list_splice(list, head, head->next);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_splice_tail - join two lists, each list being a queue
|
||||
* @list: the new list to add.
|
||||
* @head: the place to add it in the first list.
|
||||
*/
|
||||
static inline void list_splice_tail(struct list_head *list,
|
||||
struct list_head *head)
|
||||
{
|
||||
if (!list_empty(list))
|
||||
__list_splice(list, head->prev, head);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_splice_init - join two lists and reinitialise the emptied list.
|
||||
* @list: the new list to add.
|
||||
* @head: the place to add it in the first list.
|
||||
*
|
||||
* The list at @list is reinitialised
|
||||
*/
|
||||
static inline void list_splice_init(struct list_head *list,
|
||||
struct list_head *head)
|
||||
{
|
||||
if (!list_empty(list)) {
|
||||
__list_splice(list, head, head->next);
|
||||
INIT_LIST_HEAD(list);
|
||||
}
|
||||
list_splice(list, head);
|
||||
INIT_LIST_HEAD(list);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_splice_tail_init - join two lists and reinitialise the emptied list
|
||||
* @list: the new list to add.
|
||||
* @head: the place to add it in the first list.
|
||||
*
|
||||
* Each of the lists is a queue.
|
||||
* The list at @list is reinitialised
|
||||
*/
|
||||
static inline void list_splice_tail_init(struct list_head *list,
|
||||
struct list_head *head)
|
||||
{
|
||||
if (!list_empty(list)) {
|
||||
__list_splice(list, head->prev, head);
|
||||
INIT_LIST_HEAD(list);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* list_entry - get the struct for this entry
|
||||
* @ptr: the &struct list_head pointer.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*/
|
||||
#define list_entry(ptr, type, member) \
|
||||
container_of(ptr, type, member)
|
||||
|
||||
/**
|
||||
* list_first_entry - get the first element from a list
|
||||
* @ptr: the list head to take the element from.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Note, that list is expected to be not empty.
|
||||
*/
|
||||
#define list_first_entry(ptr, type, member) \
|
||||
list_entry((ptr)->next, type, member)
|
||||
|
||||
/**
|
||||
* list_last_entry - get the last element from a list
|
||||
* @ptr: the list head to take the element from.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Note, that list is expected to be not empty.
|
||||
*/
|
||||
#define list_last_entry(ptr, type, member) \
|
||||
list_entry((ptr)->prev, type, member)
|
||||
|
||||
/**
|
||||
* list_first_entry_or_null - get the first element from a list
|
||||
* @ptr: the list head to take the element from.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Note that if the list is empty, it returns NULL.
|
||||
*/
|
||||
#define list_first_entry_or_null(ptr, type, member) \
|
||||
(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
|
||||
|
||||
/**
|
||||
* list_next_entry - get the next element in list
|
||||
* @pos: the type * to cursor
|
||||
* @member: the name of the list_head within the struct.
|
||||
*/
|
||||
#define list_next_entry(pos, member) \
|
||||
list_entry((pos)->member.next, typeof(*(pos)), member)
|
||||
/* hlists: */
|
||||
|
||||
/**
|
||||
* list_prev_entry - get the prev element in list
|
||||
* @pos: the type * to cursor
|
||||
* @member: the name of the list_head within the struct.
|
||||
*/
|
||||
#define list_prev_entry(pos, member) \
|
||||
list_entry((pos)->member.prev, typeof(*(pos)), member)
|
||||
#include <urcu/hlist.h>
|
||||
|
||||
/**
|
||||
* list_for_each - iterate over a list
|
||||
* @pos: the &struct list_head to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
*/
|
||||
#define list_for_each(pos, head) \
|
||||
for (pos = (head)->next; pos != (head); pos = pos->next)
|
||||
#define hlist_head cds_hlist_head
|
||||
#define hlist_node cds_hlist_node
|
||||
|
||||
/**
|
||||
* list_for_each_prev - iterate over a list backwards
|
||||
* @pos: the &struct list_head to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
*/
|
||||
#define list_for_each_prev(pos, head) \
|
||||
for (pos = (head)->prev; pos != (head); pos = pos->prev)
|
||||
|
||||
/**
|
||||
* list_for_each_safe - iterate over a list safe against removal of list entry
|
||||
* @pos: the &struct list_head to use as a loop cursor.
|
||||
* @n: another &struct list_head to use as temporary storage
|
||||
* @head: the head for your list.
|
||||
*/
|
||||
#define list_for_each_safe(pos, n, head) \
|
||||
for (pos = (head)->next, n = pos->next; pos != (head); \
|
||||
pos = n, n = pos->next)
|
||||
|
||||
/**
|
||||
* list_for_each_prev_safe - iterate over a list backwards safe against removal of list entry
|
||||
* @pos: the &struct list_head to use as a loop cursor.
|
||||
* @n: another &struct list_head to use as temporary storage
|
||||
* @head: the head for your list.
|
||||
*/
|
||||
#define list_for_each_prev_safe(pos, n, head) \
|
||||
for (pos = (head)->prev, n = pos->prev; \
|
||||
pos != (head); \
|
||||
pos = n, n = pos->prev)
|
||||
|
||||
/**
|
||||
* list_for_each_entry - iterate over list of given type
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*/
|
||||
#define list_for_each_entry(pos, head, member) \
|
||||
for (pos = list_first_entry(head, typeof(*pos), member); \
|
||||
&pos->member != (head); \
|
||||
pos = list_next_entry(pos, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_reverse - iterate backwards over list of given type.
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*/
|
||||
#define list_for_each_entry_reverse(pos, head, member) \
|
||||
for (pos = list_last_entry(head, typeof(*pos), member); \
|
||||
&pos->member != (head); \
|
||||
pos = list_prev_entry(pos, member))
|
||||
|
||||
/**
|
||||
* list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
|
||||
* @pos: the type * to use as a start point
|
||||
* @head: the head of the list
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Prepares a pos entry for use as a start point in list_for_each_entry_continue().
|
||||
*/
|
||||
#define list_prepare_entry(pos, head, member) \
|
||||
((pos) ? : list_entry(head, typeof(*pos), member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_continue - continue iteration over list of given type
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Continue to iterate over list of given type, continuing after
|
||||
* the current position.
|
||||
*/
|
||||
#define list_for_each_entry_continue(pos, head, member) \
|
||||
for (pos = list_next_entry(pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = list_next_entry(pos, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_continue_reverse - iterate backwards from the given point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Start to iterate over list of given type backwards, continuing after
|
||||
* the current position.
|
||||
*/
|
||||
#define list_for_each_entry_continue_reverse(pos, head, member) \
|
||||
for (pos = list_prev_entry(pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = list_prev_entry(pos, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_from - iterate over list of given type from the current point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Iterate over list of given type, continuing from current position.
|
||||
*/
|
||||
#define list_for_each_entry_from(pos, head, member) \
|
||||
for (; &pos->member != (head); \
|
||||
pos = list_next_entry(pos, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @n: another type * to use as temporary storage
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*/
|
||||
#define list_for_each_entry_safe(pos, n, head, member) \
|
||||
for (pos = list_first_entry(head, typeof(*pos), member), \
|
||||
n = list_next_entry(pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = n, n = list_next_entry(n, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_safe_continue - continue list iteration safe against removal
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @n: another type * to use as temporary storage
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Iterate over list of given type, continuing after current point,
|
||||
* safe against removal of list entry.
|
||||
*/
|
||||
#define list_for_each_entry_safe_continue(pos, n, head, member) \
|
||||
for (pos = list_next_entry(pos, member), \
|
||||
n = list_next_entry(pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = n, n = list_next_entry(n, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_safe_from - iterate over list from current point safe against removal
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @n: another type * to use as temporary storage
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Iterate over list of given type from current point, safe against
|
||||
* removal of list entry.
|
||||
*/
|
||||
#define list_for_each_entry_safe_from(pos, n, head, member) \
|
||||
for (n = list_next_entry(pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = n, n = list_next_entry(n, member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_safe_reverse - iterate backwards over list safe against removal
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @n: another type * to use as temporary storage
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Iterate backwards over list of given type, safe against removal
|
||||
* of list entry.
|
||||
*/
|
||||
#define list_for_each_entry_safe_reverse(pos, n, head, member) \
|
||||
for (pos = list_last_entry(head, typeof(*pos), member), \
|
||||
n = list_prev_entry(pos, member); \
|
||||
&pos->member != (head); \
|
||||
pos = n, n = list_prev_entry(n, member))
|
||||
|
||||
/**
|
||||
* list_safe_reset_next - reset a stale list_for_each_entry_safe loop
|
||||
* @pos: the loop cursor used in the list_for_each_entry_safe loop
|
||||
* @n: temporary storage used in list_for_each_entry_safe
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* list_safe_reset_next is not safe to use in general if the list may be
|
||||
* modified concurrently (eg. the lock is dropped in the loop body). An
|
||||
* exception to this is if the cursor element (pos) is pinned in the list,
|
||||
* and list_safe_reset_next is called after re-taking the lock and before
|
||||
* completing the current iteration of the loop body.
|
||||
*/
|
||||
#define list_safe_reset_next(pos, n, member) \
|
||||
n = list_next_entry(pos, member)
|
||||
|
||||
/*
|
||||
* Double linked lists with a single pointer list head.
|
||||
* Mostly useful for hash tables where the two pointer list head is
|
||||
* too wasteful.
|
||||
* You lose the ability to access the tail in O(1).
|
||||
*/
|
||||
|
||||
#define HLIST_HEAD_INIT { .first = NULL }
|
||||
#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL }
|
||||
#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)
|
||||
static inline void INIT_HLIST_NODE(struct hlist_node *h)
|
||||
{
|
||||
h->next = NULL;
|
||||
h->pprev = NULL;
|
||||
}
|
||||
|
||||
static inline int hlist_unhashed(const struct hlist_node *h)
|
||||
{
|
||||
return !h->pprev;
|
||||
}
|
||||
|
||||
static inline int hlist_empty(const struct hlist_head *h)
|
||||
{
|
||||
return !h->first;
|
||||
}
|
||||
|
||||
static inline void __hlist_del(struct hlist_node *n)
|
||||
{
|
||||
struct hlist_node *next = n->next;
|
||||
struct hlist_node **pprev = n->pprev;
|
||||
|
||||
WRITE_ONCE(*pprev, next);
|
||||
if (next)
|
||||
next->pprev = pprev;
|
||||
}
|
||||
|
||||
static inline void hlist_del(struct hlist_node *n)
|
||||
{
|
||||
__hlist_del(n);
|
||||
n->next = LIST_POISON1;
|
||||
n->pprev = LIST_POISON2;
|
||||
}
|
||||
|
||||
static inline void hlist_del_init(struct hlist_node *n)
|
||||
{
|
||||
if (!hlist_unhashed(n)) {
|
||||
__hlist_del(n);
|
||||
INIT_HLIST_NODE(n);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
|
||||
{
|
||||
struct hlist_node *first = h->first;
|
||||
n->next = first;
|
||||
if (first)
|
||||
first->pprev = &n->next;
|
||||
h->first = n;
|
||||
n->pprev = &h->first;
|
||||
}
|
||||
|
||||
/* next must be != NULL */
|
||||
static inline void hlist_add_before(struct hlist_node *n,
|
||||
struct hlist_node *next)
|
||||
{
|
||||
n->pprev = next->pprev;
|
||||
n->next = next;
|
||||
next->pprev = &n->next;
|
||||
*(n->pprev) = n;
|
||||
}
|
||||
|
||||
static inline void hlist_add_behind(struct hlist_node *n,
|
||||
struct hlist_node *prev)
|
||||
{
|
||||
n->next = prev->next;
|
||||
prev->next = n;
|
||||
n->pprev = &prev->next;
|
||||
|
||||
if (n->next)
|
||||
n->next->pprev = &n->next;
|
||||
}
|
||||
|
||||
/* after that we'll appear to be on some hlist and hlist_del will work */
|
||||
static inline void hlist_add_fake(struct hlist_node *n)
|
||||
{
|
||||
n->pprev = &n->next;
|
||||
}
|
||||
|
||||
static inline bool hlist_fake(struct hlist_node *h)
|
||||
{
|
||||
return h->pprev == &h->next;
|
||||
}
|
||||
|
||||
/*
|
||||
* Move a list from one list head to another. Fixup the pprev
|
||||
* reference of the first entry if it exists.
|
||||
*/
|
||||
static inline void hlist_move_list(struct hlist_head *old,
|
||||
struct hlist_head *new)
|
||||
{
|
||||
new->first = old->first;
|
||||
if (new->first)
|
||||
new->first->pprev = &new->first;
|
||||
old->first = NULL;
|
||||
}
|
||||
|
||||
#define hlist_entry(ptr, type, member) container_of(ptr,type,member)
|
||||
|
||||
#define hlist_for_each(pos, head) \
|
||||
for (pos = (head)->first; pos ; pos = pos->next)
|
||||
|
||||
#define hlist_for_each_safe(pos, n, head) \
|
||||
for (pos = (head)->first; pos && ({ n = pos->next; 1; }); \
|
||||
pos = n)
|
||||
|
||||
#define hlist_entry_safe(ptr, type, member) \
|
||||
({ typeof(ptr) ____ptr = (ptr); \
|
||||
____ptr ? hlist_entry(____ptr, type, member) : NULL; \
|
||||
})
|
||||
|
||||
/**
 * hlist_for_each_entry - iterate over list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the hlist_node within the struct.
 */
#define hlist_for_each_entry(pos, head, member)				\
	for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member);\
	     pos;							\
	     pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
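/*
 * Illustrative sketch (not part of the original header): hlist heads are
 * commonly used as hash-table buckets. The names here (struct item, table,
 * hash_key) are hypothetical.
 *
 *	struct item { u32 key; struct hlist_node node; };
 *	struct hlist_head table[64];
 *
 *	hlist_add_head(&new_item->node, &table[hash_key(new_item->key)]);
 *
 *	struct item *it;
 *	hlist_for_each_entry(it, &table[hash_key(key)], node)
 *		if (it->key == key)
 *			return it;
 */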
|
||||
/**
|
||||
* hlist_for_each_entry_continue - iterate over a hlist continuing after current point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*/
|
||||
#define hlist_for_each_entry_continue(pos, member) \
|
||||
for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\
|
||||
pos; \
|
||||
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_from - iterate over a hlist continuing from current point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*/
|
||||
#define hlist_for_each_entry_from(pos, member) \
|
||||
for (; pos; \
|
||||
pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_safe - iterate over list of given type safe against removal of list entry
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @n: another &struct hlist_node to use as temporary storage
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*/
|
||||
#define hlist_for_each_entry_safe(pos, n, head, member) \
|
||||
for (pos = hlist_entry_safe((head)->first, typeof(*pos), member);\
|
||||
pos && ({ n = pos->member.next; 1; }); \
|
||||
pos = hlist_entry_safe(n, typeof(*pos), member))
|
||||
|
||||
/**
 * list_del_range - deletes range of entries from list.
 * @begin: first element in the range to delete from the list.
 * @end: last element in the range to delete from the list.
 * Note: list_empty() on the range of entries does not return true after this;
 * the entries are in an undefined state.
 */
static inline void list_del_range(struct list_head *begin,
				  struct list_head *end)
{
	begin->prev->next = end->next;
	end->next->prev = begin->prev;
}
|
||||
|
||||
/**
|
||||
* list_for_each_from - iterate over a list from one of its nodes
|
||||
* @pos: the &struct list_head to use as a loop cursor, from where to start
|
||||
* @head: the head for your list.
|
||||
*/
|
||||
#define list_for_each_from(pos, head) \
|
||||
for (; pos != (head); pos = pos->next)
|
||||
|
||||
#endif /* __TOOLS_LINUX_LIST_H */
|
||||
#endif /* _LIST_LIST_H */
|
||||
|
@ -1,197 +0,0 @@
|
||||
/*
|
||||
* Routines to manage notifier chains for passing status changes to any
|
||||
* interested routines. We need this instead of hard coded call lists so
|
||||
* that modules can poke their nose into the innards. The network devices
|
||||
* needed them so here they are for the rest of you.
|
||||
*
|
||||
* Alan Cox <Alan.Cox@linux.org>
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_NOTIFIER_H
|
||||
#define _LINUX_NOTIFIER_H
|
||||
#include <linux/errno.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/rwsem.h>
|
||||
//#include <linux/srcu.h>
|
||||
|
||||
/*
 * Notifier chains are of four types:
 *
 *	Atomic notifier chains: Chain callbacks run in interrupt/atomic
 *		context. Callouts are not allowed to block.
 *	Blocking notifier chains: Chain callbacks run in process context.
 *		Callouts are allowed to block.
 *	Raw notifier chains: There are no restrictions on callbacks,
 *		registration, or unregistration. All locking and protection
 *		must be provided by the caller.
 *	SRCU notifier chains: A variant of blocking notifier chains, with
 *		the same restrictions.
 *
 * atomic_notifier_chain_register() may be called from an atomic context,
 * but blocking_notifier_chain_register() and srcu_notifier_chain_register()
 * must be called from a process context. Ditto for the corresponding
 * _unregister() routines.
 *
 * atomic_notifier_chain_unregister(), blocking_notifier_chain_unregister(),
 * and srcu_notifier_chain_unregister() _must not_ be called from within
 * the call chain.
 *
 * SRCU notifier chains are an alternative form of blocking notifier chains.
 * They use SRCU (Sleepable Read-Copy Update) instead of rw-semaphores for
 * protection of the chain links. This means there is _very_ low overhead
 * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
 * As compensation, srcu_notifier_chain_unregister() is rather expensive.
 * SRCU notifier chains should be used when the chain will be called very
 * often but notifier_blocks will seldom be removed. Also, SRCU notifier
 * chains are slightly more difficult to use because they require special
 * runtime initialization.
 */
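/*
 * Illustrative sketch (not part of the original header): a callback is
 * wrapped in a notifier_block and registered on a chain head. The names
 * (my_event_cb, my_chain) are hypothetical.
 *
 *	static int my_event_cb(struct notifier_block *nb,
 *			       unsigned long action, void *data)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = { .notifier_call = my_event_cb };
 *	static BLOCKING_NOTIFIER_HEAD(my_chain);
 *
 *	blocking_notifier_chain_register(&my_chain, &my_nb);
 *	blocking_notifier_call_chain(&my_chain, 0, NULL);
 */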
|
||||
struct notifier_block;
|
||||
|
||||
typedef int (*notifier_fn_t)(struct notifier_block *nb,
|
||||
unsigned long action, void *data);
|
||||
|
||||
struct notifier_block {
|
||||
notifier_fn_t notifier_call;
|
||||
struct notifier_block __rcu *next;
|
||||
int priority;
|
||||
};
|
||||
|
||||
struct atomic_notifier_head {
|
||||
spinlock_t lock;
|
||||
struct notifier_block __rcu *head;
|
||||
};
|
||||
|
||||
struct blocking_notifier_head {
|
||||
struct rw_semaphore rwsem;
|
||||
struct notifier_block __rcu *head;
|
||||
};
|
||||
|
||||
struct raw_notifier_head {
|
||||
struct notifier_block __rcu *head;
|
||||
};
|
||||
|
||||
#define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \
|
||||
spin_lock_init(&(name)->lock); \
|
||||
(name)->head = NULL; \
|
||||
} while (0)
|
||||
#define BLOCKING_INIT_NOTIFIER_HEAD(name) do { \
|
||||
init_rwsem(&(name)->rwsem); \
|
||||
(name)->head = NULL; \
|
||||
} while (0)
|
||||
#define RAW_INIT_NOTIFIER_HEAD(name) do { \
|
||||
(name)->head = NULL; \
|
||||
} while (0)
|
||||
|
||||
#define ATOMIC_NOTIFIER_INIT(name) { \
|
||||
.lock = __SPIN_LOCK_UNLOCKED(name.lock), \
|
||||
.head = NULL }
|
||||
#define BLOCKING_NOTIFIER_INIT(name) { \
|
||||
.rwsem = __RWSEM_INITIALIZER((name).rwsem), \
|
||||
.head = NULL }
|
||||
#define RAW_NOTIFIER_INIT(name) { \
|
||||
.head = NULL }
|
||||
|
||||
#define ATOMIC_NOTIFIER_HEAD(name) \
|
||||
struct atomic_notifier_head name = \
|
||||
ATOMIC_NOTIFIER_INIT(name)
|
||||
#define BLOCKING_NOTIFIER_HEAD(name) \
|
||||
struct blocking_notifier_head name = \
|
||||
BLOCKING_NOTIFIER_INIT(name)
|
||||
#define RAW_NOTIFIER_HEAD(name) \
|
||||
struct raw_notifier_head name = \
|
||||
RAW_NOTIFIER_INIT(name)
|
||||
|
||||
#define NOTIFY_DONE 0x0000 /* Don't care */
|
||||
#define NOTIFY_OK 0x0001 /* Suits me */
|
||||
#define NOTIFY_STOP_MASK 0x8000 /* Don't call further */
|
||||
#define NOTIFY_BAD (NOTIFY_STOP_MASK|0x0002)
|
||||
/* Bad/Veto action */
|
||||
/*
|
||||
* Clean way to return from the notifier and stop further calls.
|
||||
*/
|
||||
#define NOTIFY_STOP (NOTIFY_OK|NOTIFY_STOP_MASK)
|
||||
|
||||
#ifdef __KERNEL__
|
||||
|
||||
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
|
||||
struct notifier_block *nb);
|
||||
extern int blocking_notifier_chain_register(struct blocking_notifier_head *nh,
|
||||
struct notifier_block *nb);
|
||||
extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
|
||||
struct notifier_block *nb);
|
||||
|
||||
extern int blocking_notifier_chain_cond_register(
|
||||
struct blocking_notifier_head *nh,
|
||||
struct notifier_block *nb);
|
||||
|
||||
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
|
||||
struct notifier_block *nb);
|
||||
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
|
||||
struct notifier_block *nb);
|
||||
extern int raw_notifier_chain_unregister(struct raw_notifier_head *nh,
|
||||
struct notifier_block *nb);
|
||||
|
||||
extern int atomic_notifier_call_chain(struct atomic_notifier_head *nh,
|
||||
unsigned long val, void *v);
|
||||
extern int __atomic_notifier_call_chain(struct atomic_notifier_head *nh,
|
||||
unsigned long val, void *v, int nr_to_call, int *nr_calls);
|
||||
extern int blocking_notifier_call_chain(struct blocking_notifier_head *nh,
|
||||
unsigned long val, void *v);
|
||||
extern int __blocking_notifier_call_chain(struct blocking_notifier_head *nh,
|
||||
unsigned long val, void *v, int nr_to_call, int *nr_calls);
|
||||
extern int raw_notifier_call_chain(struct raw_notifier_head *nh,
|
||||
unsigned long val, void *v);
|
||||
extern int __raw_notifier_call_chain(struct raw_notifier_head *nh,
|
||||
unsigned long val, void *v, int nr_to_call, int *nr_calls);
|
||||
|
||||
/* Encapsulate (negative) errno value (in particular, NOTIFY_BAD <=> EPERM). */
static inline int notifier_from_errno(int err)
{
	if (err)
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);

	return NOTIFY_OK;
}

/* Restore (negative) errno value from notify return value. */
static inline int notifier_to_errno(int ret)
{
	ret &= ~NOTIFY_STOP_MASK;
	return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
}
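/*
 * Illustrative sketch (not part of the original header): a callback can veto
 * an action by encoding an errno, which the caller then recovers.
 *
 *	// in the notifier callback
 *	return notifier_from_errno(-EBUSY);	// sets NOTIFY_STOP_MASK
 *
 *	// at the call site
 *	int ret = blocking_notifier_call_chain(&chain, action, data);
 *	int err = notifier_to_errno(ret);	// err == -EBUSY
 */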
|
||||
/*
|
||||
* Declared notifiers so far. I can imagine quite a few more chains
|
||||
* over time (eg laptop power reset chains, reboot chain (to clean
|
||||
* device units up), device [un]mount chain, module load/unload chain,
|
||||
* low memory chain, screenblank chain (for plug in modular screenblankers)
|
||||
* VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
|
||||
*/
|
||||
|
||||
/* CPU notifiers are defined in include/linux/cpu.h. */
|
||||
|
||||
/* netdevice notifiers are defined in include/linux/netdevice.h */
|
||||
|
||||
/* reboot notifiers are defined in include/linux/reboot.h. */
|
||||
|
||||
/* Hibernation and suspend events are defined in include/linux/suspend.h. */
|
||||
|
||||
/* Virtual Terminal events are defined in include/linux/vt.h. */
|
||||
|
||||
#define NETLINK_URELEASE 0x0001 /* Unicast netlink socket released */
|
||||
|
||||
/* Console keyboard events.
|
||||
* Note: KBD_KEYCODE is always sent before KBD_UNBOUND_KEYCODE, KBD_UNICODE and
|
||||
* KBD_KEYSYM. */
|
||||
#define KBD_KEYCODE 0x0001 /* Keyboard keycode, called before any other */
|
||||
#define KBD_UNBOUND_KEYCODE 0x0002 /* Keyboard keycode which is not bound to any other */
|
||||
#define KBD_UNICODE 0x0003 /* Keyboard unicode */
|
||||
#define KBD_KEYSYM 0x0004 /* Keyboard keysym */
|
||||
#define KBD_POST_KEYSYM 0x0005 /* Called after keyboard keysym interpretation */
|
||||
|
||||
extern struct blocking_notifier_head reboot_notifier_list;
|
||||
|
||||
#endif /* __KERNEL__ */
|
||||
#endif /* _LINUX_NOTIFIER_H */
|
@ -1,14 +0,0 @@
|
||||
#ifndef _LINUX_RADIX_TREE_H
|
||||
#define _LINUX_RADIX_TREE_H
|
||||
|
||||
struct radix_tree_root {
|
||||
};
|
||||
|
||||
#define INIT_RADIX_TREE(root, mask) do {} while (0)
|
||||
|
||||
static inline void *radix_tree_lookup(struct radix_tree_root *r, unsigned long i)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#endif /* _LINUX_RADIX_TREE_H */
|
@ -1,127 +0,0 @@
|
||||
/*
|
||||
Red Black Trees
|
||||
(C) 1999 Andrea Arcangeli <andrea@suse.de>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
linux/include/linux/rbtree.h
|
||||
|
||||
  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids the use of callbacks, which would drastically hurt performance.
  It is not the cleanest approach, but in C (unlike C++) it is the way to get
  both performance and genericity.

  See Documentation/rbtree.txt for documentation and samples.
*/
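/*
 * Illustrative sketch (not part of the original header): a typical insert
 * core walks to the correct leaf, links the node, then rebalances. The
 * struct and key names below are hypothetical.
 *
 *	struct thing { u64 key; struct rb_node node; };
 *
 *	static void thing_insert(struct rb_root *root, struct thing *new)
 *	{
 *		struct rb_node **p = &root->rb_node, *parent = NULL;
 *
 *		while (*p) {
 *			parent = *p;
 *			if (new->key < rb_entry(parent, struct thing, node)->key)
 *				p = &parent->rb_left;
 *			else
 *				p = &parent->rb_right;
 *		}
 *		rb_link_node(&new->node, parent, p);
 *		rb_insert_color(&new->node, root);
 *	}
 */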
|
||||
#ifndef _LINUX_RBTREE_H
|
||||
#define _LINUX_RBTREE_H
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/rcupdate.h>
|
||||
|
||||
struct rb_node {
|
||||
unsigned long __rb_parent_color;
|
||||
struct rb_node *rb_right;
|
||||
struct rb_node *rb_left;
|
||||
} __attribute__((aligned(sizeof(long))));
|
||||
/* The alignment might seem pointless, but allegedly CRIS needs it */
|
||||
|
||||
struct rb_root {
|
||||
struct rb_node *rb_node;
|
||||
};
|
||||
|
||||
|
||||
#define rb_parent(r) ((struct rb_node *)((r)->__rb_parent_color & ~3))
|
||||
|
||||
#define RB_ROOT (struct rb_root) { NULL, }
|
||||
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
|
||||
|
||||
#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
|
||||
|
||||
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
|
||||
#define RB_EMPTY_NODE(node) \
|
||||
((node)->__rb_parent_color == (unsigned long)(node))
|
||||
#define RB_CLEAR_NODE(node) \
|
||||
((node)->__rb_parent_color = (unsigned long)(node))
|
||||
|
||||
|
||||
extern void rb_insert_color(struct rb_node *, struct rb_root *);
|
||||
extern void rb_erase(struct rb_node *, struct rb_root *);
|
||||
|
||||
|
||||
/* Find logical next and previous nodes in a tree */
|
||||
extern struct rb_node *rb_next(const struct rb_node *);
|
||||
extern struct rb_node *rb_prev(const struct rb_node *);
|
||||
extern struct rb_node *rb_first(const struct rb_root *);
|
||||
extern struct rb_node *rb_last(const struct rb_root *);
|
||||
|
||||
/* Postorder iteration - always visit the parent after its children */
|
||||
extern struct rb_node *rb_first_postorder(const struct rb_root *);
|
||||
extern struct rb_node *rb_next_postorder(const struct rb_node *);
|
||||
|
||||
/* Fast replacement of a single node without remove/rebalance/add/rebalance */
|
||||
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
|
||||
struct rb_root *root);
|
||||
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
|
||||
struct rb_root *root);
|
||||
|
||||
static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
|
||||
struct rb_node **rb_link)
|
||||
{
|
||||
node->__rb_parent_color = (unsigned long)parent;
|
||||
node->rb_left = node->rb_right = NULL;
|
||||
|
||||
*rb_link = node;
|
||||
}
|
||||
|
||||
static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
|
||||
struct rb_node **rb_link)
|
||||
{
|
||||
node->__rb_parent_color = (unsigned long)parent;
|
||||
node->rb_left = node->rb_right = NULL;
|
||||
|
||||
rcu_assign_pointer(*rb_link, node);
|
||||
}
|
||||
|
||||
#define rb_entry_safe(ptr, type, member) \
|
||||
({ typeof(ptr) ____ptr = (ptr); \
|
||||
____ptr ? rb_entry(____ptr, type, member) : NULL; \
|
||||
})
|
||||
|
||||
/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
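/*
 * Illustrative sketch (not part of the original header): post-order iteration
 * is the natural way to tear down a whole tree, since children are visited
 * before their parent. The names (struct thing, root) are hypothetical.
 *
 *	struct thing *t, *tmp;
 *
 *	rbtree_postorder_for_each_entry_safe(t, tmp, &root, node)
 *		kfree(t);
 *	root = RB_ROOT;
 */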
|
||||
#endif /* _LINUX_RBTREE_H */
|
@ -1,262 +0,0 @@
|
||||
/*
|
||||
Red Black Trees
|
||||
(C) 1999 Andrea Arcangeli <andrea@suse.de>
|
||||
(C) 2002 David Woodhouse <dwmw2@infradead.org>
|
||||
(C) 2012 Michel Lespinasse <walken@google.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
linux/include/linux/rbtree_augmented.h
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_RBTREE_AUGMENTED_H
|
||||
#define _LINUX_RBTREE_AUGMENTED_H
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/rbtree.h>
|
||||
|
||||
/*
|
||||
* Please note - only struct rb_augment_callbacks and the prototypes for
|
||||
* rb_insert_augmented() and rb_erase_augmented() are intended to be public.
|
||||
* The rest are implementation details you are not expected to depend on.
|
||||
*
|
||||
* See Documentation/rbtree.txt for documentation and samples.
|
||||
*/
|
||||
|
||||
struct rb_augment_callbacks {
|
||||
void (*propagate)(struct rb_node *node, struct rb_node *stop);
|
||||
void (*copy)(struct rb_node *old, struct rb_node *new);
|
||||
void (*rotate)(struct rb_node *old, struct rb_node *new);
|
||||
};
|
||||
|
||||
extern void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
|
||||
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
|
||||
/*
|
||||
* Fixup the rbtree and update the augmented information when rebalancing.
|
||||
*
|
||||
* On insertion, the user must update the augmented information on the path
|
||||
* leading to the inserted node, then call rb_link_node() as usual and
|
||||
* rb_augment_inserted() instead of the usual rb_insert_color() call.
|
||||
* If rb_augment_inserted() rebalances the rbtree, it will callback into
|
||||
* a user provided function to update the augmented information on the
|
||||
* affected subtrees.
|
||||
*/
|
||||
static inline void
|
||||
rb_insert_augmented(struct rb_node *node, struct rb_root *root,
|
||||
const struct rb_augment_callbacks *augment)
|
||||
{
|
||||
__rb_insert_augmented(node, root, augment->rotate);
|
||||
}
|
||||
|
||||
#define RB_DECLARE_CALLBACKS(rbstatic, rbname, rbstruct, rbfield, \
|
||||
rbtype, rbaugmented, rbcompute) \
|
||||
static inline void \
|
||||
rbname ## _propagate(struct rb_node *rb, struct rb_node *stop) \
|
||||
{ \
|
||||
while (rb != stop) { \
|
||||
rbstruct *node = rb_entry(rb, rbstruct, rbfield); \
|
||||
rbtype augmented = rbcompute(node); \
|
||||
if (node->rbaugmented == augmented) \
|
||||
break; \
|
||||
node->rbaugmented = augmented; \
|
||||
rb = rb_parent(&node->rbfield); \
|
||||
} \
|
||||
} \
|
||||
static inline void \
|
||||
rbname ## _copy(struct rb_node *rb_old, struct rb_node *rb_new) \
|
||||
{ \
|
||||
rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
|
||||
rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
|
||||
new->rbaugmented = old->rbaugmented; \
|
||||
} \
|
||||
static void \
|
||||
rbname ## _rotate(struct rb_node *rb_old, struct rb_node *rb_new) \
|
||||
{ \
|
||||
rbstruct *old = rb_entry(rb_old, rbstruct, rbfield); \
|
||||
rbstruct *new = rb_entry(rb_new, rbstruct, rbfield); \
|
||||
new->rbaugmented = old->rbaugmented; \
|
||||
old->rbaugmented = rbcompute(old); \
|
||||
} \
|
||||
rbstatic const struct rb_augment_callbacks rbname = { \
|
||||
rbname ## _propagate, rbname ## _copy, rbname ## _rotate \
|
||||
};
|
||||
|
||||
|
||||
#define RB_RED 0
|
||||
#define RB_BLACK 1
|
||||
|
||||
#define __rb_parent(pc) ((struct rb_node *)(pc & ~3))
|
||||
|
||||
#define __rb_color(pc) ((pc) & 1)
|
||||
#define __rb_is_black(pc) __rb_color(pc)
|
||||
#define __rb_is_red(pc) (!__rb_color(pc))
|
||||
#define rb_color(rb) __rb_color((rb)->__rb_parent_color)
|
||||
#define rb_is_red(rb) __rb_is_red((rb)->__rb_parent_color)
|
||||
#define rb_is_black(rb) __rb_is_black((rb)->__rb_parent_color)
|
||||
|
||||
static inline void rb_set_parent(struct rb_node *rb, struct rb_node *p)
|
||||
{
|
||||
rb->__rb_parent_color = rb_color(rb) | (unsigned long)p;
|
||||
}
|
||||
|
||||
static inline void rb_set_parent_color(struct rb_node *rb,
|
||||
struct rb_node *p, int color)
|
||||
{
|
||||
rb->__rb_parent_color = (unsigned long)p | color;
|
||||
}
|
||||
|
||||
static inline void
|
||||
__rb_change_child(struct rb_node *old, struct rb_node *new,
|
||||
struct rb_node *parent, struct rb_root *root)
|
||||
{
|
||||
if (parent) {
|
||||
if (parent->rb_left == old)
|
||||
WRITE_ONCE(parent->rb_left, new);
|
||||
else
|
||||
WRITE_ONCE(parent->rb_right, new);
|
||||
} else
|
||||
WRITE_ONCE(root->rb_node, new);
|
||||
}
|
||||
|
||||
static inline void
|
||||
__rb_change_child_rcu(struct rb_node *old, struct rb_node *new,
|
||||
struct rb_node *parent, struct rb_root *root)
|
||||
{
|
||||
if (parent) {
|
||||
if (parent->rb_left == old)
|
||||
rcu_assign_pointer(parent->rb_left, new);
|
||||
else
|
||||
rcu_assign_pointer(parent->rb_right, new);
|
||||
} else
|
||||
rcu_assign_pointer(root->rb_node, new);
|
||||
}
|
||||
|
||||
extern void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
|
||||
void (*augment_rotate)(struct rb_node *old, struct rb_node *new));
|
||||
|
||||
static __always_inline struct rb_node *
|
||||
__rb_erase_augmented(struct rb_node *node, struct rb_root *root,
|
||||
const struct rb_augment_callbacks *augment)
|
||||
{
|
||||
struct rb_node *child = node->rb_right;
|
||||
struct rb_node *tmp = node->rb_left;
|
||||
struct rb_node *parent, *rebalance;
|
||||
unsigned long pc;
|
||||
|
||||
if (!tmp) {
|
||||
/*
|
||||
* Case 1: node to erase has no more than 1 child (easy!)
|
||||
*
|
||||
* Note that if there is one child it must be red due to 5)
|
||||
* and node must be black due to 4). We adjust colors locally
|
||||
* so as to bypass __rb_erase_color() later on.
|
||||
*/
|
||||
pc = node->__rb_parent_color;
|
||||
parent = __rb_parent(pc);
|
||||
__rb_change_child(node, child, parent, root);
|
||||
if (child) {
|
||||
child->__rb_parent_color = pc;
|
||||
rebalance = NULL;
|
||||
} else
|
||||
rebalance = __rb_is_black(pc) ? parent : NULL;
|
||||
tmp = parent;
|
||||
} else if (!child) {
|
||||
/* Still case 1, but this time the child is node->rb_left */
|
||||
tmp->__rb_parent_color = pc = node->__rb_parent_color;
|
||||
parent = __rb_parent(pc);
|
||||
__rb_change_child(node, tmp, parent, root);
|
||||
rebalance = NULL;
|
||||
tmp = parent;
|
||||
} else {
|
||||
struct rb_node *successor = child, *child2;
|
||||
|
||||
tmp = child->rb_left;
|
||||
if (!tmp) {
|
||||
/*
|
||||
* Case 2: node's successor is its right child
|
||||
*
|
||||
* (n) (s)
|
||||
* / \ / \
|
||||
* (x) (s) -> (x) (c)
|
||||
* \
|
||||
* (c)
|
||||
*/
|
||||
parent = successor;
|
||||
child2 = successor->rb_right;
|
||||
|
||||
augment->copy(node, successor);
|
||||
} else {
|
||||
/*
|
||||
* Case 3: node's successor is leftmost under
|
||||
* node's right child subtree
|
||||
*
|
||||
* (n) (s)
|
||||
* / \ / \
|
||||
* (x) (y) -> (x) (y)
|
||||
* / /
|
||||
* (p) (p)
|
||||
* / /
|
||||
* (s) (c)
|
||||
* \
|
||||
* (c)
|
||||
*/
|
||||
do {
|
||||
parent = successor;
|
||||
successor = tmp;
|
||||
tmp = tmp->rb_left;
|
||||
} while (tmp);
|
||||
child2 = successor->rb_right;
|
||||
WRITE_ONCE(parent->rb_left, child2);
|
||||
WRITE_ONCE(successor->rb_right, child);
|
||||
rb_set_parent(child, successor);
|
||||
|
||||
augment->copy(node, successor);
|
||||
augment->propagate(parent, successor);
|
||||
}
|
||||
|
||||
tmp = node->rb_left;
|
||||
WRITE_ONCE(successor->rb_left, tmp);
|
||||
rb_set_parent(tmp, successor);
|
||||
|
||||
pc = node->__rb_parent_color;
|
||||
tmp = __rb_parent(pc);
|
||||
__rb_change_child(node, successor, tmp, root);
|
||||
|
||||
if (child2) {
|
||||
successor->__rb_parent_color = pc;
|
||||
rb_set_parent_color(child2, parent, RB_BLACK);
|
||||
rebalance = NULL;
|
||||
} else {
|
||||
unsigned long pc2 = successor->__rb_parent_color;
|
||||
successor->__rb_parent_color = pc;
|
||||
rebalance = __rb_is_black(pc2) ? parent : NULL;
|
||||
}
|
||||
tmp = successor;
|
||||
}
|
||||
|
||||
augment->propagate(tmp, NULL);
|
||||
return rebalance;
|
||||
}
|
||||
|
||||
static __always_inline void
|
||||
rb_erase_augmented(struct rb_node *node, struct rb_root *root,
|
||||
const struct rb_augment_callbacks *augment)
|
||||
{
|
||||
struct rb_node *rebalance = __rb_erase_augmented(node, root, augment);
|
||||
if (rebalance)
|
||||
__rb_erase_color(rebalance, root, augment->rotate);
|
||||
}
|
||||
|
||||
#endif /* _LINUX_RBTREE_AUGMENTED_H */
|
@ -1,672 +1,16 @@
|
||||
#ifndef _LINUX_RCULIST_H
|
||||
#define _LINUX_RCULIST_H
|
||||
|
||||
/*
|
||||
* RCU-protected list version
|
||||
*/
|
||||
#include <linux/list.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <urcu/rculist.h>
|
||||
|
||||
/*
|
||||
* Why is there no list_empty_rcu()? Because list_empty() serves this
|
||||
* purpose. The list_empty() function fetches the RCU-protected pointer
|
||||
* and compares it to the address of the list head, but neither dereferences
|
||||
* this pointer itself nor provides this pointer to the caller. Therefore,
|
||||
* it is not necessary to use rcu_dereference(), so that list_empty() can
|
||||
* be used anywhere you would want to use a list_empty_rcu().
|
||||
*/
|
||||
|
||||
/*
|
||||
* INIT_LIST_HEAD_RCU - Initialize a list_head visible to RCU readers
|
||||
* @list: list to be initialized
|
||||
*
|
||||
* You should instead use INIT_LIST_HEAD() for normal initialization and
|
||||
* cleanup tasks, when readers have no access to the list being initialized.
|
||||
* However, if the list being initialized is visible to readers, you
|
||||
* need to keep the compiler from being too mischievous.
|
||||
*/
|
||||
static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
|
||||
{
|
||||
WRITE_ONCE(list->next, list);
|
||||
WRITE_ONCE(list->prev, list);
|
||||
}
|
||||
#include <urcu/rcuhlist.h>
|
||||
|
||||
/*
|
||||
* return the ->next pointer of a list_head in an rcu safe
|
||||
* way, we must not access it directly
|
||||
*/
|
||||
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
|
||||
#define hlist_add_head_rcu cds_hlist_add_head_rcu
|
||||
#define hlist_del_rcu cds_hlist_del_rcu
|
||||
|
||||
/*
|
||||
* Insert a new entry between two known consecutive entries.
|
||||
*
|
||||
* This is only for internal list manipulation where we know
|
||||
* the prev/next entries already!
|
||||
*/
|
||||
#ifndef CONFIG_DEBUG_LIST
|
||||
static inline void __list_add_rcu(struct list_head *new,
|
||||
struct list_head *prev, struct list_head *next)
|
||||
{
|
||||
new->next = next;
|
||||
new->prev = prev;
|
||||
rcu_assign_pointer(list_next_rcu(prev), new);
|
||||
next->prev = new;
|
||||
}
|
||||
#else
|
||||
void __list_add_rcu(struct list_head *new,
|
||||
struct list_head *prev, struct list_head *next);
|
||||
#endif
|
||||
#define hlist_for_each_rcu cds_hlist_for_each_rcu
|
||||
#define hlist_for_each_entry_rcu cds_hlist_for_each_entry_rcu_2
|
||||
|
||||
/**
|
||||
* list_add_rcu - add a new entry to rcu-protected list
|
||||
* @new: new entry to be added
|
||||
* @head: list head to add it after
|
||||
*
|
||||
* Insert a new entry after the specified head.
|
||||
* This is good for implementing stacks.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as list_add_rcu()
|
||||
* or list_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* list_for_each_entry_rcu().
|
||||
*/
|
||||
static inline void list_add_rcu(struct list_head *new, struct list_head *head)
|
||||
{
|
||||
__list_add_rcu(new, head, head->next);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_add_tail_rcu - add a new entry to rcu-protected list
|
||||
* @new: new entry to be added
|
||||
* @head: list head to add it before
|
||||
*
|
||||
* Insert a new entry before the specified head.
|
||||
* This is useful for implementing queues.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as list_add_tail_rcu()
|
||||
* or list_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* list_for_each_entry_rcu().
|
||||
*/
|
||||
static inline void list_add_tail_rcu(struct list_head *new,
|
||||
struct list_head *head)
|
||||
{
|
||||
__list_add_rcu(new, head->prev, head);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_del_rcu - deletes entry from list without re-initialization
|
||||
* @entry: the element to delete from the list.
|
||||
*
|
||||
* Note: list_empty() on entry does not return true after this,
|
||||
* the entry is in an undefined state. It is useful for RCU based
|
||||
* lockfree traversal.
|
||||
*
|
||||
* In particular, it means that we can not poison the forward
|
||||
* pointers that may still be used for walking the list.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as list_del_rcu()
|
||||
* or list_add_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* list_for_each_entry_rcu().
|
||||
*
|
||||
* Note that the caller is not permitted to immediately free
|
||||
* the newly deleted entry. Instead, either synchronize_rcu()
|
||||
* or call_rcu() must be used to defer freeing until an RCU
|
||||
* grace period has elapsed.
|
||||
*/
|
||||
static inline void list_del_rcu(struct list_head *entry)
|
||||
{
|
||||
__list_del_entry(entry);
|
||||
entry->prev = LIST_POISON2;
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_del_init_rcu - deletes entry from hash list with re-initialization
|
||||
* @n: the element to delete from the hash list.
|
||||
*
|
||||
* Note: hlist_unhashed() on the node returns true after this. It is
|
||||
* useful for RCU based read lockfree traversal if the writer side
|
||||
* must know if the list entry is still hashed or already unhashed.
|
||||
*
|
||||
* In particular, it means that we can not poison the forward pointers
|
||||
* that may still be used for walking the hash list and we can only
|
||||
* zero the pprev pointer so list_unhashed() will return true after
|
||||
* this.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary (such as
|
||||
* holding appropriate locks) to avoid racing with another
|
||||
* list-mutation primitive, such as hlist_add_head_rcu() or
|
||||
* hlist_del_rcu(), running on this same list. However, it is
|
||||
* perfectly legal to run concurrently with the _rcu list-traversal
|
||||
* primitives, such as hlist_for_each_entry_rcu().
|
||||
*/
|
||||
static inline void hlist_del_init_rcu(struct hlist_node *n)
|
||||
{
|
||||
if (!hlist_unhashed(n)) {
|
||||
__hlist_del(n);
|
||||
n->pprev = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* list_replace_rcu - replace old entry by new one
|
||||
* @old : the element to be replaced
|
||||
* @new : the new element to insert
|
||||
*
|
||||
* The @old entry will be replaced with the @new entry atomically.
|
||||
* Note: @old should not be empty.
|
||||
*/
|
||||
static inline void list_replace_rcu(struct list_head *old,
|
||||
struct list_head *new)
|
||||
{
|
||||
new->next = old->next;
|
||||
new->prev = old->prev;
|
||||
rcu_assign_pointer(list_next_rcu(new->prev), new);
|
||||
new->next->prev = new;
|
||||
old->prev = LIST_POISON2;
|
||||
}
|
||||
|
||||
/**
|
||||
* __list_splice_init_rcu - join an RCU-protected list into an existing list.
|
||||
* @list: the RCU-protected list to splice
|
||||
* @prev: points to the last element of the existing list
|
||||
* @next: points to the first element of the existing list
|
||||
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
|
||||
*
|
||||
* The list pointed to by @prev and @next can be RCU-read traversed
|
||||
* concurrently with this function.
|
||||
*
|
||||
* Note that this function blocks.
|
||||
*
|
||||
* Important note: the caller must take whatever action is necessary to prevent
|
||||
* any other updates to the existing list. In principle, it is possible to
|
||||
* modify the list as soon as sync() begins execution. If this sort of thing
|
||||
* becomes necessary, an alternative version based on call_rcu() could be
|
||||
* created. But only if -really- needed -- there is no shortage of RCU API
|
||||
* members.
|
||||
*/
|
||||
static inline void __list_splice_init_rcu(struct list_head *list,
|
||||
struct list_head *prev,
|
||||
struct list_head *next,
|
||||
void (*sync)(void))
|
||||
{
|
||||
struct list_head *first = list->next;
|
||||
struct list_head *last = list->prev;
|
||||
|
||||
/*
|
||||
* "first" and "last" tracking list, so initialize it. RCU readers
|
||||
* have access to this list, so we must use INIT_LIST_HEAD_RCU()
|
||||
* instead of INIT_LIST_HEAD().
|
||||
*/
|
||||
|
||||
INIT_LIST_HEAD_RCU(list);
|
||||
|
||||
/*
|
||||
* At this point, the list body still points to the source list.
|
||||
* Wait for any readers to finish using the list before splicing
|
||||
* the list body into the new list. Any new readers will see
|
||||
* an empty list.
|
||||
*/
|
||||
|
||||
sync();
|
||||
|
||||
/*
|
||||
* Readers are finished with the source list, so perform splice.
|
||||
* The order is important if the new list is global and accessible
|
||||
* to concurrent RCU readers. Note that RCU readers are not
|
||||
* permitted to traverse the prev pointers without excluding
|
||||
* this function.
|
||||
*/
|
||||
|
||||
last->next = next;
|
||||
rcu_assign_pointer(list_next_rcu(prev), first);
|
||||
first->prev = prev;
|
||||
next->prev = last;
|
||||
}
|
||||
|
||||
/**
 * list_splice_init_rcu - splice an RCU-protected list into an existing list,
 * designed for stacks.
 * @list:	the RCU-protected list to splice
 * @head:	the place in the existing list to splice the first list into
 * @sync:	function to sync: synchronize_rcu(), synchronize_sched(), ...
 */
static inline void list_splice_init_rcu(struct list_head *list,
					struct list_head *head,
					void (*sync)(void))
{
	if (!list_empty(list))
		__list_splice_init_rcu(list, head, head->next, sync);
}
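/*
 * Illustrative sketch (not part of the original header): drain a shared,
 * RCU-read list onto a private list head. synchronize_rcu() is passed in as
 * the @sync callback, so this blocks until old readers are done; the caller
 * must already exclude other updaters. The names (pending, local) are
 * hypothetical.
 *
 *	LIST_HEAD(local);
 *
 *	list_splice_init_rcu(&pending, &local, synchronize_rcu);
 */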
|
||||
/**
|
||||
* list_splice_tail_init_rcu - splice an RCU-protected list into an existing
|
||||
* list, designed for queues.
|
||||
* @list: the RCU-protected list to splice
|
||||
* @head: the place in the existing list to splice the first list into
|
||||
* @sync: function to sync: synchronize_rcu(), synchronize_sched(), ...
|
||||
*/
|
||||
static inline void list_splice_tail_init_rcu(struct list_head *list,
|
||||
struct list_head *head,
|
||||
void (*sync)(void))
|
||||
{
|
||||
if (!list_empty(list))
|
||||
__list_splice_init_rcu(list, head->prev, head, sync);
|
||||
}
|
||||
|
||||
/**
|
||||
* list_entry_rcu - get the struct for this entry
|
||||
* @ptr: the &struct list_head pointer.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* This primitive may safely run concurrently with the _rcu list-mutation
|
||||
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
|
||||
*/
|
||||
#define list_entry_rcu(ptr, type, member) \
|
||||
container_of(lockless_dereference(ptr), type, member)
|
||||
|
||||
/**
|
||||
* Where are list_empty_rcu() and list_first_entry_rcu()?
|
||||
*
|
||||
* Implementing those functions following their counterparts list_empty() and
|
||||
* list_first_entry() is not advisable because they lead to subtle race
|
||||
* conditions as the following snippet shows:
|
||||
*
|
||||
* if (!list_empty_rcu(mylist)) {
|
||||
* struct foo *bar = list_first_entry_rcu(mylist, struct foo, list_member);
|
||||
* do_something(bar);
|
||||
* }
|
||||
*
|
||||
* The list may not be empty when list_empty_rcu checks it, but it may be when
|
||||
* list_first_entry_rcu rereads the ->next pointer.
|
||||
*
|
||||
* Rereading the ->next pointer is not a problem for list_empty() and
|
||||
* list_first_entry() because they would be protected by a lock that blocks
|
||||
* writers.
|
||||
*
|
||||
* See list_first_or_null_rcu for an alternative.
|
||||
*/
|
||||
|
||||
/**
|
||||
* list_first_or_null_rcu - get the first element from a list
|
||||
* @ptr: the list head to take the element from.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Note that if the list is empty, it returns NULL.
|
||||
*
|
||||
* This primitive may safely run concurrently with the _rcu list-mutation
|
||||
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
|
||||
*/
|
||||
#define list_first_or_null_rcu(ptr, type, member) \
|
||||
({ \
|
||||
struct list_head *__ptr = (ptr); \
|
||||
struct list_head *__next = READ_ONCE(__ptr->next); \
|
||||
likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
|
||||
})
|
||||
|
||||
/**
|
||||
* list_next_or_null_rcu - get the next element from a list
|
||||
* @head: the head for the list.
|
||||
* @ptr: the list head to take the next element from.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Note that if the ptr is at the end of the list, NULL is returned.
|
||||
*
|
||||
* This primitive may safely run concurrently with the _rcu list-mutation
|
||||
* primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
|
||||
*/
|
||||
#define list_next_or_null_rcu(head, ptr, type, member) \
|
||||
({ \
|
||||
struct list_head *__head = (head); \
|
||||
struct list_head *__ptr = (ptr); \
|
||||
struct list_head *__next = READ_ONCE(__ptr->next); \
|
||||
likely(__next != __head) ? list_entry_rcu(__next, type, \
|
||||
member) : NULL; \
|
||||
})
|
||||
|
||||
/**
 * list_for_each_entry_rcu - iterate over rcu list of given type
 * @pos:	the type * to use as a loop cursor.
 * @head:	the head for your list.
 * @member:	the name of the list_head within the struct.
 *
 * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
 * as long as the traversal is guarded by rcu_read_lock().
 */
#define list_for_each_entry_rcu(pos, head, member) \
	for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
	     &pos->member != (head); \
	     pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
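/*
 * Illustrative sketch (not part of the original header): readers traverse
 * under rcu_read_lock() while writers use the _rcu mutators under their own
 * lock. The names (struct foo, foo_list, id) are hypothetical.
 *
 *	struct foo *f;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(f, &foo_list, list)
 *		if (f->id == id)
 *			break;
 *	rcu_read_unlock();
 */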
|
||||
/**
|
||||
* list_entry_lockless - get the struct for this entry
|
||||
* @ptr: the &struct list_head pointer.
|
||||
* @type: the type of the struct this is embedded in.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* This primitive may safely run concurrently with the _rcu list-mutation
|
||||
* primitives such as list_add_rcu(), but requires some implicit RCU
|
||||
* read-side guarding. One example is running within a special
|
||||
* exception-time environment where preemption is disabled and where
|
||||
* lockdep cannot be invoked (in which case updaters must use RCU-sched,
|
||||
* as in synchronize_sched(), call_rcu_sched(), and friends). Another
|
||||
* example is when items are added to the list, but never deleted.
|
||||
*/
|
||||
#define list_entry_lockless(ptr, type, member) \
|
||||
container_of((typeof(ptr))lockless_dereference(ptr), type, member)
|
||||
|
||||
/**
|
||||
* list_for_each_entry_lockless - iterate over rcu list of given type
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_struct within the struct.
|
||||
*
|
||||
* This primitive may safely run concurrently with the _rcu list-mutation
|
||||
* primitives such as list_add_rcu(), but requires some implicit RCU
|
||||
* read-side guarding. One example is running within a special
|
||||
* exception-time environment where preemption is disabled and where
|
||||
* lockdep cannot be invoked (in which case updaters must use RCU-sched,
|
||||
* as in synchronize_sched(), call_rcu_sched(), and friends). Another
|
||||
* example is when items are added to the list, but never deleted.
|
||||
*/
|
||||
#define list_for_each_entry_lockless(pos, head, member) \
|
||||
for (pos = list_entry_lockless((head)->next, typeof(*pos), member); \
|
||||
&pos->member != (head); \
|
||||
pos = list_entry_lockless(pos->member.next, typeof(*pos), member))
|
||||
|
||||
/**
|
||||
* list_for_each_entry_continue_rcu - continue iteration over list of given type
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the list_head within the struct.
|
||||
*
|
||||
* Continue to iterate over list of given type, continuing after
|
||||
* the current position.
|
||||
*/
|
||||
#define list_for_each_entry_continue_rcu(pos, head, member) \
|
||||
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
|
||||
&pos->member != (head); \
|
||||
pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
|
||||
|
||||
/**
|
||||
* hlist_del_rcu - deletes entry from hash list without re-initialization
|
||||
* @n: the element to delete from the hash list.
|
||||
*
|
||||
* Note: list_unhashed() on entry does not return true after this,
|
||||
* the entry is in an undefined state. It is useful for RCU based
|
||||
* lockfree traversal.
|
||||
*
|
||||
* In particular, it means that we can not poison the forward
|
||||
* pointers that may still be used for walking the hash list.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as hlist_add_head_rcu()
|
||||
* or hlist_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* hlist_for_each_entry().
|
||||
*/
|
||||
static inline void hlist_del_rcu(struct hlist_node *n)
|
||||
{
|
||||
__hlist_del(n);
|
||||
n->pprev = LIST_POISON2;
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_replace_rcu - replace old entry by new one
|
||||
* @old : the element to be replaced
|
||||
* @new : the new element to insert
|
||||
*
|
||||
* The @old entry will be replaced with the @new entry atomically.
|
||||
*/
|
||||
static inline void hlist_replace_rcu(struct hlist_node *old,
|
||||
struct hlist_node *new)
|
||||
{
|
||||
struct hlist_node *next = old->next;
|
||||
|
||||
new->next = next;
|
||||
new->pprev = old->pprev;
|
||||
rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
|
||||
if (next)
|
||||
new->next->pprev = &new->next;
|
||||
old->pprev = LIST_POISON2;
|
||||
}
|
||||
|
||||
/*
|
||||
* return the first or the next element in an RCU protected hlist
|
||||
*/
|
||||
#define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first)))
|
||||
#define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next)))
|
||||
#define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev)))
|
||||
|
||||
/**
|
||||
* hlist_add_head_rcu
|
||||
* @n: the element to add to the hash list.
|
||||
* @h: the list to add to.
|
||||
*
|
||||
* Description:
|
||||
* Adds the specified element to the specified hlist,
|
||||
* while permitting racing traversals.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as hlist_add_head_rcu()
|
||||
* or hlist_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
|
||||
* problems on Alpha CPUs. Regardless of the type of CPU, the
|
||||
* list-traversal primitive must be guarded by rcu_read_lock().
|
||||
*/
|
||||
static inline void hlist_add_head_rcu(struct hlist_node *n,
|
||||
struct hlist_head *h)
|
||||
{
|
||||
struct hlist_node *first = h->first;
|
||||
|
||||
n->next = first;
|
||||
n->pprev = &h->first;
|
||||
rcu_assign_pointer(hlist_first_rcu(h), n);
|
||||
if (first)
|
||||
first->pprev = &n->next;
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_add_tail_rcu
|
||||
* @n: the element to add to the hash list.
|
||||
* @h: the list to add to.
|
||||
*
|
||||
* Description:
|
||||
* Adds the specified element to the specified hlist,
|
||||
* while permitting racing traversals.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as hlist_add_head_rcu()
|
||||
* or hlist_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
|
||||
* problems on Alpha CPUs. Regardless of the type of CPU, the
|
||||
* list-traversal primitive must be guarded by rcu_read_lock().
|
||||
*/
|
||||
static inline void hlist_add_tail_rcu(struct hlist_node *n,
|
||||
struct hlist_head *h)
|
||||
{
|
||||
struct hlist_node *i, *last = NULL;
|
||||
|
||||
for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
|
||||
last = i;
|
||||
|
||||
if (last) {
|
||||
n->next = last->next;
|
||||
n->pprev = &last->next;
|
||||
rcu_assign_pointer(hlist_next_rcu(last), n);
|
||||
} else {
|
||||
hlist_add_head_rcu(n, h);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_add_before_rcu
|
||||
* @n: the new element to add to the hash list.
|
||||
* @next: the existing element to add the new element before.
|
||||
*
|
||||
* Description:
|
||||
* Adds the specified element to the specified hlist
|
||||
* before the specified node while permitting racing traversals.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as hlist_add_head_rcu()
|
||||
* or hlist_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
|
||||
* problems on Alpha CPUs.
|
||||
*/
|
||||
static inline void hlist_add_before_rcu(struct hlist_node *n,
|
||||
struct hlist_node *next)
|
||||
{
|
||||
n->pprev = next->pprev;
|
||||
n->next = next;
|
||||
rcu_assign_pointer(hlist_pprev_rcu(n), n);
|
||||
next->pprev = &n->next;
|
||||
}
|
||||
|
||||
/**
|
||||
* hlist_add_behind_rcu
|
||||
* @n: the new element to add to the hash list.
|
||||
* @prev: the existing element to add the new element after.
|
||||
*
|
||||
* Description:
|
||||
* Adds the specified element to the specified hlist
|
||||
* after the specified node while permitting racing traversals.
|
||||
*
|
||||
* The caller must take whatever precautions are necessary
|
||||
* (such as holding appropriate locks) to avoid racing
|
||||
* with another list-mutation primitive, such as hlist_add_head_rcu()
|
||||
* or hlist_del_rcu(), running on this same list.
|
||||
* However, it is perfectly legal to run concurrently with
|
||||
* the _rcu list-traversal primitives, such as
|
||||
* hlist_for_each_entry_rcu(), used to prevent memory-consistency
|
||||
* problems on Alpha CPUs.
|
||||
*/
|
||||
static inline void hlist_add_behind_rcu(struct hlist_node *n,
|
||||
struct hlist_node *prev)
|
||||
{
|
||||
n->next = prev->next;
|
||||
n->pprev = &prev->next;
|
||||
rcu_assign_pointer(hlist_next_rcu(prev), n);
|
||||
if (n->next)
|
||||
n->next->pprev = &n->next;
|
||||
}
|
||||
|
||||
#define __hlist_for_each_rcu(pos, head) \
|
||||
for (pos = rcu_dereference(hlist_first_rcu(head)); \
|
||||
pos; \
|
||||
pos = rcu_dereference(hlist_next_rcu(pos)))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_rcu - iterate over rcu list of given type
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*
|
||||
* This list-traversal primitive may safely run concurrently with
|
||||
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
|
||||
* as long as the traversal is guarded by rcu_read_lock().
|
||||
*/
|
||||
#define hlist_for_each_entry_rcu(pos, head, member) \
|
||||
for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
|
||||
typeof(*(pos)), member); \
|
||||
pos; \
|
||||
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
|
||||
&(pos)->member)), typeof(*(pos)), member))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_rcu_notrace - iterate over rcu list of given type (for tracing)
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*
|
||||
* This list-traversal primitive may safely run concurrently with
|
||||
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
|
||||
* as long as the traversal is guarded by rcu_read_lock().
|
||||
*
|
||||
* This is the same as hlist_for_each_entry_rcu() except that it does
|
||||
* not do any RCU debugging or tracing.
|
||||
*/
|
||||
#define hlist_for_each_entry_rcu_notrace(pos, head, member) \
|
||||
for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
|
||||
typeof(*(pos)), member); \
|
||||
pos; \
|
||||
pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
|
||||
&(pos)->member)), typeof(*(pos)), member))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_rcu_bh - iterate over rcu list of given type
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @head: the head for your list.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*
|
||||
* This list-traversal primitive may safely run concurrently with
|
||||
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
|
||||
* as long as the traversal is guarded by rcu_read_lock().
|
||||
*/
|
||||
#define hlist_for_each_entry_rcu_bh(pos, head, member) \
|
||||
for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_first_rcu(head)),\
|
||||
typeof(*(pos)), member); \
|
||||
pos; \
|
||||
pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu(\
|
||||
&(pos)->member)), typeof(*(pos)), member))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_continue_rcu - iterate over a hlist continuing after current point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*/
|
||||
#define hlist_for_each_entry_continue_rcu(pos, member) \
|
||||
for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
|
||||
&(pos)->member)), typeof(*(pos)), member); \
|
||||
pos; \
|
||||
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
|
||||
&(pos)->member)), typeof(*(pos)), member))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_continue_rcu_bh - iterate over a hlist continuing after current point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*/
|
||||
#define hlist_for_each_entry_continue_rcu_bh(pos, member) \
|
||||
for (pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
|
||||
&(pos)->member)), typeof(*(pos)), member); \
|
||||
pos; \
|
||||
pos = hlist_entry_safe(rcu_dereference_bh(hlist_next_rcu( \
|
||||
&(pos)->member)), typeof(*(pos)), member))
|
||||
|
||||
/**
|
||||
* hlist_for_each_entry_from_rcu - iterate over a hlist continuing from current point
|
||||
* @pos: the type * to use as a loop cursor.
|
||||
* @member: the name of the hlist_node within the struct.
|
||||
*/
|
||||
#define hlist_for_each_entry_from_rcu(pos, member) \
|
||||
for (; pos; \
|
||||
pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
|
||||
&(pos)->member)), typeof(*(pos)), member))
|
||||
|
||||
#endif
|
||||
|
@ -1,74 +0,0 @@
|
||||
#ifndef _LINUX_REBOOT_H
|
||||
#define _LINUX_REBOOT_H
|
||||
|
||||
#include <linux/notifier.h>
|
||||
|
||||
#define SYS_DOWN 0x0001 /* Notify of system down */
|
||||
#define SYS_RESTART SYS_DOWN
|
||||
#define SYS_HALT 0x0002 /* Notify of system halt */
|
||||
#define SYS_POWER_OFF 0x0003 /* Notify of system power off */
|
||||
|
||||
enum reboot_mode {
|
||||
REBOOT_COLD = 0,
|
||||
REBOOT_WARM,
|
||||
REBOOT_HARD,
|
||||
REBOOT_SOFT,
|
||||
REBOOT_GPIO,
|
||||
};
|
||||
extern enum reboot_mode reboot_mode;
|
||||
|
||||
enum reboot_type {
|
||||
BOOT_TRIPLE = 't',
|
||||
BOOT_KBD = 'k',
|
||||
BOOT_BIOS = 'b',
|
||||
BOOT_ACPI = 'a',
|
||||
BOOT_EFI = 'e',
|
||||
BOOT_CF9_FORCE = 'p',
|
||||
BOOT_CF9_SAFE = 'q',
|
||||
};
|
||||
extern enum reboot_type reboot_type;
|
||||
|
||||
extern int reboot_default;
|
||||
extern int reboot_cpu;
|
||||
extern int reboot_force;
|
||||
|
||||
|
||||
static inline int register_reboot_notifier(struct notifier_block *n) { return 0; }
|
||||
static inline int unregister_reboot_notifier(struct notifier_block *n) { return 0; }
|
||||
|
||||
extern int register_restart_handler(struct notifier_block *);
|
||||
extern int unregister_restart_handler(struct notifier_block *);
|
||||
extern void do_kernel_restart(char *cmd);
|
||||
|
||||
/*
|
||||
* Architecture-specific implementations of sys_reboot commands.
|
||||
*/
|
||||
|
||||
extern void migrate_to_reboot_cpu(void);
|
||||
extern void machine_restart(char *cmd);
|
||||
extern void machine_halt(void);
|
||||
extern void machine_power_off(void);
|
||||
|
||||
extern void machine_shutdown(void);
|
||||
struct pt_regs;
|
||||
extern void machine_crash_shutdown(struct pt_regs *);
|
||||
|
||||
/*
|
||||
* Architecture independent implementations of sys_reboot commands.
|
||||
*/
|
||||
|
||||
extern void kernel_restart_prepare(char *cmd);
|
||||
extern void kernel_restart(char *cmd);
|
||||
extern void kernel_halt(void);
|
||||
extern void kernel_power_off(void);
|
||||
|
||||
extern int C_A_D; /* for sysctl */
|
||||
void ctrl_alt_del(void);
|
||||
|
||||
#define POWEROFF_CMD_PATH_LEN 256
|
||||
extern char poweroff_cmd[POWEROFF_CMD_PATH_LEN];
|
||||
|
||||
extern void orderly_poweroff(bool force);
|
||||
extern void orderly_reboot(void);
|
||||
|
||||
#endif /* _LINUX_REBOOT_H */
|
@ -1,47 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Intel Corporation
|
||||
* Author: Matthew Wilcox <willy@linux.intel.com>
|
||||
*
|
||||
* Distributed under the terms of the GNU GPL, version 2
|
||||
*
|
||||
* Please see kernel/semaphore.c for documentation of these functions
|
||||
*/
|
||||
#ifndef __LINUX_SEMAPHORE_H
|
||||
#define __LINUX_SEMAPHORE_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
/* Please don't access any members of this structure directly */
|
||||
struct semaphore {
|
||||
raw_spinlock_t lock;
|
||||
unsigned int count;
|
||||
struct list_head wait_list;
|
||||
};
|
||||
|
||||
#define __SEMAPHORE_INITIALIZER(name, n) \
|
||||
{ \
|
||||
.lock = __RAW_SPIN_LOCK_UNLOCKED((name).lock), \
|
||||
.count = n, \
|
||||
.wait_list = LIST_HEAD_INIT((name).wait_list), \
|
||||
}
|
||||
|
||||
#define DEFINE_SEMAPHORE(name) \
|
||||
struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
|
||||
|
||||
static inline void sema_init(struct semaphore *sem, int val)
|
||||
{
|
||||
static struct lock_class_key __key;
|
||||
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val);
|
||||
lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
|
||||
}
|
||||
|
||||
extern void down(struct semaphore *sem);
|
||||
extern int __must_check down_interruptible(struct semaphore *sem);
|
||||
extern int __must_check down_killable(struct semaphore *sem);
|
||||
extern int __must_check down_trylock(struct semaphore *sem);
|
||||
extern int __must_check down_timeout(struct semaphore *sem, long);
|
||||
extern void up(struct semaphore *sem);
|
||||
|
||||
#endif /* __LINUX_SEMAPHORE_H */
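
A minimal usage sketch of this interface; the resource pool and function names are invented for illustration and are not part of the header.

/* Hypothetical pool guarded by a counting semaphore (4 concurrent users). */
static struct semaphore pool_sem;

static void pool_setup(void)
{
	sema_init(&pool_sem, 4);
}

static int pool_do_work(void)
{
	/* Sleeps until one of the 4 slots is free; -EINTR if interrupted. */
	if (down_interruptible(&pool_sem))
		return -EINTR;

	/* ... use one slot of the resource ... */

	up(&pool_sem);			/* release the slot */
	return 0;
}
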
@ -1,6 +1,9 @@
|
||||
#ifndef __TOOLS_LINUX_SHRINKER_H
|
||||
#define __TOOLS_LINUX_SHRINKER_H
|
||||
|
||||
#include <linux/list.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
struct shrink_control {
|
||||
gfp_t gfp_mask;
|
||||
unsigned long nr_to_scan;
|
||||
|
@ -5,25 +5,6 @@
|
||||
|
||||
typedef __s64 time64_t;
|
||||
|
||||
/*
|
||||
* This wants to go into uapi/linux/time.h once we agreed about the
|
||||
* userspace interfaces.
|
||||
*/
|
||||
#if __BITS_PER_LONG == 64
|
||||
# define timespec64 timespec
|
||||
#else
|
||||
struct timespec64 {
|
||||
time64_t tv_sec; /* seconds */
|
||||
long tv_nsec; /* nanoseconds */
|
||||
};
|
||||
|
||||
struct itimerspec64 {
|
||||
struct timespec64 it_interval;
|
||||
struct timespec64 it_value;
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
/* Parameters used to convert the timespec values: */
|
||||
#define MSEC_PER_SEC 1000L
|
||||
#define USEC_PER_MSEC 1000L
|
||||
@ -33,11 +14,6 @@ struct itimerspec64 {
|
||||
#define NSEC_PER_SEC 1000000000L
|
||||
#define FSEC_PER_SEC 1000000000000000LL
|
||||
|
||||
/* Located here for timespec[64]_valid_strict */
|
||||
#define TIME64_MAX ((s64)~((u64)1 << 63))
|
||||
#define KTIME_MAX ((s64)~((u64)1 << 63))
|
||||
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
|
||||
|
||||
static inline struct timespec ns_to_timespec(const u64 nsec)
|
||||
{
|
||||
return (struct timespec) {
|
||||
@ -51,159 +27,6 @@ static inline s64 timespec_to_ns(const struct timespec *ts)
|
||||
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
|
||||
}
|
||||
|
||||
#if __BITS_PER_LONG == 64
|
||||
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
return ts64;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
return ts;
|
||||
}
|
||||
|
||||
# define timespec64_equal timespec_equal
|
||||
# define timespec64_compare timespec_compare
|
||||
# define set_normalized_timespec64 set_normalized_timespec
|
||||
# define timespec64_add timespec_add
|
||||
# define timespec64_sub timespec_sub
|
||||
# define timespec64_valid timespec_valid
|
||||
# define timespec64_valid_strict timespec_valid_strict
|
||||
# define timespec64_to_ns timespec_to_ns
|
||||
# define ns_to_timespec64 ns_to_timespec
|
||||
# define timespec64_add_ns timespec_add_ns
|
||||
|
||||
#else
|
||||
|
||||
static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
|
||||
{
|
||||
struct timespec ret;
|
||||
|
||||
ret.tv_sec = (time_t)ts64.tv_sec;
|
||||
ret.tv_nsec = ts64.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
|
||||
{
|
||||
struct timespec64 ret;
|
||||
|
||||
ret.tv_sec = ts.tv_sec;
|
||||
ret.tv_nsec = ts.tv_nsec;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int timespec64_equal(const struct timespec64 *a,
|
||||
const struct timespec64 *b)
|
||||
{
|
||||
return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
|
||||
}
|
||||
|
||||
/*
|
||||
* lhs < rhs: return <0
|
||||
* lhs == rhs: return 0
|
||||
* lhs > rhs: return >0
|
||||
*/
|
||||
static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
|
||||
{
|
||||
if (lhs->tv_sec < rhs->tv_sec)
|
||||
return -1;
|
||||
if (lhs->tv_sec > rhs->tv_sec)
|
||||
return 1;
|
||||
return lhs->tv_nsec - rhs->tv_nsec;
|
||||
}
|
||||
|
||||
extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
|
||||
|
||||
static inline struct timespec64 timespec64_add(struct timespec64 lhs,
|
||||
struct timespec64 rhs)
|
||||
{
|
||||
struct timespec64 ts_delta;
|
||||
set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
|
||||
lhs.tv_nsec + rhs.tv_nsec);
|
||||
return ts_delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* sub = lhs - rhs, in normalized form
|
||||
*/
|
||||
static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
|
||||
struct timespec64 rhs)
|
||||
{
|
||||
struct timespec64 ts_delta;
|
||||
set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
|
||||
lhs.tv_nsec - rhs.tv_nsec);
|
||||
return ts_delta;
|
||||
}
|
||||
|
||||
/*
|
||||
* Returns true if the timespec64 is normalized, false if denormalized:
|
||||
*/
|
||||
static inline bool timespec64_valid(const struct timespec64 *ts)
|
||||
{
|
||||
/* Dates before 1970 are bogus */
|
||||
if (ts->tv_sec < 0)
|
||||
return false;
|
||||
/* Can't have more nanoseconds than a second */
|
||||
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool timespec64_valid_strict(const struct timespec64 *ts)
|
||||
{
|
||||
if (!timespec64_valid(ts))
|
||||
return false;
|
||||
/* Disallow values that could overflow ktime_t */
|
||||
if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* timespec64_to_ns - Convert timespec64 to nanoseconds
|
||||
* @ts: pointer to the timespec64 variable to be converted
|
||||
*
|
||||
* Returns the scalar nanosecond representation of the timespec64
|
||||
* parameter.
|
||||
*/
|
||||
static inline s64 timespec64_to_ns(const struct timespec64 *ts)
|
||||
{
|
||||
return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
|
||||
}
|
||||
|
||||
/**
|
||||
* ns_to_timespec64 - Convert nanoseconds to timespec64
|
||||
* @nsec: the nanoseconds value to be converted
|
||||
*
|
||||
* Returns the timespec64 representation of the nsec parameter.
|
||||
*/
|
||||
extern struct timespec64 ns_to_timespec64(const s64 nsec);
|
||||
|
||||
/**
|
||||
* timespec64_add_ns - Adds nanoseconds to a timespec64
|
||||
* @a: pointer to timespec64 to be incremented
|
||||
* @ns: unsigned nanoseconds value to be added
|
||||
*
|
||||
* This must always be inlined because it's used from the x86-64 vdso,
|
||||
* which cannot call other kernel functions.
|
||||
*/
|
||||
static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
|
||||
{
|
||||
a->tv_sec += (a->tv_nsec + ns) / NSEC_PER_SEC;
|
||||
a->tv_nsec = (a->tv_nsec + ns) % NSEC_PER_SEC;
|
||||
}
|
||||
|
||||
#endif
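
A small sketch using the conversion helpers above; the helper names and scenario are invented for illustration, and it relies only on timespec64_to_ns()/ns_to_timespec64() as declared in this header.

/* Elapsed nanoseconds between two normalized timestamps. */
static s64 delta_ns(const struct timespec64 *end, const struct timespec64 *start)
{
	return timespec64_to_ns(end) - timespec64_to_ns(start);
}

/* And back again: split a nanosecond count into seconds + nanoseconds. */
static struct timespec64 delta_to_timespec64(s64 delta)
{
	return ns_to_timespec64(delta);
}
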
/*
|
||||
* timespec64_add_safe assumes both values are positive and checks for
|
||||
* overflow. It will return TIME64_MAX in case of overflow.
|
||||
*/
|
||||
extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
|
||||
const struct timespec64 rhs);
|
||||
|
||||
static inline struct timespec timespec_trunc(struct timespec t, unsigned gran)
|
||||
{
|
||||
/* Avoid division in the common cases 1 ns and 1 s. */
|
||||
|
@ -72,28 +72,4 @@ typedef __u64 __bitwise __be64;
|
||||
|
||||
typedef u64 sector_t;
|
||||
|
||||
struct list_head {
|
||||
struct list_head *next, *prev;
|
||||
};
|
||||
|
||||
struct hlist_head {
|
||||
struct hlist_node *first;
|
||||
};
|
||||
|
||||
struct hlist_node {
|
||||
struct hlist_node *next, **pprev;
|
||||
};
|
||||
|
||||
struct callback_head {
|
||||
struct callback_head *next;
|
||||
void (*func)(struct callback_head *head);
|
||||
} __attribute__((aligned(sizeof(void *))));
|
||||
|
||||
#if 0
|
||||
#define rcu_head callback_head
|
||||
|
||||
typedef void (*rcu_callback_t)(struct rcu_head *head);
|
||||
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
|
||||
#endif
|
||||
|
||||
#endif /* _TOOLS_LINUX_TYPES_H_ */
|
||||
|
linux/bio.c | 17
@ -172,23 +172,6 @@ void bio_free_pages(struct bio *bio)
|
||||
__free_page(bvec->bv_page);
|
||||
}
|
||||
|
||||
int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
|
||||
{
|
||||
int i;
|
||||
struct bio_vec *bv;
|
||||
|
||||
bio_for_each_segment_all(bv, bio, i) {
|
||||
bv->bv_page = alloc_page(gfp_mask);
|
||||
if (!bv->bv_page) {
|
||||
while (--bv >= bio->bi_io_vec)
|
||||
__free_page(bv->bv_page);
|
||||
return -ENOMEM;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void bio_advance(struct bio *bio, unsigned bytes)
|
||||
{
|
||||
bio_advance_iter(bio, &bio->bi_iter, bytes);
|
||||
|
@ -20,181 +20,15 @@
|
||||
#include <linux/string.h>
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include "internal.h"
|
||||
|
||||
static LIST_HEAD(crypto_alg_list);
|
||||
static DECLARE_RWSEM(crypto_alg_sem);
|
||||
|
||||
static unsigned crypto_ctxsize(struct crypto_alg *alg, u32 type, u32 mask)
|
||||
{
|
||||
return alg->cra_type->ctxsize(alg, type, mask);
|
||||
}
|
||||
|
||||
unsigned crypto_alg_extsize(struct crypto_alg *alg)
|
||||
{
|
||||
return alg->cra_ctxsize;
|
||||
}
|
||||
|
||||
struct crypto_alg *crypto_alg_mod_lookup(const char *name, u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
|
||||
down_read(&crypto_alg_sem);
|
||||
list_for_each_entry(alg, &crypto_alg_list, cra_list)
|
||||
if (!((alg->cra_flags ^ type) & mask) &&
|
||||
!strcmp(alg->cra_name, name))
|
||||
goto found;
|
||||
|
||||
alg = ERR_PTR(-ENOENT);
|
||||
found:
|
||||
up_read(&crypto_alg_sem);
|
||||
|
||||
return alg;
|
||||
}
|
||||
|
||||
static void crypto_exit_ops(struct crypto_tfm *tfm)
|
||||
{
|
||||
if (tfm->exit)
|
||||
tfm->exit(tfm);
|
||||
}
|
||||
|
||||
static struct crypto_tfm *__crypto_alloc_tfm(struct crypto_alg *alg,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_tfm *tfm = NULL;
|
||||
unsigned tfm_size;
|
||||
int err = -ENOMEM;
|
||||
|
||||
tfm_size = sizeof(*tfm) + crypto_ctxsize(alg, type, mask);
|
||||
tfm = kzalloc(tfm_size, GFP_KERNEL);
|
||||
if (tfm == NULL)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
tfm->__crt_alg = alg;
|
||||
|
||||
err = alg->cra_type->init(tfm, type, mask);
|
||||
if (err)
|
||||
goto out_free_tfm;
|
||||
|
||||
if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
|
||||
goto cra_init_failed;
|
||||
|
||||
return tfm;
|
||||
|
||||
cra_init_failed:
|
||||
crypto_exit_ops(tfm);
|
||||
out_free_tfm:
|
||||
kfree(tfm);
|
||||
return ERR_PTR(err);
|
||||
}
|
||||
|
||||
struct crypto_tfm *crypto_alloc_base(const char *alg_name, u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
struct crypto_tfm *tfm;
|
||||
|
||||
alg = crypto_alg_mod_lookup(alg_name, type, mask);
|
||||
if (IS_ERR(alg)) {
|
||||
fprintf(stderr, "unknown cipher %s\n", alg_name);
|
||||
return ERR_CAST(alg);
|
||||
}
|
||||
|
||||
tfm = __crypto_alloc_tfm(alg, type, mask);
|
||||
if (IS_ERR(tfm))
|
||||
return tfm;
|
||||
|
||||
return tfm;
|
||||
}
|
||||
|
||||
static void *crypto_create_tfm(struct crypto_alg *alg,
|
||||
const struct crypto_type *frontend)
|
||||
{
|
||||
struct crypto_tfm *tfm = NULL;
|
||||
unsigned tfmsize;
|
||||
unsigned total;
|
||||
void *mem;
|
||||
int err = -ENOMEM;
|
||||
|
||||
tfmsize = frontend->tfmsize;
|
||||
total = tfmsize + sizeof(*tfm) + frontend->extsize(alg);
|
||||
|
||||
mem = kzalloc(total, GFP_KERNEL);
|
||||
if (!mem)
|
||||
goto out_err;
|
||||
|
||||
tfm = mem + tfmsize;
|
||||
tfm->__crt_alg = alg;
|
||||
|
||||
err = frontend->init_tfm(tfm);
|
||||
if (err)
|
||||
goto out_free_tfm;
|
||||
|
||||
if (!tfm->exit && alg->cra_init && (err = alg->cra_init(tfm)))
|
||||
goto cra_init_failed;
|
||||
|
||||
goto out;
|
||||
|
||||
cra_init_failed:
|
||||
crypto_exit_ops(tfm);
|
||||
out_free_tfm:
|
||||
kfree(mem);
|
||||
out_err:
|
||||
mem = ERR_PTR(err);
|
||||
out:
|
||||
return mem;
|
||||
}
|
||||
|
||||
static struct crypto_alg *crypto_find_alg(const char *alg_name,
|
||||
const struct crypto_type *frontend,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
if (frontend) {
|
||||
type &= frontend->maskclear;
|
||||
mask &= frontend->maskclear;
|
||||
type |= frontend->type;
|
||||
mask |= frontend->maskset;
|
||||
}
|
||||
|
||||
return crypto_alg_mod_lookup(alg_name, type, mask);
|
||||
}
|
||||
|
||||
void *crypto_alloc_tfm(const char *alg_name,
|
||||
const struct crypto_type *frontend,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
void *tfm;
|
||||
|
||||
alg = crypto_find_alg(alg_name, frontend, type, mask);
|
||||
if (IS_ERR(alg))
|
||||
return ERR_CAST(alg);
|
||||
|
||||
tfm = crypto_create_tfm(alg, frontend);
|
||||
if (IS_ERR(tfm))
|
||||
return tfm;
|
||||
|
||||
return tfm;
|
||||
}
|
||||
|
||||
void crypto_destroy_tfm(void *mem, struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
|
||||
if (unlikely(!mem))
|
||||
return;
|
||||
|
||||
alg = tfm->__crt_alg;
|
||||
|
||||
if (!tfm->exit && alg->cra_exit)
|
||||
alg->cra_exit(tfm);
|
||||
crypto_exit_ops(tfm);
|
||||
kzfree(mem);
|
||||
}
|
||||
struct crypto_type {
|
||||
};
|
||||
|
||||
int crypto_register_alg(struct crypto_alg *alg)
|
||||
{
|
||||
INIT_LIST_HEAD(&alg->cra_users);
|
||||
|
||||
down_write(&crypto_alg_sem);
|
||||
list_add(&alg->cra_list, &crypto_alg_list);
|
||||
up_write(&crypto_alg_sem);
|
||||
@ -202,35 +36,79 @@ int crypto_register_alg(struct crypto_alg *alg)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *crypto_alloc_tfm(const char *name,
|
||||
const struct crypto_type *type)
|
||||
{
|
||||
struct crypto_alg *alg;
|
||||
|
||||
down_read(&crypto_alg_sem);
|
||||
list_for_each_entry(alg, &crypto_alg_list, cra_list)
|
||||
if (alg->cra_type == type && !strcmp(alg->cra_name, name))
|
||||
goto found;
|
||||
|
||||
alg = ERR_PTR(-ENOENT);
|
||||
found:
|
||||
up_read(&crypto_alg_sem);
|
||||
|
||||
if (IS_ERR(alg))
|
||||
return ERR_CAST(alg);
|
||||
|
||||
return alg->alloc_tfm() ?: ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
/* skcipher: */
|
||||
|
||||
static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
|
||||
struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);
|
||||
|
||||
skcipher->setkey = alg->setkey;
|
||||
skcipher->encrypt = alg->encrypt;
|
||||
skcipher->decrypt = alg->decrypt;
|
||||
skcipher->ivsize = alg->ivsize;
|
||||
skcipher->keysize = alg->max_keysize;
|
||||
|
||||
if (alg->init)
|
||||
return alg->init(skcipher);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct crypto_type crypto_skcipher_type2 = {
|
||||
.extsize = crypto_alg_extsize,
|
||||
.init_tfm = crypto_skcipher_init_tfm,
|
||||
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
|
||||
.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
|
||||
.tfmsize = offsetof(struct crypto_skcipher, base),
|
||||
};
|
||||
|
||||
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
|
||||
struct crypto_skcipher *crypto_alloc_skcipher(const char *name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
|
||||
return crypto_alloc_tfm(name, &crypto_skcipher_type2);
|
||||
}
|
||||
|
||||
int crypto_register_skcipher(struct skcipher_alg *alg)
|
||||
{
|
||||
alg->base.cra_type = &crypto_skcipher_type2;
|
||||
|
||||
return crypto_register_alg(&alg->base);
|
||||
}
|
||||
|
||||
/* shash: */
|
||||
|
||||
#include <crypto/hash.h>
|
||||
|
||||
static int shash_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned len, u8 *out)
|
||||
{
|
||||
return crypto_shash_update(desc, data, len) ?:
|
||||
crypto_shash_final(desc, out);
|
||||
}
|
||||
|
||||
static int shash_digest(struct shash_desc *desc, const u8 *data,
|
||||
unsigned len, u8 *out)
|
||||
{
|
||||
return crypto_shash_init(desc) ?:
|
||||
crypto_shash_finup(desc, data, len, out);
|
||||
}
|
||||
|
||||
static const struct crypto_type crypto_shash_type = {
|
||||
};
|
||||
|
||||
struct crypto_shash *crypto_alloc_shash(const char *name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
return crypto_alloc_tfm(name, &crypto_shash_type);
|
||||
}
|
||||
|
||||
int crypto_register_shash(struct shash_alg *alg)
|
||||
{
|
||||
alg->base.cra_type = &crypto_shash_type;
|
||||
|
||||
if (!alg->finup)
|
||||
alg->finup = shash_finup;
|
||||
if (!alg->digest)
|
||||
alg->digest = shash_digest;
|
||||
|
||||
return crypto_register_alg(&alg->base);
|
||||
}
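
A hedged sketch of driving a registered shash from caller code; the heap-allocated descriptor pattern and the helper name are assumptions for illustration, not something this file defines.

/* Hypothetical helper: hash a buffer with the "sha256" shash registered below. */
static int hash_buffer(const u8 *buf, unsigned len, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
	struct shash_desc *desc;
	int ret;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* Assumed allocation pattern: descriptor followed by descsize() bytes. */
	desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}

	desc->tfm = tfm;

	ret = crypto_shash_init(desc) ?:
	      crypto_shash_update(desc, buf, len) ?:
	      crypto_shash_final(desc, out);

	kfree(desc);
	crypto_free_shash(tfm);
	return ret;
}
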
@ -1,47 +0,0 @@
|
||||
/*
|
||||
* Block chaining cipher operations.
|
||||
*
|
||||
* Generic encrypt/decrypt wrapper for ciphers, handles operations across
|
||||
* multiple page boundaries by using temporary blocks. In user context,
|
||||
* the kernel is given a chance to schedule us once per page.
|
||||
*
|
||||
* Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/string.h>
|
||||
|
||||
#include <crypto/algapi.h>
|
||||
#include "internal.h"
|
||||
|
||||
static unsigned crypto_blkcipher_ctxsize(struct crypto_alg *alg,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
return alg->cra_ctxsize;
|
||||
}
|
||||
|
||||
static int crypto_init_blkcipher_ops(struct crypto_tfm *tfm, u32 type, u32 mask)
|
||||
{
|
||||
struct blkcipher_tfm *crt = &tfm->crt_blkcipher;
|
||||
struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;
|
||||
|
||||
BUG_ON((mask & CRYPTO_ALG_TYPE_MASK) != CRYPTO_ALG_TYPE_MASK);
|
||||
|
||||
crt->setkey = alg->setkey;
|
||||
crt->encrypt = alg->encrypt;
|
||||
crt->decrypt = alg->decrypt;
|
||||
return 0;
|
||||
}
|
||||
|
||||
const struct crypto_type crypto_blkcipher_type = {
|
||||
.ctxsize = crypto_blkcipher_ctxsize,
|
||||
.init = crypto_init_blkcipher_ops,
|
||||
};
|
@ -22,14 +22,18 @@
|
||||
|
||||
#include <sodium/crypto_stream_chacha20.h>
|
||||
|
||||
struct chacha20_ctx {
|
||||
u32 key[8];
|
||||
static struct skcipher_alg alg;
|
||||
|
||||
struct chacha20_tfm {
|
||||
struct crypto_skcipher tfm;
|
||||
u32 key[8];
|
||||
};
|
||||
|
||||
static int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
unsigned int keysize)
|
||||
{
|
||||
struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct chacha20_tfm *ctx =
|
||||
container_of(tfm, struct chacha20_tfm, tfm);
|
||||
int i;
|
||||
|
||||
if (keysize != CHACHA20_KEY_SIZE)
|
||||
@ -43,8 +47,8 @@ static int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
|
||||
|
||||
static int crypto_chacha20_crypt(struct skcipher_request *req)
|
||||
{
|
||||
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
|
||||
struct chacha20_ctx *ctx = crypto_skcipher_ctx(tfm);
|
||||
struct chacha20_tfm *ctx =
|
||||
container_of(req->tfm, struct chacha20_tfm, tfm.base);
|
||||
struct scatterlist *sg = req->src;
|
||||
unsigned nbytes = req->cryptlen;
|
||||
u32 iv[4];
|
||||
@ -78,21 +82,30 @@ static int crypto_chacha20_crypt(struct skcipher_request *req)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void *crypto_chacha20_alloc_tfm(void)
|
||||
{
|
||||
struct chacha20_tfm *tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
|
||||
|
||||
if (!tfm)
|
||||
return NULL;
|
||||
|
||||
tfm->tfm.base.alg = &alg.base;
|
||||
tfm->tfm.setkey = crypto_chacha20_setkey;
|
||||
tfm->tfm.encrypt = crypto_chacha20_crypt;
|
||||
tfm->tfm.decrypt = crypto_chacha20_crypt;
|
||||
tfm->tfm.ivsize = CHACHA20_IV_SIZE;
|
||||
tfm->tfm.keysize = CHACHA20_KEY_SIZE;
|
||||
|
||||
return tfm;
|
||||
}
|
||||
|
||||
static struct skcipher_alg alg = {
|
||||
.base.cra_name = "chacha20",
|
||||
.base.cra_ctxsize = sizeof(struct chacha20_ctx),
|
||||
|
||||
.min_keysize = CHACHA20_KEY_SIZE,
|
||||
.max_keysize = CHACHA20_KEY_SIZE,
|
||||
.ivsize = CHACHA20_IV_SIZE,
|
||||
.chunksize = CHACHA20_BLOCK_SIZE,
|
||||
.setkey = crypto_chacha20_setkey,
|
||||
.encrypt = crypto_chacha20_crypt,
|
||||
.decrypt = crypto_chacha20_crypt,
|
||||
.base.alloc_tfm = crypto_chacha20_alloc_tfm,
|
||||
};
|
||||
|
||||
__attribute__((constructor(110)))
|
||||
static int chacha20_generic_mod_init(void)
|
||||
{
|
||||
return crypto_register_alg(&alg.base);
|
||||
return crypto_register_skcipher(&alg);
|
||||
}
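
A hedged sketch of what a caller can do once the constructor above has run and "chacha20" is registered; crypto_skcipher_setkey() and crypto_free_skcipher() are assumed to be the usual thin wrappers provided by this tree's skcipher header.

/* Hypothetical helper: allocate and key a chacha20 transform by name. */
static struct crypto_skcipher *chacha20_open(const u8 key[CHACHA20_KEY_SIZE])
{
	struct crypto_skcipher *tfm = crypto_alloc_skcipher("chacha20", 0, 0);
	int ret;

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_skcipher_setkey(tfm, key, CHACHA20_KEY_SIZE);
	if (ret) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(ret);
	}

	/* Encryption then goes through a skcipher_request as in the code above. */
	return tfm;
}
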
@ -1,23 +0,0 @@
|
||||
/*
|
||||
* Cryptographic API.
|
||||
*
|
||||
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
|
||||
* Copyright (c) 2005 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
#ifndef _CRYPTO_INTERNAL_H
|
||||
#define _CRYPTO_INTERNAL_H
|
||||
|
||||
struct crypto_type;
|
||||
struct crypto_alg;
|
||||
|
||||
void *crypto_alloc_tfm(const char *, const struct crypto_type *, u32, u32);
|
||||
unsigned int crypto_alg_extsize(struct crypto_alg *);
|
||||
|
||||
#endif /* _CRYPTO_INTERNAL_H */
|
||||
|
@ -18,18 +18,19 @@
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <crypto/algapi.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/hash.h>
|
||||
#include <crypto/poly1305.h>
|
||||
|
||||
static struct shash_alg poly1305_alg;
|
||||
|
||||
struct poly1305_desc_ctx {
|
||||
bool key_done;
|
||||
crypto_onetimeauth_poly1305_state s;
|
||||
};
|
||||
|
||||
|
||||
static int poly1305_init(struct shash_desc *desc)
|
||||
{
|
||||
struct poly1305_desc_ctx *state = shash_desc_ctx(desc);
|
||||
struct poly1305_desc_ctx *state = (void *) desc->ctx;
|
||||
|
||||
state->key_done = false;
|
||||
return 0;
|
||||
@ -38,7 +39,7 @@ static int poly1305_init(struct shash_desc *desc)
|
||||
static int poly1305_update(struct shash_desc *desc,
|
||||
const u8 *src, unsigned len)
|
||||
{
|
||||
struct poly1305_desc_ctx *state = shash_desc_ctx(desc);
|
||||
struct poly1305_desc_ctx *state = (void *) desc->ctx;
|
||||
|
||||
if (!state->key_done) {
|
||||
BUG_ON(len != crypto_onetimeauth_poly1305_KEYBYTES);
|
||||
@ -52,21 +53,32 @@ static int poly1305_update(struct shash_desc *desc,
|
||||
|
||||
static int poly1305_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
struct poly1305_desc_ctx *state = shash_desc_ctx(desc);
|
||||
struct poly1305_desc_ctx *state = (void *) desc->ctx;
|
||||
|
||||
return crypto_onetimeauth_poly1305_final(&state->s, out);
|
||||
}
|
||||
|
||||
static void *poly1305_alloc_tfm(void)
|
||||
{
|
||||
struct crypto_shash *tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
|
||||
|
||||
if (!tfm)
|
||||
return NULL;
|
||||
|
||||
tfm->base.alg = &poly1305_alg.base;
|
||||
tfm->descsize = sizeof(struct poly1305_desc_ctx);
|
||||
return tfm;
|
||||
}
|
||||
|
||||
static struct shash_alg poly1305_alg = {
|
||||
.digestsize = crypto_onetimeauth_poly1305_BYTES,
|
||||
.init = poly1305_init,
|
||||
.update = poly1305_update,
|
||||
.final = poly1305_final,
|
||||
.descsize = sizeof(struct poly1305_desc_ctx),
|
||||
.base = {
|
||||
.cra_name = "poly1305",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
},
|
||||
|
||||
.base.cra_name = "poly1305",
|
||||
.base.alloc_tfm = poly1305_alloc_tfm,
|
||||
};
|
||||
|
||||
__attribute__((constructor(110)))
|
||||
|
@ -24,13 +24,15 @@
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <linux/crypto.h>
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/hash.h>
|
||||
|
||||
#include <sodium/crypto_hash_sha256.h>
|
||||
|
||||
static struct shash_alg sha256_alg;
|
||||
|
||||
static int sha256_init(struct shash_desc *desc)
|
||||
{
|
||||
crypto_hash_sha256_state *state = shash_desc_ctx(desc);
|
||||
crypto_hash_sha256_state *state = (void *) desc->ctx;
|
||||
|
||||
return crypto_hash_sha256_init(state);
|
||||
}
|
||||
@ -38,28 +40,38 @@ static int sha256_init(struct shash_desc *desc)
|
||||
static int sha256_update(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len)
|
||||
{
|
||||
crypto_hash_sha256_state *state = shash_desc_ctx(desc);
|
||||
crypto_hash_sha256_state *state = (void *) desc->ctx;
|
||||
|
||||
return crypto_hash_sha256_update(state, data, len);
|
||||
}
|
||||
|
||||
static int sha256_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
crypto_hash_sha256_state *state = shash_desc_ctx(desc);
|
||||
crypto_hash_sha256_state *state = (void *) desc->ctx;
|
||||
|
||||
return crypto_hash_sha256_final(state, out);
|
||||
}
|
||||
|
||||
static void *sha256_alloc_tfm(void)
|
||||
{
|
||||
struct crypto_shash *tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
|
||||
|
||||
if (!tfm)
|
||||
return NULL;
|
||||
|
||||
tfm->base.alg = &sha256_alg.base;
|
||||
tfm->descsize = sizeof(crypto_hash_sha256_state);
|
||||
return tfm;
|
||||
}
|
||||
|
||||
static struct shash_alg sha256_alg = {
|
||||
.digestsize = crypto_hash_sha256_BYTES,
|
||||
.init = sha256_init,
|
||||
.update = sha256_update,
|
||||
.final = sha256_final,
|
||||
.descsize = sizeof(crypto_hash_sha256_state),
|
||||
.base = {
|
||||
.cra_name = "sha256",
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
}
|
||||
.base.cra_name = "sha256",
|
||||
.base.alloc_tfm = sha256_alloc_tfm,
|
||||
};
|
||||
|
||||
__attribute__((constructor(110)))
|
||||
|
@ -1,72 +0,0 @@
|
||||
/*
|
||||
* Synchronous Cryptographic Hash operations.
|
||||
*
|
||||
* Copyright (c) 2008 Herbert Xu <herbert@gondor.apana.org.au>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/printk.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
#include "internal.h"
|
||||
|
||||
static int shash_finup(struct shash_desc *desc, const u8 *data,
|
||||
unsigned len, u8 *out)
|
||||
{
|
||||
return crypto_shash_update(desc, data, len) ?:
|
||||
crypto_shash_final(desc, out);
|
||||
}
|
||||
|
||||
static int shash_digest(struct shash_desc *desc, const u8 *data,
|
||||
unsigned len, u8 *out)
|
||||
{
|
||||
return crypto_shash_init(desc) ?:
|
||||
crypto_shash_finup(desc, data, len, out);
|
||||
}
|
||||
|
||||
static int crypto_shash_init_tfm(struct crypto_tfm *tfm)
|
||||
{
|
||||
struct crypto_shash *hash = __crypto_shash_cast(tfm);
|
||||
|
||||
hash->descsize = crypto_shash_alg(hash)->descsize;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct crypto_type crypto_shash_type = {
|
||||
.extsize = crypto_alg_extsize,
|
||||
.init_tfm = crypto_shash_init_tfm,
|
||||
.maskclear = ~CRYPTO_ALG_TYPE_MASK,
|
||||
.maskset = CRYPTO_ALG_TYPE_MASK,
|
||||
.type = CRYPTO_ALG_TYPE_SHASH,
|
||||
.tfmsize = offsetof(struct crypto_shash, base),
|
||||
};
|
||||
|
||||
struct crypto_shash *crypto_alloc_shash(const char *alg_name,
|
||||
u32 type, u32 mask)
|
||||
{
|
||||
return crypto_alloc_tfm(alg_name, &crypto_shash_type, type, mask);
|
||||
}
|
||||
|
||||
int crypto_register_shash(struct shash_alg *alg)
|
||||
{
|
||||
struct crypto_alg *base = &alg->base;
|
||||
|
||||
base->cra_type = &crypto_shash_type;
|
||||
base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
|
||||
base->cra_flags |= CRYPTO_ALG_TYPE_SHASH;
|
||||
|
||||
if (!alg->finup)
|
||||
alg->finup = shash_finup;
|
||||
if (!alg->digest)
|
||||
alg->digest = shash_digest;
|
||||
|
||||
return crypto_register_alg(base);
|
||||
}
|
linux/rbtree.c | 615
@ -1,615 +0,0 @@
|
||||
/*
|
||||
Red Black Trees
|
||||
(C) 1999 Andrea Arcangeli <andrea@suse.de>
|
||||
(C) 2002 David Woodhouse <dwmw2@infradead.org>
|
||||
(C) 2012 Michel Lespinasse <walken@google.com>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
|
||||
linux/lib/rbtree.c
|
||||
*/
|
||||
|
||||
#include <linux/atomic.h>
|
||||
#include <linux/rbtree_augmented.h>
|
||||
#include <linux/export.h>
|
||||
|
||||
/*
|
||||
* red-black trees properties: http://en.wikipedia.org/wiki/Rbtree
|
||||
*
|
||||
* 1) A node is either red or black
|
||||
* 2) The root is black
|
||||
* 3) All leaves (NULL) are black
|
||||
* 4) Both children of every red node are black
|
||||
* 5) Every simple path from root to leaves contains the same number
|
||||
* of black nodes.
|
||||
*
|
||||
* 4 and 5 give the O(log n) guarantee, since 4 implies you cannot have two
|
||||
* consecutive red nodes in a path and every red node is therefore followed by
|
||||
* a black. So if B is the number of black nodes on every simple path (as per
|
||||
* 5), then the longest possible path due to 4 is 2B.
|
||||
*
|
||||
* We shall indicate color with case, where black nodes are uppercase and red
|
||||
* nodes will be lowercase. Unknown color nodes shall be drawn as red within
|
||||
* parentheses and have some accompanying text comment.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Notes on lockless lookups:
|
||||
*
|
||||
* All stores to the tree structure (rb_left and rb_right) must be done using
|
||||
* WRITE_ONCE(). And we must not inadvertently cause (temporary) loops in the
|
||||
* tree structure as seen in program order.
|
||||
*
|
||||
* These two requirements will allow lockless iteration of the tree -- not
|
||||
* correct iteration mind you, tree rotations are not atomic so a lookup might
|
||||
* miss entire subtrees.
|
||||
*
|
||||
* But they do guarantee that any such traversal will only see valid elements
|
||||
* and that it will indeed complete -- does not get stuck in a loop.
|
||||
*
|
||||
* It also guarantees that if the lookup returns an element it is the 'correct'
|
||||
* one. But not returning an element does _NOT_ mean it's not present.
|
||||
*
|
||||
* NOTE:
|
||||
*
|
||||
* Stores to __rb_parent_color are not important for simple lookups so those
|
||||
* are left undone as of now. Nor did I check for loops involving parent
|
||||
* pointers.
|
||||
*/
|
||||
|
||||
static inline void rb_set_black(struct rb_node *rb)
|
||||
{
|
||||
rb->__rb_parent_color |= RB_BLACK;
|
||||
}
|
||||
|
||||
static inline struct rb_node *rb_red_parent(struct rb_node *red)
|
||||
{
|
||||
return (struct rb_node *)red->__rb_parent_color;
|
||||
}
|
||||
|
||||
/*
|
||||
* Helper function for rotations:
|
||||
* - old's parent and color get assigned to new
|
||||
* - old gets assigned new as a parent and 'color' as a color.
|
||||
*/
|
||||
static inline void
|
||||
__rb_rotate_set_parents(struct rb_node *old, struct rb_node *new,
|
||||
struct rb_root *root, int color)
|
||||
{
|
||||
struct rb_node *parent = rb_parent(old);
|
||||
new->__rb_parent_color = old->__rb_parent_color;
|
||||
rb_set_parent_color(old, new, color);
|
||||
__rb_change_child(old, new, parent, root);
|
||||
}
|
||||
|
||||
static __always_inline void
|
||||
__rb_insert(struct rb_node *node, struct rb_root *root,
|
||||
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
|
||||
{
|
||||
struct rb_node *parent = rb_red_parent(node), *gparent, *tmp;
|
||||
|
||||
while (true) {
|
||||
/*
|
||||
* Loop invariant: node is red
|
||||
*
|
||||
* If there is a black parent, we are done.
|
||||
* Otherwise, take some corrective action as we don't
|
||||
* want a red root or two consecutive red nodes.
|
||||
*/
|
||||
if (!parent) {
|
||||
rb_set_parent_color(node, NULL, RB_BLACK);
|
||||
break;
|
||||
} else if (rb_is_black(parent))
|
||||
break;
|
||||
|
||||
gparent = rb_red_parent(parent);
|
||||
|
||||
tmp = gparent->rb_right;
|
||||
if (parent != tmp) { /* parent == gparent->rb_left */
|
||||
if (tmp && rb_is_red(tmp)) {
|
||||
/*
|
||||
* Case 1 - color flips
|
||||
*
|
||||
* G g
|
||||
* / \ / \
|
||||
* p u --> P U
|
||||
* / /
|
||||
* n n
|
||||
*
|
||||
* However, since g's parent might be red, and
|
||||
* 4) does not allow this, we need to recurse
|
||||
* at g.
|
||||
*/
|
||||
rb_set_parent_color(tmp, gparent, RB_BLACK);
|
||||
rb_set_parent_color(parent, gparent, RB_BLACK);
|
||||
node = gparent;
|
||||
parent = rb_parent(node);
|
||||
rb_set_parent_color(node, parent, RB_RED);
|
||||
continue;
|
||||
}
|
||||
|
||||
tmp = parent->rb_right;
|
||||
if (node == tmp) {
|
||||
/*
|
||||
* Case 2 - left rotate at parent
|
||||
*
|
||||
* G G
|
||||
* / \ / \
|
||||
* p U --> n U
|
||||
* \ /
|
||||
* n p
|
||||
*
|
||||
* This still leaves us in violation of 4), the
|
||||
* continuation into Case 3 will fix that.
|
||||
*/
|
||||
tmp = node->rb_left;
|
||||
WRITE_ONCE(parent->rb_right, tmp);
|
||||
WRITE_ONCE(node->rb_left, parent);
|
||||
if (tmp)
|
||||
rb_set_parent_color(tmp, parent,
|
||||
RB_BLACK);
|
||||
rb_set_parent_color(parent, node, RB_RED);
|
||||
augment_rotate(parent, node);
|
||||
parent = node;
|
||||
tmp = node->rb_right;
|
||||
}
|
||||
|
||||
/*
|
||||
* Case 3 - right rotate at gparent
|
||||
*
|
||||
* G P
|
||||
* / \ / \
|
||||
* p U --> n g
|
||||
* / \
|
||||
* n U
|
||||
*/
|
||||
WRITE_ONCE(gparent->rb_left, tmp); /* == parent->rb_right */
|
||||
WRITE_ONCE(parent->rb_right, gparent);
|
||||
if (tmp)
|
||||
rb_set_parent_color(tmp, gparent, RB_BLACK);
|
||||
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
|
||||
augment_rotate(gparent, parent);
|
||||
break;
|
||||
} else {
|
||||
tmp = gparent->rb_left;
|
||||
if (tmp && rb_is_red(tmp)) {
|
||||
/* Case 1 - color flips */
|
||||
rb_set_parent_color(tmp, gparent, RB_BLACK);
|
||||
rb_set_parent_color(parent, gparent, RB_BLACK);
|
||||
node = gparent;
|
||||
parent = rb_parent(node);
|
||||
rb_set_parent_color(node, parent, RB_RED);
|
||||
continue;
|
||||
}
|
||||
|
||||
tmp = parent->rb_left;
|
||||
if (node == tmp) {
|
||||
/* Case 2 - right rotate at parent */
|
||||
tmp = node->rb_right;
|
||||
WRITE_ONCE(parent->rb_left, tmp);
|
||||
WRITE_ONCE(node->rb_right, parent);
|
||||
if (tmp)
|
||||
rb_set_parent_color(tmp, parent,
|
||||
RB_BLACK);
|
||||
rb_set_parent_color(parent, node, RB_RED);
|
||||
augment_rotate(parent, node);
|
||||
parent = node;
|
||||
tmp = node->rb_left;
|
||||
}
|
||||
|
||||
/* Case 3 - left rotate at gparent */
|
||||
WRITE_ONCE(gparent->rb_right, tmp); /* == parent->rb_left */
|
||||
WRITE_ONCE(parent->rb_left, gparent);
|
||||
if (tmp)
|
||||
rb_set_parent_color(tmp, gparent, RB_BLACK);
|
||||
__rb_rotate_set_parents(gparent, parent, root, RB_RED);
|
||||
augment_rotate(gparent, parent);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Inline version for rb_erase() use - we want to be able to inline
|
||||
* and eliminate the dummy_rotate callback there
|
||||
*/
|
||||
static __always_inline void
|
||||
____rb_erase_color(struct rb_node *parent, struct rb_root *root,
|
||||
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
|
||||
{
|
||||
struct rb_node *node = NULL, *sibling, *tmp1, *tmp2;
|
||||
|
||||
while (true) {
|
||||
/*
|
||||
* Loop invariants:
|
||||
* - node is black (or NULL on first iteration)
|
||||
* - node is not the root (parent is not NULL)
|
||||
* - All leaf paths going through parent and node have a
|
||||
* black node count that is 1 lower than other leaf paths.
|
||||
*/
|
||||
sibling = parent->rb_right;
|
||||
if (node != sibling) { /* node == parent->rb_left */
|
||||
if (rb_is_red(sibling)) {
|
||||
/*
|
||||
* Case 1 - left rotate at parent
|
||||
*
|
||||
* P S
|
||||
* / \ / \
|
||||
* N s --> p Sr
|
||||
* / \ / \
|
||||
* Sl Sr N Sl
|
||||
*/
|
||||
tmp1 = sibling->rb_left;
|
||||
WRITE_ONCE(parent->rb_right, tmp1);
|
||||
WRITE_ONCE(sibling->rb_left, parent);
|
||||
rb_set_parent_color(tmp1, parent, RB_BLACK);
|
||||
__rb_rotate_set_parents(parent, sibling, root,
|
||||
RB_RED);
|
||||
augment_rotate(parent, sibling);
|
||||
sibling = tmp1;
|
||||
}
|
||||
tmp1 = sibling->rb_right;
|
||||
if (!tmp1 || rb_is_black(tmp1)) {
|
||||
tmp2 = sibling->rb_left;
|
||||
if (!tmp2 || rb_is_black(tmp2)) {
|
||||
/*
|
||||
* Case 2 - sibling color flip
|
||||
* (p could be either color here)
|
||||
*
|
||||
* (p) (p)
|
||||
* / \ / \
|
||||
* N S --> N s
|
||||
* / \ / \
|
||||
* Sl Sr Sl Sr
|
||||
*
|
||||
* This leaves us violating 5) which
|
||||
* can be fixed by flipping p to black
|
||||
* if it was red, or by recursing at p.
|
||||
* p is red when coming from Case 1.
|
||||
*/
|
||||
rb_set_parent_color(sibling, parent,
|
||||
RB_RED);
|
||||
if (rb_is_red(parent))
|
||||
rb_set_black(parent);
|
||||
else {
|
||||
node = parent;
|
||||
parent = rb_parent(node);
|
||||
if (parent)
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* Case 3 - right rotate at sibling
|
||||
* (p could be either color here)
|
||||
*
|
||||
* (p) (p)
|
||||
* / \ / \
|
||||
* N S --> N Sl
|
||||
* / \ \
|
||||
* sl Sr s
|
||||
* \
|
||||
* Sr
|
||||
*/
|
||||
tmp1 = tmp2->rb_right;
|
||||
WRITE_ONCE(sibling->rb_left, tmp1);
|
||||
WRITE_ONCE(tmp2->rb_right, sibling);
|
||||
WRITE_ONCE(parent->rb_right, tmp2);
|
||||
if (tmp1)
|
||||
rb_set_parent_color(tmp1, sibling,
|
||||
RB_BLACK);
|
||||
augment_rotate(sibling, tmp2);
|
||||
tmp1 = sibling;
|
||||
sibling = tmp2;
|
||||
}
|
||||
/*
|
||||
* Case 4 - left rotate at parent + color flips
|
||||
* (p and sl could be either color here.
|
||||
* After rotation, p becomes black, s acquires
|
||||
* p's color, and sl keeps its color)
|
||||
*
|
||||
* (p) (s)
|
||||
* / \ / \
|
||||
* N S --> P Sr
|
||||
* / \ / \
|
||||
* (sl) sr N (sl)
|
||||
*/
|
||||
tmp2 = sibling->rb_left;
|
||||
WRITE_ONCE(parent->rb_right, tmp2);
|
||||
WRITE_ONCE(sibling->rb_left, parent);
|
||||
rb_set_parent_color(tmp1, sibling, RB_BLACK);
|
||||
if (tmp2)
|
||||
rb_set_parent(tmp2, parent);
|
||||
__rb_rotate_set_parents(parent, sibling, root,
|
||||
RB_BLACK);
|
||||
augment_rotate(parent, sibling);
|
||||
break;
|
||||
} else {
|
||||
sibling = parent->rb_left;
|
||||
if (rb_is_red(sibling)) {
|
||||
/* Case 1 - right rotate at parent */
|
||||
tmp1 = sibling->rb_right;
|
||||
WRITE_ONCE(parent->rb_left, tmp1);
|
||||
WRITE_ONCE(sibling->rb_right, parent);
|
||||
rb_set_parent_color(tmp1, parent, RB_BLACK);
|
||||
__rb_rotate_set_parents(parent, sibling, root,
|
||||
RB_RED);
|
||||
augment_rotate(parent, sibling);
|
||||
sibling = tmp1;
|
||||
}
|
||||
tmp1 = sibling->rb_left;
|
||||
if (!tmp1 || rb_is_black(tmp1)) {
|
||||
tmp2 = sibling->rb_right;
|
||||
if (!tmp2 || rb_is_black(tmp2)) {
|
||||
/* Case 2 - sibling color flip */
|
||||
rb_set_parent_color(sibling, parent,
|
||||
RB_RED);
|
||||
if (rb_is_red(parent))
|
||||
rb_set_black(parent);
|
||||
else {
|
||||
node = parent;
|
||||
parent = rb_parent(node);
|
||||
if (parent)
|
||||
continue;
|
||||
}
|
||||
break;
|
||||
}
|
||||
/* Case 3 - right rotate at sibling */
|
||||
tmp1 = tmp2->rb_left;
|
||||
WRITE_ONCE(sibling->rb_right, tmp1);
|
||||
WRITE_ONCE(tmp2->rb_left, sibling);
|
||||
WRITE_ONCE(parent->rb_left, tmp2);
|
||||
if (tmp1)
|
||||
rb_set_parent_color(tmp1, sibling,
|
||||
RB_BLACK);
|
||||
augment_rotate(sibling, tmp2);
|
||||
tmp1 = sibling;
|
||||
sibling = tmp2;
|
||||
}
|
||||
/* Case 4 - left rotate at parent + color flips */
|
||||
tmp2 = sibling->rb_right;
|
||||
WRITE_ONCE(parent->rb_left, tmp2);
|
||||
WRITE_ONCE(sibling->rb_right, parent);
|
||||
rb_set_parent_color(tmp1, sibling, RB_BLACK);
|
||||
if (tmp2)
|
||||
rb_set_parent(tmp2, parent);
|
||||
__rb_rotate_set_parents(parent, sibling, root,
|
||||
RB_BLACK);
|
||||
augment_rotate(parent, sibling);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Non-inline version for rb_erase_augmented() use */
|
||||
void __rb_erase_color(struct rb_node *parent, struct rb_root *root,
|
||||
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
|
||||
{
|
||||
____rb_erase_color(parent, root, augment_rotate);
|
||||
}
|
||||
EXPORT_SYMBOL(__rb_erase_color);
|
||||
|
||||
/*
|
||||
* Non-augmented rbtree manipulation functions.
|
||||
*
|
||||
* We use dummy augmented callbacks here, and have the compiler optimize them
|
||||
* out of the rb_insert_color() and rb_erase() function definitions.
|
||||
*/
|
||||
|
||||
static inline void dummy_propagate(struct rb_node *node, struct rb_node *stop) {}
|
||||
static inline void dummy_copy(struct rb_node *old, struct rb_node *new) {}
|
||||
static inline void dummy_rotate(struct rb_node *old, struct rb_node *new) {}
|
||||
|
||||
static const struct rb_augment_callbacks dummy_callbacks = {
|
||||
dummy_propagate, dummy_copy, dummy_rotate
|
||||
};
|
||||
|
||||
void rb_insert_color(struct rb_node *node, struct rb_root *root)
|
||||
{
|
||||
__rb_insert(node, root, dummy_rotate);
|
||||
}
|
||||
EXPORT_SYMBOL(rb_insert_color);
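
A hedged sketch of the usual insert pattern built on rb_insert_color(); the node type is invented, and rb_link_node(), RB_ROOT and rb_entry() are assumed to come from linux/rbtree.h as usual.

struct mynode {
	u64		key;
	struct rb_node	node;
};

static struct rb_root mytree = RB_ROOT;

static void mynode_insert(struct mynode *new)
{
	struct rb_node **p = &mytree.rb_node, *parent = NULL;

	/* Ordered descent to find the link to attach to. */
	while (*p) {
		struct mynode *cur = rb_entry(*p, struct mynode, node);

		parent = *p;
		p = cur->key < new->key ? &(*p)->rb_right : &(*p)->rb_left;
	}

	rb_link_node(&new->node, parent, p);	/* attach as a red leaf */
	rb_insert_color(&new->node, &mytree);	/* then rebalance */
}
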
void rb_erase(struct rb_node *node, struct rb_root *root)
|
||||
{
|
||||
struct rb_node *rebalance;
|
||||
rebalance = __rb_erase_augmented(node, root, &dummy_callbacks);
|
||||
if (rebalance)
|
||||
____rb_erase_color(rebalance, root, dummy_rotate);
|
||||
}
|
||||
EXPORT_SYMBOL(rb_erase);
|
||||
|
||||
/*
|
||||
* Augmented rbtree manipulation functions.
|
||||
*
|
||||
* This instantiates the same __always_inline functions as in the non-augmented
|
||||
* case, but this time with user-defined callbacks.
|
||||
*/
|
||||
|
||||
void __rb_insert_augmented(struct rb_node *node, struct rb_root *root,
|
||||
void (*augment_rotate)(struct rb_node *old, struct rb_node *new))
|
||||
{
|
||||
__rb_insert(node, root, augment_rotate);
|
||||
}
|
||||
EXPORT_SYMBOL(__rb_insert_augmented);
|
||||
|
||||
/*
|
||||
* This function returns the first node (in sort order) of the tree.
|
||||
*/
|
||||
struct rb_node *rb_first(const struct rb_root *root)
|
||||
{
|
||||
struct rb_node *n;
|
||||
|
||||
n = root->rb_node;
|
||||
if (!n)
|
||||
return NULL;
|
||||
while (n->rb_left)
|
||||
n = n->rb_left;
|
||||
return n;
|
||||
}
|
||||
EXPORT_SYMBOL(rb_first);
|
||||
|
||||
struct rb_node *rb_last(const struct rb_root *root)
|
||||
{
|
||||
struct rb_node *n;
|
||||
|
||||
n = root->rb_node;
|
||||
if (!n)
|
||||
return NULL;
|
||||
while (n->rb_right)
|
||||
n = n->rb_right;
|
||||
return n;
|
||||
}
|
||||
EXPORT_SYMBOL(rb_last);
|
||||
|
||||
struct rb_node *rb_next(const struct rb_node *node)
|
||||
{
|
||||
struct rb_node *parent;
|
||||
|
||||
if (RB_EMPTY_NODE(node))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* If we have a right-hand child, go down and then left as far
|
||||
* as we can.
|
||||
*/
|
||||
if (node->rb_right) {
|
||||
node = node->rb_right;
|
||||
while (node->rb_left)
|
||||
node=node->rb_left;
|
||||
return (struct rb_node *)node;
|
||||
}
|
||||
|
||||
/*
|
||||
* No right-hand children. Everything down and left is smaller than us,
|
||||
* so any 'next' node must be in the general direction of our parent.
|
||||
* Go up the tree; any time the ancestor is a right-hand child of its
|
||||
* parent, keep going up. First time it's a left-hand child of its
|
||||
* parent, said parent is our 'next' node.
|
||||
*/
|
||||
while ((parent = rb_parent(node)) && node == parent->rb_right)
|
||||
node = parent;
|
||||
|
||||
return parent;
|
||||
}
|
||||
EXPORT_SYMBOL(rb_next);
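
A hedged sketch of an in-order walk using rb_first()/rb_next(), reusing the invented 'struct mynode' from the insert sketch above.

static u64 mytree_sum_keys(struct rb_root *root)
{
	struct rb_node *n;
	u64 sum = 0;

	/* Visits nodes in ascending key order. */
	for (n = rb_first(root); n; n = rb_next(n))
		sum += rb_entry(n, struct mynode, node)->key;

	return sum;
}
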
struct rb_node *rb_prev(const struct rb_node *node)
|
||||
{
|
||||
struct rb_node *parent;
|
||||
|
||||
if (RB_EMPTY_NODE(node))
|
||||
return NULL;
|
||||
|
||||
/*
|
||||
* If we have a left-hand child, go down and then right as far
|
||||
* as we can.
|
||||
*/
|
||||
if (node->rb_left) {
|
||||
node = node->rb_left;
|
||||
while (node->rb_right)
|
||||
node=node->rb_right;
|
||||
return (struct rb_node *)node;
|
||||
}
|
||||
|
||||
/*
|
||||
* No left-hand children. Go up till we find an ancestor which
|
||||
* is a right-hand child of its parent.
|
||||
*/
|
||||
while ((parent = rb_parent(node)) && node == parent->rb_left)
|
||||
node = parent;
|
||||
|
||||
return parent;
|
||||
}
|
||||
EXPORT_SYMBOL(rb_prev);
|
||||
|
||||
void rb_replace_node(struct rb_node *victim, struct rb_node *new,
|
||||
struct rb_root *root)
|
||||
{
|
||||
struct rb_node *parent = rb_parent(victim);
|
||||
|
||||
/* Copy the pointers/colour from the victim to the replacement */
|
||||
*new = *victim;
|
||||
|
||||
/* Set the surrounding nodes to point to the replacement */
|
||||
if (victim->rb_left)
|
||||
rb_set_parent(victim->rb_left, new);
|
||||
if (victim->rb_right)
|
||||
rb_set_parent(victim->rb_right, new);
|
||||
__rb_change_child(victim, new, parent, root);
|
||||
}
|
||||
EXPORT_SYMBOL(rb_replace_node);
|
||||
|
||||
void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
|
||||
struct rb_root *root)
|
||||
{
|
||||
struct rb_node *parent = rb_parent(victim);
|
||||
|
||||
/* Copy the pointers/colour from the victim to the replacement */
|
||||
*new = *victim;
|
||||
|
||||
/* Set the surrounding nodes to point to the replacement */
|
||||
if (victim->rb_left)
|
||||
rb_set_parent(victim->rb_left, new);
|
||||
if (victim->rb_right)
|
||||
rb_set_parent(victim->rb_right, new);
|
||||
|
||||
/* Set the parent's pointer to the new node last after an RCU barrier
|
||||
* so that the pointers onwards are seen to be set correctly when doing
|
||||
* an RCU walk over the tree.
|
||||
*/
|
||||
__rb_change_child_rcu(victim, new, parent, root);
|
||||
}
|
||||
EXPORT_SYMBOL(rb_replace_node_rcu);
|
||||
|
||||
static struct rb_node *rb_left_deepest_node(const struct rb_node *node)
|
||||
{
|
||||
for (;;) {
|
||||
if (node->rb_left)
|
||||
node = node->rb_left;
|
||||
else if (node->rb_right)
|
||||
node = node->rb_right;
|
||||
else
|
||||
return (struct rb_node *)node;
|
||||
}
|
||||
}
|
||||
|
||||
struct rb_node *rb_next_postorder(const struct rb_node *node)
|
||||
{
|
||||
const struct rb_node *parent;
|
||||
if (!node)
|
||||
return NULL;
|
||||
parent = rb_parent(node);
|
||||
|
||||
/* If we're sitting on node, we've already seen our children */
|
||||
if (parent && node == parent->rb_left && parent->rb_right) {
|
||||
/* If we are the parent's left node, go to the parent's right
|
||||
* node then all the way down to the left */
|
||||
return rb_left_deepest_node(parent->rb_right);
|
||||
} else
|
||||
/* Otherwise we are the parent's right node, and the parent
|
||||
* should be next */
|
||||
return (struct rb_node *)parent;
|
||||
}
|
||||
EXPORT_SYMBOL(rb_next_postorder);
|
||||
|
||||
struct rb_node *rb_first_postorder(const struct rb_root *root)
|
||||
{
|
||||
if (!root->rb_node)
|
||||
return NULL;
|
||||
|
||||
return rb_left_deepest_node(root->rb_node);
|
||||
}
|
||||
EXPORT_SYMBOL(rb_first_postorder);
|
@ -103,72 +103,6 @@ out:
|
||||
return timeout < 0 ? 0 : timeout;
|
||||
}
|
||||
|
||||
unsigned long __msecs_to_jiffies(const unsigned int m)
|
||||
{
|
||||
/*
|
||||
* Negative value, means infinite timeout:
|
||||
*/
|
||||
if ((int)m < 0)
|
||||
return MAX_JIFFY_OFFSET;
|
||||
return _msecs_to_jiffies(m);
|
||||
}
|
||||
|
||||
u64 nsecs_to_jiffies64(u64 n)
|
||||
{
|
||||
#if (NSEC_PER_SEC % HZ) == 0
|
||||
/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
|
||||
return div_u64(n, NSEC_PER_SEC / HZ);
|
||||
#elif (HZ % 512) == 0
|
||||
/* overflow after 292 years if HZ = 1024 */
|
||||
return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
|
||||
#else
|
||||
/*
|
||||
* Generic case - optimized for cases where HZ is a multiple of 3.
|
||||
* overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
|
||||
*/
|
||||
return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
|
||||
#endif
|
||||
}
|
||||
|
||||
unsigned long nsecs_to_jiffies(u64 n)
|
||||
{
|
||||
return (unsigned long)nsecs_to_jiffies64(n);
|
||||
}
|
||||
|
||||
unsigned int jiffies_to_msecs(const unsigned long j)
|
||||
{
|
||||
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
|
||||
return (MSEC_PER_SEC / HZ) * j;
|
||||
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
|
||||
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
|
||||
#else
|
||||
# if BITS_PER_LONG == 32
|
||||
return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
|
||||
# else
|
||||
return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
|
||||
# endif
|
||||
#endif
|
||||
}
|
||||
|
||||
unsigned int jiffies_to_usecs(const unsigned long j)
|
||||
{
|
||||
/*
|
||||
* HZ usually doesn't go much beyond MSEC_PER_SEC.
|
||||
* jiffies_to_usecs() and usecs_to_jiffies() depend on that.
|
||||
*/
|
||||
BUILD_BUG_ON(HZ > USEC_PER_SEC);
|
||||
|
||||
#if !(USEC_PER_SEC % HZ)
|
||||
return (USEC_PER_SEC / HZ) * j;
|
||||
#else
|
||||
# if BITS_PER_LONG == 32
|
||||
return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
|
||||
# else
|
||||
return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
|
||||
# endif
|
||||
#endif
|
||||
}
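
A small sketch showing how the conversions above compose; __msecs_to_jiffies() and jiffies_to_msecs() are the functions defined in this file, the helper name is invented for illustration.

static unsigned int quantize_msecs(unsigned int ms)
{
	/* ms -> jiffies -> ms: the result is ms quantized to the HZ tick period. */
	unsigned long j = __msecs_to_jiffies(ms);

	return jiffies_to_msecs(j);
}
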
__attribute__((constructor(101)))
|
||||
static void sched_init(void)
|
||||
{
|
||||
|
@ -1,256 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2008 Intel Corporation
|
||||
* Author: Matthew Wilcox <willy@linux.intel.com>
|
||||
*
|
||||
* Distributed under the terms of the GNU GPL, version 2
|
||||
*
|
||||
* This file implements counting semaphores.
|
||||
* A counting semaphore may be acquired 'n' times before sleeping.
|
||||
* See mutex.c for single-acquisition sleeping locks which enforce
|
||||
* rules which allow code to be debugged more easily.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Some notes on the implementation:
|
||||
*
|
||||
* The spinlock controls access to the other members of the semaphore.
|
||||
* down_trylock() and up() can be called from interrupt context, so we
|
||||
* have to disable interrupts when taking the lock. It turns out various
|
||||
* parts of the kernel expect to be able to use down() on a semaphore in
|
||||
* interrupt context when they know it will succeed, so we have to use
|
||||
* irqsave variants for down(), down_interruptible() and down_killable()
|
||||
* too.
|
||||
*
|
||||
* The ->count variable represents how many more tasks can acquire this
|
||||
* semaphore. If it's zero, there may be tasks waiting on the wait_list.
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/semaphore.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
static noinline void __down(struct semaphore *sem);
|
||||
static noinline int __down_interruptible(struct semaphore *sem);
|
||||
static noinline int __down_killable(struct semaphore *sem);
|
||||
static noinline int __down_timeout(struct semaphore *sem, long timeout);
|
||||
static noinline void __up(struct semaphore *sem);
|
||||
|
||||
/**
|
||||
* down - acquire the semaphore
|
||||
* @sem: the semaphore to be acquired
|
||||
*
|
||||
* Acquires the semaphore. If no more tasks are allowed to acquire the
|
||||
* semaphore, calling this function will put the task to sleep until the
|
||||
* semaphore is released.
|
||||
*
|
||||
* Use of this function is deprecated, please use down_interruptible() or
|
||||
* down_killable() instead.
|
||||
*/
|
||||
void down(struct semaphore *sem)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
raw_spin_lock_irqsave(&sem->lock, flags);
|
||||
if (likely(sem->count > 0))
|
||||
sem->count--;
|
||||
else
|
||||
__down(sem);
|
||||
raw_spin_unlock_irqrestore(&sem->lock, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(down);

/**
 * down_interruptible - acquire the semaphore unless interrupted
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore. If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a signal, this function will return -EINTR.
 * If the semaphore is successfully acquired, this function returns 0.
 */
int down_interruptible(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_interruptible(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_interruptible);
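
/*
 * Typical calling pattern (hypothetical sketch, reusing pool_sem from the
 * example above):
 *
 *	if (down_interruptible(&pool_sem))
 *		return -ERESTARTSYS;	// woken by a signal, semaphore not held
 *	// ... critical section ...
 *	up(&pool_sem);
 *
 * Callers must not touch the protected resource on the error path, since
 * the semaphore was never acquired.
 */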

/**
 * down_killable - acquire the semaphore unless killed
 * @sem: the semaphore to be acquired
 *
 * Attempts to acquire the semaphore. If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the sleep is interrupted by a fatal signal, this function will return
 * -EINTR. If the semaphore is successfully acquired, this function returns
 * 0.
 */
int down_killable(struct semaphore *sem)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_killable(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_killable);
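
/*
 * down_killable() follows the same pattern, but only a fatal signal (one
 * that is about to kill the task, e.g. SIGKILL) interrupts the sleep.
 * Hypothetical sketch:
 *
 *	if (down_killable(&pool_sem))
 *		return -EINTR;		// the task is dying; bail out cleanly
 */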

/**
 * down_trylock - try to acquire the semaphore, without waiting
 * @sem: the semaphore to be acquired
 *
 * Try to acquire the semaphore atomically. Returns 0 if the semaphore has
 * been acquired successfully or 1 if it cannot be acquired.
 *
 * NOTE: This return value is inverted from both spin_trylock and
 * mutex_trylock! Be careful about this when converting code.
 *
 * Unlike mutex_trylock, this function can be used from interrupt context,
 * and the semaphore can be released by any task or interrupt.
 */
int down_trylock(struct semaphore *sem)
{
	unsigned long flags;
	int count;

	raw_spin_lock_irqsave(&sem->lock, flags);
	count = sem->count - 1;
	if (likely(count >= 0))
		sem->count = count;
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return (count < 0);
}
EXPORT_SYMBOL(down_trylock);
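
/*
 * Because the return value is inverted relative to spin_trylock() and
 * mutex_trylock(), the idiomatic test reads "if the trylock failed".
 * Hypothetical sketch:
 *
 *	if (down_trylock(&pool_sem))
 *		return -EBUSY;		// 1: could not get the semaphore
 *	// 0: semaphore held; pair with up() when done
 */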

/**
 * down_timeout - acquire the semaphore within a specified time
 * @sem: the semaphore to be acquired
 * @timeout: how long to wait before failing
 *
 * Attempts to acquire the semaphore. If no more tasks are allowed to
 * acquire the semaphore, calling this function will put the task to sleep.
 * If the semaphore is not released within the specified number of jiffies,
 * this function returns -ETIME. It returns 0 if the semaphore was acquired.
 */
int down_timeout(struct semaphore *sem, long timeout)
{
	unsigned long flags;
	int result = 0;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(sem->count > 0))
		sem->count--;
	else
		result = __down_timeout(sem, timeout);
	raw_spin_unlock_irqrestore(&sem->lock, flags);

	return result;
}
EXPORT_SYMBOL(down_timeout);
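
/*
 * The timeout is given in jiffies, so callers usually convert from
 * milliseconds. Hypothetical sketch:
 *
 *	int ret = down_timeout(&pool_sem, msecs_to_jiffies(100));
 *	if (ret)
 *		return ret;		// -ETIME: no slot freed within 100 ms
 */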

/**
 * up - release the semaphore
 * @sem: the semaphore to release
 *
 * Release the semaphore. Unlike mutexes, up() may be called from any
 * context and even by tasks which have never called down().
 */
void up(struct semaphore *sem)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&sem->lock, flags);
	if (likely(list_empty(&sem->wait_list)))
		sem->count++;
	else
		__up(sem);
	raw_spin_unlock_irqrestore(&sem->lock, flags);
}
EXPORT_SYMBOL(up);
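
/*
 * Because up() may be called from any context, a semaphore initialised to
 * zero works as a simple wait/signal pair. Hypothetical sketch:
 *
 *	static struct semaphore done_sem;	// sema_init(&done_sem, 0)
 *
 *	static irqreturn_t my_irq_handler(int irq, void *data)
 *	{
 *		up(&done_sem);			// signal the waiting thread
 *		return IRQ_HANDLED;
 *	}
 *
 *	// in process context:
 *	down(&done_sem);			// sleep until the interrupt fires
 *
 * New code usually expresses this intent with a struct completion instead.
 */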

/* Functions for the contended case */

struct semaphore_waiter {
	struct list_head list;
	struct task_struct *task;
	bool up;
};

/*
 * Because this function is inlined, the 'state' parameter will be
 * constant, and thus optimised away by the compiler. Likewise the
 * 'timeout' parameter for the cases without timeouts.
 */
static inline int __sched __down_common(struct semaphore *sem, long state,
					long timeout)
{
	struct task_struct *task = current;
	struct semaphore_waiter waiter;

	list_add_tail(&waiter.list, &sem->wait_list);
	waiter.task = task;
	waiter.up = false;

	for (;;) {
		if (unlikely(timeout <= 0))
			goto timed_out;
		__set_task_state(task, state);
		raw_spin_unlock_irq(&sem->lock);
		timeout = schedule_timeout(timeout);
		raw_spin_lock_irq(&sem->lock);
		if (waiter.up)
			return 0;
	}

 timed_out:
	list_del(&waiter.list);
	return -1;
}

static noinline void __sched __down(struct semaphore *sem)
{
	__down_common(sem, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_interruptible(struct semaphore *sem)
{
	return __down_common(sem, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_killable(struct semaphore *sem)
{
	return __down_common(sem, TASK_KILLABLE, MAX_SCHEDULE_TIMEOUT);
}

static noinline int __sched __down_timeout(struct semaphore *sem, long timeout)
{
	return __down_common(sem, TASK_UNINTERRUPTIBLE, timeout);
}

static noinline void __sched __up(struct semaphore *sem)
{
	struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
						struct semaphore_waiter, list);
	list_del(&waiter->list);
	waiter->up = true;
	wake_up_process(waiter->task);
}
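
/*
 * Design note: __up() does not increment ->count. It hands the acquisition
 * directly to the first waiter by setting waiter->up and waking the task,
 * which then returns 0 from the loop in __down_common(). This closes the
 * window in which another task could otherwise grab a freshly incremented
 * count and starve the waiter.
 */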