#ifndef __LINUX_BIT_SPINLOCK_H
#define __LINUX_BIT_SPINLOCK_H

#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/futex.h>
#include <urcu/futex.h>

/*
 * The futex wait op wants an explicit 32-bit address and value. If the bitmap
 * used for the spinlock is 64-bit, cast down and pass the right 32-bit region
 * for the in-kernel checks. The value is the copy that has already been read
 * from the atomic op.
 *
 * The futex wake op interprets the value as the number of waiters to wake (up
 * to INT_MAX), so pass that along directly.
 */
static inline void do_futex(int nr, unsigned long *addr, unsigned long v, int futex_flags)
{
	u32 *addr32 = (u32 *) addr;
	u32 *v32 = (u32 *) &v;
	int shift = 0;

	futex_flags |= FUTEX_PRIVATE_FLAG;

	/*
	 * Select the 32-bit half of the 64-bit word that contains bit nr:
	 * on little-endian that is the upper half when nr >= 32; on
	 * big-endian the halves are laid out the other way around.
	 */
#if BITS_PER_LONG == 64
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	shift = (nr >= 32) ? 1 : 0;
#else
	shift = (nr < 32) ? 1 : 0;
#endif
#endif
	if (shift) {
		addr32 += shift;
		v32 += shift;
	}
	/*
	 * The shift to determine the futex address may have cast away a
	 * literal wake count value. The value is capped to INT_MAX and thus
	 * always in the low bytes of v regardless of bit nr. Copy in the wake
	 * count to whatever 32-bit range was selected.
	 */
	if (futex_flags == FUTEX_WAKE_PRIVATE)
		*v32 = (u32) v;
	futex(addr32, futex_flags, *v32, NULL, NULL, 0);
}

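/*
 * Acquire the lock implemented by bit @nr of the bitmap at @_addr: set the
 * bit with acquire ordering and, if it was already set, sleep on a futex
 * keyed on the 32-bit word containing the bit instead of busy-waiting.
 */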
static inline void bit_spin_lock(int nr, unsigned long *_addr)
{
	unsigned long mask;
	unsigned long *addr = _addr + (nr / BITS_PER_LONG);
	unsigned long v;

	nr &= BITS_PER_LONG - 1;
	mask = 1UL << nr;

	while (1) {
		v = __atomic_fetch_or(addr, mask, __ATOMIC_ACQUIRE);
		if (!(v & mask))
			break;

		/*
		 * Lock is held: sleep until the holder clears the bit and
		 * wakes us, then retry. do_futex() passes v so the kernel
		 * can recheck the word and avoid missed wakeups.
		 */
		do_futex(nr, addr, v, FUTEX_WAIT);
	}
}

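/*
 * Wake any waiters sleeping in bit_spin_lock() on bit @nr of the bitmap at
 * @_addr, without modifying the lock bit itself.
 */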
static inline void bit_spin_wake(int nr, unsigned long *_addr)
{
	do_futex(nr, _addr, INT_MAX, FUTEX_WAKE);
}

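/*
 * Release the lock: clear bit @nr with release ordering, then wake all
 * waiters blocked in bit_spin_lock().
 */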
static inline void bit_spin_unlock(int nr, unsigned long *_addr)
{
	unsigned long mask;
	unsigned long *addr = _addr + (nr / BITS_PER_LONG);

	nr &= BITS_PER_LONG - 1;
	mask = 1UL << nr;

	__atomic_and_fetch(addr, ~mask, __ATOMIC_RELEASE);
	do_futex(nr, addr, INT_MAX, FUTEX_WAKE);
}

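/*
 * Example usage (an illustrative sketch only, nothing in this header uses
 * it): one bit of an embedded flags word serializes updates to the rest of
 * a structure. The struct and names below are hypothetical.
 */
struct bit_spinlock_example {
	unsigned long	flags;		/* bit 0 doubles as the lock */
	int		counter;	/* data protected by the lock */
};

static inline void bit_spinlock_example_inc(struct bit_spinlock_example *e)
{
	bit_spin_lock(0, &e->flags);	/* set bit 0, sleeping if contended */
	e->counter++;			/* critical section */
	bit_spin_unlock(0, &e->flags);	/* clear bit 0 and wake waiters */
}
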
#endif /* __LINUX_BIT_SPINLOCK_H */