bit_spinlocks now use futexes

Spinlocks aren't a good idea in userspace, where we can't actually
disable preemption.

Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
This commit is contained in:
Kent Overstreet 2021-04-26 19:40:09 -04:00
parent edc3ffe8f2
commit a14d39d7ac
2 changed files with 24 additions and 21 deletions

View File

@ -3,38 +3,40 @@
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/futex.h>
#include <linux/bug.h>
static inline void bit_spin_lock(int bitnum, unsigned long *addr) static inline void bit_spin_lock(int nr, unsigned long *_addr)
{ {
while (unlikely(test_and_set_bit_lock(bitnum, addr))) { u32 mask, *addr = ((u32 *) _addr) + (nr / 32), v;
do {
cpu_relax(); nr &= 31;
} while (test_bit(bitnum, addr)); mask = 1U << nr;
while (1) {
v = __atomic_fetch_or(addr, mask, __ATOMIC_ACQUIRE);
if (!(v & mask))
break;
futex(addr, FUTEX_WAIT|FUTEX_PRIVATE_FLAG, v, NULL, NULL, 0);
} }
} }
static inline int bit_spin_trylock(int bitnum, unsigned long *addr) static inline void bit_spin_wake(int nr, unsigned long *_addr)
{ {
return !test_and_set_bit_lock(bitnum, addr); u32 *addr = ((u32 *) _addr) + (nr / 32);
futex(addr, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, INT_MAX, NULL, NULL, 0);
} }
static inline void bit_spin_unlock(int bitnum, unsigned long *addr) static inline void bit_spin_unlock(int nr, unsigned long *_addr)
{ {
BUG_ON(!test_bit(bitnum, addr)); u32 mask, *addr = ((u32 *) _addr) + (nr / 32);
clear_bit_unlock(bitnum, addr); nr &= 31;
} mask = 1U << nr;
static inline void __bit_spin_unlock(int bitnum, unsigned long *addr) __atomic_and_fetch(addr, ~mask, __ATOMIC_RELEASE);
{ futex(addr, FUTEX_WAKE|FUTEX_PRIVATE_FLAG, INT_MAX, NULL, NULL, 0);
bit_spin_unlock(bitnum, addr);
}
static inline int bit_spin_is_locked(int bitnum, unsigned long *addr)
{
return test_bit(bitnum, addr);
} }
#endif /* __LINUX_BIT_SPINLOCK_H */

View File

@ -395,6 +395,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
	rcu_assign_pointer(*bkt, (void *)obj);
	preempt_enable();
	__release(bitlock);
	bit_spin_wake(0, (unsigned long *) bkt);
}

/**