From ff4d082246ed542c28c90f3f30b9b964d67591d3 Mon Sep 17 00:00:00 2001
From: Justin Husted
Date: Wed, 9 Oct 2019 19:27:22 -0700
Subject: [PATCH] Change preempt_disable() etc. to use a recursive mutex.

This is part of the userspace implementation of the kernel APIs for
bcachefs-tools.

The previous implementation only provided a compiler barrier, which is
not sufficient to make the associated percpu implementation safe.

Signed-off-by: Justin Husted
---
 include/linux/preempt.h | 15 ++++++++-------
 linux/preempt.c         | 28 ++++++++++++++++++++++++++++
 2 files changed, 36 insertions(+), 7 deletions(-)
 create mode 100644 linux/preempt.c

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 06186016..dbc7c24d 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -1,15 +1,16 @@
 #ifndef __LINUX_PREEMPT_H
 #define __LINUX_PREEMPT_H
 
-#define preempt_disable()			barrier()
-#define sched_preempt_enable_no_resched()	barrier()
-#define preempt_enable_no_resched()		barrier()
-#define preempt_enable()			barrier()
+extern void preempt_disable(void);
+extern void preempt_enable(void);
+
+#define sched_preempt_enable_no_resched()	preempt_enable()
+#define preempt_enable_no_resched()		preempt_enable()
 #define preempt_check_resched()			do { } while (0)
 
-#define preempt_disable_notrace()		barrier()
-#define preempt_enable_no_resched_notrace()	barrier()
-#define preempt_enable_notrace()		barrier()
+#define preempt_disable_notrace()		preempt_disable()
+#define preempt_enable_no_resched_notrace()	preempt_enable()
+#define preempt_enable_notrace()		preempt_enable()
 #define preemptible()				0
 
 #endif /* __LINUX_PREEMPT_H */
diff --git a/linux/preempt.c b/linux/preempt.c
new file mode 100644
index 00000000..aa092c1d
--- /dev/null
+++ b/linux/preempt.c
@@ -0,0 +1,28 @@
+#include <pthread.h>
+
+#include "linux/preempt.h"
+
+/*
+ * In userspace, pthreads are preemptible and can migrate CPUs at any time.
+ *
+ * In the kernel, preempt_disable() logic essentially guarantees that a marked
+ * critical section owns its CPU for the duration of the block. This is
+ * necessary for various code paths, critically including the percpu system,
+ * as it allows non-atomic reads and writes to CPU-local data structures.
+ *
+ * The high-performance userspace equivalent would be to use thread-local
+ * storage to replace percpu data, but that would be complicated. It should be
+ * correct to instead guarantee mutual exclusion for the critical sections.
+ */
+
+static pthread_mutex_t preempt_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+
+void preempt_disable(void)
+{
+	pthread_mutex_lock(&preempt_lock);
+}
+
+void preempt_enable(void)
+{
+	pthread_mutex_unlock(&preempt_lock);
+}
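
A minimal usage sketch (not part of the patch) of the pattern this change is
meant to protect. The counter and the bump() helper below are hypothetical
stand-ins for the percpu data structures in bcachefs-tools:

	#include "linux/preempt.h"

	static unsigned long counter;	/* stand-in for CPU-local data */

	static void bump(void)
	{
		preempt_disable();
		/*
		 * With the old barrier()-based stubs, two pthreads could
		 * race on this read-modify-write. The recursive mutex
		 * serializes it, and a nested preempt_disable() in a callee
		 * will not deadlock, as long as every disable is paired
		 * with an enable.
		 */
		counter++;
		preempt_enable();
	}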