mirror of
https://github.com/koverstreet/bcachefs-tools.git
synced 2025-01-23 00:07:07 +03:00
Change preempt_disable() etc. to use a recursive mutex.
This is part of the userspace implementation of the kernel APIs for bcachefs-tools. The previous implementation just provided a barrier, but this isn't sufficient to make the associated percpu implementation safe. Signed-off-by: Justin Husted <sigstop@gmail.com>
This commit is contained in:
parent
2bc1166919
commit
ff4d082246
@ -1,15 +1,16 @@
|
||||
#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * Userspace shim for the kernel preemption API.
 *
 * preempt_disable()/preempt_enable() are real functions (see
 * linux/preempt.c) that take/release a recursive mutex, so a marked
 * critical section gets mutual exclusion — a plain barrier() is not
 * enough to make the percpu emulation safe.
 */

extern void preempt_disable(void);
extern void preempt_enable(void);

/* Userspace never defers a reschedule, so the "no_resched" variants
 * are identical to the plain enable, and the resched check is a no-op. */
#define sched_preempt_enable_no_resched()	preempt_enable()
#define preempt_enable_no_resched()		preempt_enable()
#define preempt_check_resched()			do { } while (0)

/* No tracing in userspace: the _notrace variants alias the plain ones. */
#define preempt_disable_notrace()		preempt_disable()
#define preempt_enable_no_resched_notrace()	preempt_enable()
#define preempt_enable_notrace()		preempt_enable()

/* Outside a critical section we still report "not preemptible", matching
 * the kernel's behavior on !CONFIG_PREEMPT builds. */
#define preemptible()				0

#endif /* __LINUX_PREEMPT_H */
|
28
linux/preempt.c
Normal file
28
linux/preempt.c
Normal file
@ -0,0 +1,28 @@
|
||||
#include <pthread.h>
|
||||
|
||||
#include "linux/preempt.h"
|
||||
|
||||
/*
|
||||
* In userspace, pthreads are preemptible and can migrate CPUs at any time.
|
||||
*
|
||||
* In the kernel, preempt_disable() logic essentially guarantees that a marked
|
||||
* critical section owns its CPU for the relevant block. This is necessary for
|
||||
* various code paths, critically including the percpu system as it allows for
|
||||
* non-atomic reads and writes to CPU-local data structures.
|
||||
*
|
||||
* The high performance userspace equivalent would be to use thread local
|
||||
* storage to replace percpu data, but that would be complicated. It should be
|
||||
* correct to instead guarantee mutual exclusion for the critical sections.
|
||||
*/
|
||||
|
||||
static pthread_mutex_t preempt_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
|
||||
|
||||
void preempt_disable(void)
|
||||
{
|
||||
pthread_mutex_lock(&preempt_lock);
|
||||
}
|
||||
|
||||
void preempt_enable(void)
|
||||
{
|
||||
pthread_mutex_unlock(&preempt_lock);
|
||||
}
|
Loading…
Reference in New Issue
Block a user