Update closures from kernel source tree

Kent Overstreet 2021-05-27 21:13:52 -04:00
parent ea750088b0
commit 4a2acdaf65
4 changed files with 59 additions and 73 deletions

closure.h

@@ -1,8 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
 #ifndef _LINUX_CLOSURE_H
 #define _LINUX_CLOSURE_H
 
 #include <linux/llist.h>
+#include <linux/rcupdate.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <linux/workqueue.h>
 
 /*
@@ -103,6 +106,7 @@
 struct closure;
 struct closure_syncer;
 typedef void (closure_fn) (struct closure *);
+extern struct dentry *bcache_debug;
 
 struct closure_waitlist {
 	struct llist_head	list;
@@ -125,10 +129,10 @@ enum closure_state {
 	 * annotate where references are being transferred.
 	 */
 
-	CLOSURE_BITS_START	= (1U << 27),
-	CLOSURE_DESTRUCTOR	= (1U << 27),
-	CLOSURE_WAITING		= (1U << 29),
-	CLOSURE_RUNNING		= (1U << 31),
+	CLOSURE_BITS_START	= (1U << 26),
+	CLOSURE_DESTRUCTOR	= (1U << 26),
+	CLOSURE_WAITING		= (1U << 28),
+	CLOSURE_RUNNING		= (1U << 30),
 };
 
 #define CLOSURE_GUARD_MASK					\
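The flag bits move down one position because each flag keeps a spare "guard" bit directly above it (via CLOSURE_GUARD_MASK) to catch refcount over/underflow into the flags; with CLOSURE_RUNNING at bit 31 its guard bit would fall outside the 32-bit word. A rough sketch of the surrounding definitions, assumed from the kernel header rather than shown in this hunk:

	/* low bits hold the refcount; flags sit above, each followed by a guard bit */
	#define CLOSURE_GUARD_MASK					\
		((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)

	#define CLOSURE_REMAINING_MASK		(CLOSURE_BITS_START - 1)
	#define CLOSURE_REMAINING_INITIALIZER	(1|CLOSURE_RUNNING)

With the flags at bits 26/28/30 the guard bits land on 27/29/31.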
@@ -156,7 +160,7 @@ struct closure {
 #define CLOSURE_MAGIC_DEAD	0xc054dead
 #define CLOSURE_MAGIC_ALIVE	0xc054a11e
 
-	unsigned		magic;
+	unsigned int		magic;
 	struct list_head	all;
 	unsigned long		ip;
 	unsigned long		waiting_on;
@@ -232,10 +236,16 @@ static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
 static inline void closure_queue(struct closure *cl)
 {
 	struct workqueue_struct *wq = cl->wq;
+	/**
+	 * Changes made to closure, work_struct, or a couple of other structs
+	 * may cause work.func not pointing to the right location.
+	 */
+	BUILD_BUG_ON(offsetof(struct closure, fn)
+		     != offsetof(struct work_struct, func));
 	if (wq) {
 		INIT_WORK(&cl->work, cl->work.func);
-		queue_work(wq, &cl->work);
+		BUG_ON(!queue_work(wq, &cl->work));
 	} else
 		cl->fn(cl);
 }
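The new BUILD_BUG_ON() encodes the invariant that INIT_WORK(&cl->work, cl->work.func) relies on: struct closure overlays its own fields and a work_struct in a union, so cl->fn and cl->work.func share storage and the function stored by set_closure_fn() can be reused as the work function. A minimal illustrative sketch of that layout (not the full struct closure definition):

	struct closure {
		union {
			struct {
				struct workqueue_struct	*wq;	/* set by set_closure_fn() */
				struct closure_syncer	*s;
				struct llist_node	list;
				closure_fn		*fn;	/* must line up with work.func */
			};
			struct work_struct		work;
		};
		/* parent pointer, remaining refcount, debug fields, ... */
	};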
@@ -279,20 +289,16 @@ static inline void closure_init_stack(struct closure *cl)
 }
 
 /**
- * closure_wake_up - wake up all closures on a wait list.
+ * closure_wake_up - wake up all closures on a wait list,
+ *		     with memory barrier
  */
 static inline void closure_wake_up(struct closure_waitlist *list)
 {
+	/* Memory barrier for the wait list */
 	smp_mb();
 	__closure_wake_up(list);
 }
 
-#define continue_at_noreturn(_cl, _fn, _wq)				\
-do {									\
-	set_closure_fn(_cl, _fn, _wq);					\
-	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
-} while (0)
-
 /**
  * continue_at - jump to another function with barrier
  *
@@ -300,16 +306,16 @@ do {									\
  * been dropped with closure_put()), it will resume execution at @fn running out
  * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
  *
- * NOTE: This macro expands to a return in the calling function!
- *
  * This is because after calling continue_at() you no longer have a ref on @cl,
  * and whatever @cl owns may be freed out from under you - a running closure fn
  * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
  */
 #define continue_at(_cl, _fn, _wq)					\
 do {									\
-	continue_at_noreturn(_cl, _fn, _wq);				\
-	return;								\
+	set_closure_fn(_cl, _fn, _wq);					\
+	closure_sub(_cl, CLOSURE_RUNNING + 1);				\
 } while (0)
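Since continue_at() no longer expands to a return, callers now return on their own right after it. A hedged usage sketch with hypothetical names (start_read, read_done, my_wq):

	static struct workqueue_struct *my_wq;

	static void read_done(struct closure *cl)
	{
		/* runs out of my_wq once every ref taken by the async work is dropped */
	}

	static void start_read(struct closure *cl)
	{
		/* ... kick off async work that holds refs via closure_get(cl) ... */

		continue_at(cl, read_done, my_wq);
		/* nothing after this may touch *cl; return immediately */
	}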
 /**
@@ -328,32 +334,19 @@ do {									\
  * Causes @fn to be executed out of @cl, in @wq context (or called directly if
  * @wq is NULL).
  *
- * NOTE: like continue_at(), this macro expands to a return in the caller!
- *
  * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
  * thus it's not safe to touch anything protected by @cl after a
  * continue_at_nobarrier().
  */
 #define continue_at_nobarrier(_cl, _fn, _wq)				\
 do {									\
-	closure_set_ip(_cl);						\
-	if (_wq) {							\
-		INIT_WORK(&(_cl)->work, (void *) _fn);			\
-		queue_work((_wq), &(_cl)->work);			\
-	} else {							\
-		(_fn)(_cl);						\
-	}								\
-	return;								\
-} while (0)
-
-#define closure_return_with_destructor_noreturn(_cl, _destructor)	\
-do {									\
-	set_closure_fn(_cl, _destructor, NULL);				\
-	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
+	set_closure_fn(_cl, _fn, _wq);					\
+	closure_queue(_cl);						\
 } while (0)
 
 /**
- * closure_return - finish execution of a closure, with destructor
+ * closure_return_with_destructor - finish execution of a closure,
+ *	with destructor
  *
  * Works like closure_return(), except @destructor will be called when all
  * outstanding refs on @cl have been dropped; @destructor may be used to safely
@@ -363,8 +356,8 @@ do {									\
  */
 #define closure_return_with_destructor(_cl, _destructor)		\
 do {									\
-	closure_return_with_destructor_noreturn(_cl, _destructor);	\
-	return;								\
+	set_closure_fn(_cl, _destructor, NULL);				\
+	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
 } while (0)
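closure_return_with_destructor() gets the same treatment: the _noreturn helper is folded in and the caller returns immediately afterwards. A hedged sketch with a hypothetical object embedding a closure:

	struct my_op {
		struct closure	cl;
		/* ... */
	};

	static void my_op_free(struct closure *cl)
	{
		/* runs once every outstanding ref is gone; safe to free the object */
		kfree(container_of(cl, struct my_op, cl));
	}

	static void my_op_done(struct closure *cl)
	{
		closure_return_with_destructor(cl, my_op_free);
		/* the closure (and my_op) may already be gone; just return */
	}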
 /**

closure.c

@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
  * Asynchronous refcounty things
  *
@@ -9,6 +10,7 @@
 #include <linux/debugfs.h>
 #include <linux/export.h>
 #include <linux/seq_file.h>
+#include <linux/sched/debug.h>
 
 static inline void closure_put_after_sub(struct closure *cl, int flags)
 {
@@ -44,7 +46,7 @@ void closure_sub(struct closure *cl, int v)
 }
 EXPORT_SYMBOL(closure_sub);
 
-/**
+/*
  * closure_put - decrement a closure's refcount
  */
 void closure_put(struct closure *cl)
@@ -53,24 +55,22 @@ void closure_put(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_put);
 
-/**
+/*
  * closure_wake_up - wake up all closures on a wait list, without memory barrier
  */
 void __closure_wake_up(struct closure_waitlist *wait_list)
 {
-	struct llist_node *list, *next;
-	struct closure *cl;
+	struct llist_node *list;
+	struct closure *cl, *t;
+	struct llist_node *reverse = NULL;
 
-	/*
-	 * Grab entire list, reverse order to preserve FIFO ordering, and wake
-	 * everything up
-	 */
-	for (list = llist_reverse_order(llist_del_all(&wait_list->list));
-	     list;
-	     list = next) {
-		next = llist_next(list);
-		cl = container_of(list, struct closure, list);
+	list = llist_del_all(&wait_list->list);
+
+	/* We first reverse the list to preserve FIFO ordering and fairness */
+	reverse = llist_reverse_order(list);
+
+	/* Then do the wakeups */
+	llist_for_each_entry_safe(cl, t, reverse, list) {
 		closure_set_waiting(cl, 0);
 		closure_sub(cl, CLOSURE_WAITING + 1);
 	}
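The switch to llist_for_each_entry_safe() keeps the property the old hand-rolled loop had: the next node is read before closure_sub() runs, since that call may drop the final ref and free the closure mid-iteration. For context, a hedged sketch of how a waiter and waker typically pair up with such a waitlist; all names here (condition, waitlist, my_wq, my_fn) are hypothetical, and spurious wakeups are harmless because my_fn rechecks:

	static bool condition;
	static struct closure_waitlist waitlist;
	static struct workqueue_struct *my_wq;

	static void my_fn(struct closure *cl)
	{
		if (!condition) {
			closure_wait(&waitlist, cl);	/* sets CLOSURE_WAITING, takes a ref */

			if (!condition) {
				/* park: drop the running ref; my_fn reruns when woken */
				continue_at(cl, my_fn, my_wq);
				return;
			}

			/* condition flipped while queueing: wake the list (including us) */
			closure_wake_up(&waitlist);
		}

		/* ... condition holds, carry on ... */
	}

The waker side simply sets the condition and then calls closure_wake_up(&waitlist).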
@@ -79,9 +79,9 @@ EXPORT_SYMBOL(__closure_wake_up);
 /**
  * closure_wait - add a closure to a waitlist
- *
- * @waitlist will own a ref on @cl, which will be released when
+ * @waitlist: will own a ref on @cl, which will be released when
  * closure_wake_up() is called on @waitlist.
+ * @cl: closure pointer.
  *
  */
 bool closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
@@ -104,8 +104,14 @@ struct closure_syncer {
 static void closure_sync_fn(struct closure *cl)
 {
-	cl->s->done = 1;
-	wake_up_process(cl->s->task);
+	struct closure_syncer *s = cl->s;
+	struct task_struct *p;
+
+	rcu_read_lock();
+	p = READ_ONCE(s->task);
+	s->done = 1;
+	wake_up_process(p);
+	rcu_read_unlock();
 }
 
 void __sched __closure_sync(struct closure *cl)
@@ -113,11 +119,10 @@ void __sched __closure_sync(struct closure *cl)
 	struct closure_syncer s = { .task = current };
 
 	cl->s = &s;
-	continue_at_noreturn(cl, closure_sync_fn, NULL);
+	continue_at(cl, closure_sync_fn, NULL);
 
 	while (1) {
-		__set_current_state(TASK_UNINTERRUPTIBLE);
-		smp_mb();
+		set_current_state(TASK_UNINTERRUPTIBLE);
 		if (s.done)
 			break;
 		schedule();
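The RCU read lock around the wake-up pairs with the on-stack struct closure_syncer in __closure_sync(): once s->done is observable the sleeping task may return at any time, so closure_sync_fn() samples the task pointer first and keeps the task_struct valid under RCU for wake_up_process(). A hedged sketch of the caller-side pattern this synchronizes, where submit_async_work() is hypothetical:

	struct closure cl;

	closure_init_stack(&cl);

	/* each piece of async work takes a ref with closure_get(&cl)
	 * and drops it with closure_put(&cl) when it completes */
	submit_async_work(&cl);

	closure_sync(&cl);	/* sleeps until only our own ref remains */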
@@ -158,9 +163,7 @@ void closure_debug_destroy(struct closure *cl)
 }
 EXPORT_SYMBOL(closure_debug_destroy);
 
-static struct dentry *debug;
-
-static int debug_seq_show(struct seq_file *f, void *data)
+static int debug_show(struct seq_file *f, void *data)
 {
 	struct closure *cl;
@@ -169,7 +172,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
 	list_for_each_entry(cl, &closure_list, all) {
 		int r = atomic_read(&cl->remaining);
 
-		seq_printf(f, "%p: %pF -> %pf p %p r %i ",
+		seq_printf(f, "%p: %pS -> %pS p %p r %i ",
 			   cl, (void *) cl->ip, cl->fn, cl->parent,
 			   r & CLOSURE_REMAINING_MASK);
@ -179,7 +182,7 @@ static int debug_seq_show(struct seq_file *f, void *data)
r & CLOSURE_RUNNING ? "R" : ""); r & CLOSURE_RUNNING ? "R" : "");
if (r & CLOSURE_WAITING) if (r & CLOSURE_WAITING)
seq_printf(f, " W %pF\n", seq_printf(f, " W %pS\n",
(void *) cl->waiting_on); (void *) cl->waiting_on);
seq_puts(f, "\n"); seq_puts(f, "\n");
@@ -189,21 +192,11 @@ static int debug_seq_show(struct seq_file *f, void *data)
 	return 0;
 }
 
-static int debug_seq_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, debug_seq_show, NULL);
-}
-
-static const struct file_operations debug_ops = {
-	.owner		= THIS_MODULE,
-	.open		= debug_seq_open,
-	.read		= seq_read,
-	.release	= single_release
-};
+DEFINE_SHOW_ATTRIBUTE(debug);
 
 static int __init closure_debug_init(void)
 {
-	debug = debugfs_create_file("closures", 0400, NULL, NULL, &debug_ops);
+	debugfs_create_file("closures", 0400, NULL, NULL, &debug_fops);
 	return 0;
 }
 late_initcall(closure_debug_init)
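DEFINE_SHOW_ATTRIBUTE(debug) is the stock <linux/seq_file.h> helper and is why debugfs_create_file() now takes &debug_fops: it generates roughly what the deleted debug_seq_open()/debug_ops boilerplate provided (sketch, slightly simplified relative to the real macro):

	static int debug_open(struct inode *inode, struct file *file)
	{
		return single_open(file, debug_show, inode->i_private);
	}

	static const struct file_operations debug_fops = {
		.owner		= THIS_MODULE,
		.open		= debug_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

The return value of debugfs_create_file() is no longer stored, since debugfs callers are expected to ignore creation errors; that is also why the static struct dentry *debug went away.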