Update bcachefs sources to 690e0168a8e0 bcachefs: vendor closures

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2025-09-18 19:41:31 -04:00
parent 8a61c2cb15
commit 9688e2e455
10 changed files with 730 additions and 18 deletions

.bcachefs_revision
@@ -1 +1 @@
-68d390aa7f67b4ffb92497e0774560fc9ee5d188
+690e0168a8e00058405a412dc74071c290c83429

6 .gitignore vendored

@@ -24,9 +24,9 @@ bcachefs-principles-of-operation.*
 bch_bindgen/Cargo.lock

 # will have compiled files and executables
-debug/
-target/
-vendor/
+/debug/
+/target/
+/vendor/

 # These are backup files generated by rustfmt
 **/*.rs.bk

Makefile
@@ -189,10 +189,11 @@ install: all install_dkms

 .PHONY: install_dkms
 install_dkms: dkms/dkms.conf
-	$(INSTALL) -m0644 -D dkms/Makefile -t $(DESTDIR)$(DKMSDIR)
-	$(INSTALL) -m0644 -D dkms/dkms.conf -t $(DESTDIR)$(DKMSDIR)
-	$(INSTALL) -m0644 -D libbcachefs/Makefile -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
-	$(INSTALL) -m0644 -D libbcachefs/*.[ch] -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
+	$(INSTALL) -m0644 -D dkms/Makefile -t $(DESTDIR)$(DKMSDIR)
+	$(INSTALL) -m0644 -D dkms/dkms.conf -t $(DESTDIR)$(DKMSDIR)
+	$(INSTALL) -m0644 -D libbcachefs/Makefile -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
+	$(INSTALL) -m0644 -D libbcachefs/*.[ch] -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs
+	$(INSTALL) -m0644 -D libbcachefs/vendor/*.[ch] -t $(DESTDIR)$(DKMSDIR)/src/fs/bcachefs/vendor
 	sed -i "s|^#define TRACE_INCLUDE_PATH \\.\\./\\.\\./fs/bcachefs$$|#define TRACE_INCLUDE_PATH $(DKMSDIR)/src/fs/bcachefs|" \
 		$(DESTDIR)$(DKMSDIR)/src/fs/bcachefs/trace.h
@@ -225,16 +226,14 @@ cargo-update-msrv:
 .PHONY: update-bcachefs-sources
 update-bcachefs-sources:
 	git rm -rf --ignore-unmatch libbcachefs
-	test -d libbcachefs || mkdir libbcachefs
+	mkdir -p libbcachefs/vendor
 	cp $(LINUX_DIR)/fs/bcachefs/*.[ch] libbcachefs/
+	cp $(LINUX_DIR)/fs/bcachefs/vendor/*.[ch] libbcachefs/vendor/
 	cp $(LINUX_DIR)/fs/bcachefs/Makefile libbcachefs/
 	git add libbcachefs/*.[ch]
+	git add libbcachefs/vendor/*.[ch]
 	git add libbcachefs/Makefile
 	git rm -f libbcachefs/mean_and_variance_test.c
-	cp $(LINUX_DIR)/include/linux/closure.h include/linux/
-	git add include/linux/closure.h
-	cp $(LINUX_DIR)/lib/closure.c linux/
-	git add linux/closure.c
 	cp $(LINUX_DIR)/include/linux/xxhash.h include/linux/
 	git add include/linux/xxhash.h
 	cp $(LINUX_DIR)/lib/xxhash.c linux/

libbcachefs/Makefile
@@ -98,7 +98,8 @@ bcachefs-y := \
 	two_state_shared_lock.o \
 	util.o \
 	varint.o \
-	xattr.o
+	xattr.o \
+	vendor/closure.o

 obj-$(CONFIG_MEAN_AND_VARIANCE_UNIT_TEST) += mean_and_variance_test.o

libbcachefs/bcachefs.h
@@ -196,7 +196,6 @@
 #include <linux/backing-dev-defs.h>
 #include <linux/bug.h>
 #include <linux/bio.h>
-#include <linux/closure.h>
 #include <linux/kobject.h>
 #include <linux/list.h>
 #include <linux/math64.h>
@@ -217,6 +216,7 @@
 #include "bcachefs_format.h"
 #include "btree_journal_iter_types.h"
+#include "closure.h"
 #include "disk_accounting_types.h"
 #include "errcode.h"
 #include "fast_list.h"

5 libbcachefs/closure.h Normal file

@@ -0,0 +1,5 @@
#include "vendor/closure.h"

#define closure_wait		bch2_closure_wait
#define closure_return_sync	bch2_closure_return_sync
#define __closure_wake_up	__bch2_closure_wake_up
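
The shim keeps existing call sites unchanged: code that includes "closure.h" picks up the vendored declarations, and the renamed symbols resolve through the preprocessor. A minimal sketch (example_wait() is hypothetical):

#include "closure.h"	/* pulls in vendor/closure.h plus the renames */

/* Add @cl to @wl, unless it's already on a waitlist: */
static bool example_wait(struct closure_waitlist *wl, struct closure *cl)
{
	/* The preprocessor rewrites this to bch2_closure_wait(wl, cl): */
	return closure_wait(wl, cl);
}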

libbcachefs/nocow_locking.c
@@ -2,11 +2,10 @@

 #include "bcachefs.h"
 #include "bkey_methods.h"
+#include "closure.h"
 #include "nocow_locking.h"
 #include "util.h"

-#include <linux/closure.h>
-
 bool bch2_bucket_nocow_is_locked(struct bucket_nocow_lock_table *t, struct bpos bucket)
 {
 	u64 dev_bucket = bucket_to_u64(bucket);

libbcachefs/util.h
@@ -4,7 +4,6 @@

 #include <linux/bio.h>
 #include <linux/blkdev.h>
-#include <linux/closure.h>
 #include <linux/errno.h>
 #include <linux/freezer.h>
 #include <linux/kernel.h>
@@ -21,6 +20,7 @@
 #include <linux/vmalloc.h>
 #include <linux/workqueue.h>

+#include "closure.h"
 #include "mean_and_variance.h"
 #include "darray.h"

218 libbcachefs/vendor/closure.c vendored Normal file

@@ -0,0 +1,218 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Asynchronous refcounty things
*
* Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
* Copyright 2012 Google, Inc.
*/
#include "closure.h"
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include <linux/sched/debug.h>
static void closure_val_checks(struct closure *cl, unsigned new, int d)
{
unsigned count = new & CLOSURE_REMAINING_MASK;
if (WARN(new & CLOSURE_GUARD_MASK,
"closure %ps has guard bits set: %x (%u), delta %i",
cl->fn,
new, (unsigned) __fls(new & CLOSURE_GUARD_MASK), d))
new &= ~CLOSURE_GUARD_MASK;
WARN(!count && (new & ~(CLOSURE_DESTRUCTOR|CLOSURE_SLEEPING)),
"closure %ps ref hit 0 with incorrect flags set: %x (%u)",
cl->fn,
new, (unsigned) __fls(new));
}
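
/*
 * Outcome of a put, decided inside the cmpxchg loop in bch2_closure_sub():
 *
 * normal_put:	the refcount is still nonzero - nothing more to do
 * requeue:	the ref hit 0 with another stage to run (cl->fn set, or a
 *		sleeper to resume) and no destructor - take a fresh ref and
 *		run the next stage
 * done:	the closure is finished - run the destructor, if any, and
 *		put the parent
 *
 * Whatever the state, a sleeping waiter (CLOSURE_SLEEPING) is woken directly
 * instead of requeueing to a workqueue.
 */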
enum new_closure_state {
CLOSURE_normal_put,
CLOSURE_requeue,
CLOSURE_done,
};
/* For clearing flags with the same atomic op as a put */
void bch2_closure_sub(struct closure *cl, int v)
{
enum new_closure_state s;
struct task_struct *sleeper;
/* rcu_read_lock, atomic_read_acquire() are both for cl->sleeper: */
guard(rcu)();
int old = atomic_read_acquire(&cl->remaining), new;
do {
new = old - v;
if (new & CLOSURE_REMAINING_MASK) {
s = CLOSURE_normal_put;
} else {
if ((cl->fn || (new & CLOSURE_SLEEPING)) &&
!(new & CLOSURE_DESTRUCTOR)) {
s = CLOSURE_requeue;
new += CLOSURE_REMAINING_INITIALIZER;
} else
s = CLOSURE_done;
sleeper = new & CLOSURE_SLEEPING ? cl->sleeper : NULL;
new &= ~CLOSURE_SLEEPING;
}
closure_val_checks(cl, new, -v);
} while (!atomic_try_cmpxchg_release(&cl->remaining, &old, new));
if (s == CLOSURE_normal_put)
return;
if (sleeper) {
smp_mb();
wake_up_process(sleeper);
return;
}
if (s == CLOSURE_requeue) {
closure_queue(cl);
} else {
struct closure *parent = cl->parent;
closure_fn *destructor = cl->fn;
closure_debug_destroy(cl);
if (destructor)
destructor(&cl->work);
if (parent)
closure_put(parent);
}
}
/*
* closure_wake_up - wake up all closures on a wait list, without memory barrier
*/
void __bch2_closure_wake_up(struct closure_waitlist *wait_list)
{
struct llist_node *list;
struct closure *cl, *t;
struct llist_node *reverse = NULL;
list = llist_del_all(&wait_list->list);
/* We first reverse the list to preserve FIFO ordering and fairness */
reverse = llist_reverse_order(list);
/* Then do the wakeups */
llist_for_each_entry_safe(cl, t, reverse, list) {
closure_set_waiting(cl, 0);
bch2_closure_sub(cl, CLOSURE_WAITING + 1);
}
}
/**
 * closure_wait - add a closure to a waitlist
 * @waitlist: will own a ref on @cl, which will be released when
 * closure_wake_up() is called on @waitlist.
 * @cl: closure pointer.
 *
 * Returns: true if @cl was added to @waitlist, false if it was already on a
 * waitlist.
 */
bool bch2_closure_wait(struct closure_waitlist *waitlist, struct closure *cl)
{
if (atomic_read(&cl->remaining) & CLOSURE_WAITING)
return false;
closure_set_waiting(cl, _RET_IP_);
unsigned r = atomic_add_return(CLOSURE_WAITING + 1, &cl->remaining);
closure_val_checks(cl, r, CLOSURE_WAITING + 1);
llist_add(&cl->list, &waitlist->list);
return true;
}
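
/*
 * The CLOSURE_WAITING + 1 added in bch2_closure_wait() is dropped again in
 * __bch2_closure_wake_up(), via bch2_closure_sub(cl, CLOSURE_WAITING + 1):
 * a single atomic op both clears the flag and releases the waitlist's ref.
 */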
void __sched __bch2_closure_sync(struct closure *cl)
{
cl->sleeper = current;
bch2_closure_sub(cl,
CLOSURE_REMAINING_INITIALIZER -
CLOSURE_SLEEPING);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
break;
schedule();
}
__set_current_state(TASK_RUNNING);
}
/*
* closure_return_sync - finish running a closure, synchronously (i.e. waiting
* for outstanding get()s to finish) and returning once closure refcount is 0.
*
* Unlike closure_sync() this doesn't reinit the ref to 1; subsequent
* closure_get_not_zero() calls will fail.
*/
void __sched bch2_closure_return_sync(struct closure *cl)
{
cl->sleeper = current;
bch2_closure_sub(cl,
CLOSURE_REMAINING_INITIALIZER -
CLOSURE_DESTRUCTOR -
CLOSURE_SLEEPING);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
if (!(atomic_read(&cl->remaining) & CLOSURE_SLEEPING))
break;
schedule();
}
__set_current_state(TASK_RUNNING);
if (cl->parent)
closure_put(cl->parent);
}
int __sched __bch2_closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
int ret = 0;
cl->sleeper = current;
bch2_closure_sub(cl,
CLOSURE_REMAINING_INITIALIZER -
CLOSURE_SLEEPING);
while (1) {
set_current_state(TASK_UNINTERRUPTIBLE);
/*
* Carefully undo the continue_at() - but only if it
* hasn't completed, i.e. the final closure_put() hasn't
* happened yet:
*/
unsigned old = atomic_read(&cl->remaining), new;
if (!(old & CLOSURE_SLEEPING))
goto success;
if (!timeout) {
do {
if (!(old & CLOSURE_SLEEPING))
goto success;
new = old + CLOSURE_REMAINING_INITIALIZER - CLOSURE_SLEEPING;
closure_val_checks(cl, new, CLOSURE_REMAINING_INITIALIZER - CLOSURE_SLEEPING);
} while (!atomic_try_cmpxchg(&cl->remaining, &old, new));
ret = -ETIME;
break;
}
timeout = schedule_timeout(timeout);
}
success:
__set_current_state(TASK_RUNNING);
return ret;
}
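
As the comment above notes, bch2_closure_return_sync() waits for outstanding refs and leaves the refcount at 0, so closure_get_not_zero() fails afterwards. A minimal teardown sketch, assuming a hypothetical struct my_obj that embeds a closure:

struct my_obj {
	struct closure	cl;
	/* ... */
};

static void my_obj_exit(struct my_obj *obj)
{
	/*
	 * Sleeps until all outstanding closure_get() refs are dropped;
	 * after this returns, closure_get_not_zero(&obj->cl) fails:
	 */
	bch2_closure_return_sync(&obj->cl);
	kfree(obj);
}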

490 libbcachefs/vendor/closure.h vendored Normal file

@@ -0,0 +1,490 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CLOSURE_H
#define _LINUX_CLOSURE_H
#include <linux/llist.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/workqueue.h>
/*
* Closure is perhaps the most overused and abused term in computer science, but
* since I've been unable to come up with anything better you're stuck with it
* again.
*
* What are closures?
*
* They embed a refcount. The basic idea is they count "things that are in
* progress" - in flight bios, some other thread that's doing something else -
* anything you might want to wait on.
*
* The refcount may be manipulated with closure_get() and closure_put().
* closure_put() is where many of the interesting things happen, when it causes
* the refcount to go to 0.
*
* Closures can be used to wait on things both synchronously and asynchronously,
* and synchronous and asynchronous use can be mixed without restriction. To
* wait synchronously, use closure_sync() - you will sleep until your closure's
* refcount hits 1.
*
* To wait asynchronously, use
* continue_at(cl, next_function, workqueue);
*
* passing it, as you might expect, the function to run when nothing is pending
* and the workqueue to run that function out of.
*
* continue_at() also, critically, requires a 'return' immediately following the
* location where this macro is referenced, to return to the calling function.
* There's good reason for this.
*
* To use closures asynchronously safely, they must always, while running, have
* a refcount owned by the thread that is running them. Otherwise, suppose
* you submit some bios and wish to have a function run when they all complete:
*
* foo_endio(struct bio *bio)
* {
* closure_put(cl);
* }
*
* closure_init(cl);
*
* do_stuff();
* closure_get(cl);
* bio1->bi_endio = foo_endio;
* bio_submit(bio1);
*
* do_more_stuff();
* closure_get(cl);
* bio2->bi_endio = foo_endio;
* bio_submit(bio2);
*
* continue_at(cl, complete_some_read, system_wq);
*
* If the closure's refcount started at 0, complete_some_read() could run before the
* second bio was submitted - which is almost always not what you want! More
* importantly, it wouldn't be possible to say whether the original thread or
* complete_some_read()'s thread owned the closure - and whatever state it was
* associated with!
*
* So, closure_init() initializes a closure's refcount to 1 - and when a
* closure_fn is run, the refcount will be reset to 1 first.
*
* Then, the rule is - if you got the refcount with closure_get(), release it
* with closure_put() (i.e., in a bio->bi_endio function). If you have a refcount
* on a closure because you called closure_init() or you were run out of a
* closure - _always_ use continue_at(). Doing so consistently will help
* eliminate an entire class of particularly pernicious races.
*
* Lastly, you might have a wait list dedicated to a specific event, and have no
* need for specifying the condition - you just want to wait until someone runs
* closure_wake_up() on the appropriate wait list. In that case, just use
* closure_wait(). It will return either true or false, depending on whether the
* closure was already on a wait list or not - a closure can only be on one wait
* list at a time.
*
* Parents:
*
* closure_init() takes two arguments - it takes the closure to initialize, and
* a (possibly null) parent.
*
* If parent is non null, the new closure will have a refcount for its lifetime;
* a closure is considered to be "finished" when its refcount hits 0 and the
* function to run is null. Hence
*
* continue_at(cl, NULL, NULL);
*
* returns up the (spaghetti) stack of closures, precisely like normal return
* returns up the C stack. continue_at() with non null fn is better thought of
* as doing a tail call.
*
* All this implies that a closure should typically be embedded in a particular
* struct (which its refcount will normally control the lifetime of), and that
* struct can very much be thought of as a stack frame.
*/
struct closure;
struct closure_syncer;
typedef void (closure_fn) (struct work_struct *);
extern struct dentry *bcache_debug;
struct closure_waitlist {
struct llist_head list;
};
enum closure_state {
/*
* CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
* the thread that owns the closure, and cleared by the thread that's
* waking up the closure.
*
* The rest are for debugging and don't affect behaviour:
*
* CLOSURE_RUNNING: Set when a closure is running (i.e. by
* closure_init() and when closure_put() runs the next function), and
* must be cleared before remaining hits 0. Primarily to help guard
* against incorrect usage and accidentally transferring references.
* continue_at() and closure_return() clear it for you, if you're doing
* something unusual you can use closure_set_dead() which also helps
* annotate where references are being transferred.
*/
CLOSURE_BITS_START = (1U << 24),
CLOSURE_DESTRUCTOR = (1U << 24),
CLOSURE_SLEEPING = (1U << 26),
CLOSURE_WAITING = (1U << 28),
CLOSURE_RUNNING = (1U << 30),
};
#define CLOSURE_GUARD_MASK \
(((CLOSURE_DESTRUCTOR|CLOSURE_SLEEPING|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)|(CLOSURE_BITS_START >> 1))
#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
struct closure {
union {
struct {
struct workqueue_struct *wq;
struct task_struct *sleeper;
struct llist_node list;
closure_fn *fn;
};
struct work_struct work;
};
struct closure *parent;
atomic_t remaining;
#ifdef CONFIG_DEBUG_CLOSURES
#define CLOSURE_MAGIC_DEAD 0xc054dead
#define CLOSURE_MAGIC_ALIVE 0xc054a11e
#define CLOSURE_MAGIC_STACK 0xc05451cc
unsigned int magic;
struct list_head all;
unsigned long ip;
unsigned long waiting_on;
#endif
};
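
/*
 * Note on the union above: fn must alias work.func so that a closure can be
 * queued as a regular work item unchanged - closure_queue() asserts this
 * layout with a BUILD_BUG_ON().
 */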
void bch2_closure_sub(struct closure *cl, int v);
void __bch2_closure_wake_up(struct closure_waitlist *list);
bool bch2_closure_wait(struct closure_waitlist *list, struct closure *cl);
void __bch2_closure_sync(struct closure *cl);
/*
* closure_put - decrement a closure's refcount
*/
static inline void closure_put(struct closure *cl)
{
bch2_closure_sub(cl, 1);
}
static inline unsigned closure_nr_remaining(struct closure *cl)
{
return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
}
/**
* closure_sync - sleep until a closure has nothing left to wait on
*
* Sleeps until the refcount hits 1 - the thread that's running the closure owns
* the last refcount.
*/
static inline void closure_sync(struct closure *cl)
{
if (closure_nr_remaining(cl) > 1)
__bch2_closure_sync(cl);
}
int __bch2_closure_sync_timeout(struct closure *cl, unsigned long timeout);
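
/*
 * closure_sync_timeout - like closure_sync(), but gives up after @timeout
 * jiffies: returns 0 on success, -ETIME if the timeout expired first.
 */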
static inline int closure_sync_timeout(struct closure *cl, unsigned long timeout)
{
return closure_nr_remaining(cl) > 1
? __bch2_closure_sync_timeout(cl, timeout)
: 0;
}
//#ifdef CONFIG_DEBUG_CLOSURES
#if 0
void bch2_closure_debug_create(struct closure *cl);
void closure_debug_destroy(struct closure *cl);
#else
static inline void bch2_closure_debug_create(struct closure *cl) {}
static inline void closure_debug_destroy(struct closure *cl) {}
#endif
static inline void closure_set_ip(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
cl->ip = _THIS_IP_;
#endif
}
static inline void closure_set_ret_ip(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
cl->ip = _RET_IP_;
#endif
}
static inline void closure_set_waiting(struct closure *cl, unsigned long f)
{
#ifdef CONFIG_DEBUG_CLOSURES
cl->waiting_on = f;
#endif
}
static inline void closure_set_stopped(struct closure *cl)
{
atomic_sub(CLOSURE_RUNNING, &cl->remaining);
}
static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
struct workqueue_struct *wq)
{
closure_set_ip(cl);
cl->fn = fn;
cl->wq = wq;
}
static inline void closure_queue(struct closure *cl)
{
struct workqueue_struct *wq = cl->wq;
/*
 * Changes made to closure, work_struct, or a couple of other structs
 * may cause work.func to no longer point at the right location.
 */
BUILD_BUG_ON(offsetof(struct closure, fn)
!= offsetof(struct work_struct, func));
if (wq) {
INIT_WORK(&cl->work, cl->work.func);
BUG_ON(!queue_work(wq, &cl->work));
} else
cl->fn(&cl->work);
}
/**
* closure_get - increment a closure's refcount
*/
static inline void closure_get(struct closure *cl)
{
#ifdef CONFIG_DEBUG_CLOSURES
BUG_ON((atomic_inc_return(&cl->remaining) &
CLOSURE_REMAINING_MASK) <= 1);
#else
atomic_inc(&cl->remaining);
#endif
}
/**
 * closure_get_not_zero - increment a closure's refcount, unless it's zero
 *
 * Returns: true on success, false if the refcount had already hit zero.
 */
static inline bool closure_get_not_zero(struct closure *cl)
{
unsigned old = atomic_read(&cl->remaining);
do {
if (!(old & CLOSURE_REMAINING_MASK))
return false;
} while (!atomic_try_cmpxchg_acquire(&cl->remaining, &old, old + 1));
return true;
}
/**
* closure_init - Initialize a closure, setting the refcount to 1
* @cl: closure to initialize
* @parent: parent of the new closure. cl will take a refcount on it for its
* lifetime; may be NULL.
*/
static inline void closure_init(struct closure *cl, struct closure *parent)
{
cl->fn = NULL;
cl->parent = parent;
if (parent)
closure_get(parent);
atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
bch2_closure_debug_create(cl);
closure_set_ip(cl);
}
static inline void closure_init_stack(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
#ifdef CONFIG_DEBUG_CLOSURES
cl->magic = CLOSURE_MAGIC_STACK;
#endif
}
static inline void closure_init_stack_release(struct closure *cl)
{
memset(cl, 0, sizeof(struct closure));
atomic_set_release(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
#ifdef CONFIG_DEBUG_CLOSURES
cl->magic = CLOSURE_MAGIC_STACK;
#endif
}
/**
* closure_wake_up - wake up all closures on a wait list,
* with memory barrier
*/
static inline void closure_wake_up(struct closure_waitlist *list)
{
/* Memory barrier for the wait list */
smp_mb();
__bch2_closure_wake_up(list);
}
#define CLOSURE_CALLBACK(name) void name(struct work_struct *ws)
#define closure_type(name, type, member) \
struct closure *cl = container_of(ws, struct closure, work); \
type *name = container_of(cl, type, member)
/**
* continue_at - jump to another function with barrier
*
* After @cl is no longer waiting on anything (i.e. all outstanding refs have
* been dropped with closure_put()), it will resume execution at @fn running out
* of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
*
* This is because after calling continue_at() you no longer have a ref on @cl,
* and whatever @cl owns may be freed out from under you - a running closure fn
* has a ref on its own closure which continue_at() drops.
*
* Note you are expected to immediately return after using this macro.
*/
#define continue_at(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
bch2_closure_sub(_cl, CLOSURE_RUNNING + 1); \
} while (0)
/**
* closure_return - finish execution of a closure
*
* This is used to indicate that @cl is finished: when all outstanding refs on
* @cl have been dropped @cl's ref on its parent closure (as passed to
* closure_init()) will be dropped, if one was specified - thus this can be
* thought of as returning to the parent closure.
*/
#define closure_return(_cl) continue_at((_cl), NULL, NULL)
void bch2_closure_return_sync(struct closure *cl);
/**
* continue_at_nobarrier - jump to another function without barrier
*
* Causes @fn to be executed out of @cl, in @wq context (or called directly if
* @wq is NULL).
*
* The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
* thus it's not safe to touch anything protected by @cl after a
* continue_at_nobarrier().
*/
#define continue_at_nobarrier(_cl, _fn, _wq) \
do { \
set_closure_fn(_cl, _fn, _wq); \
closure_queue(_cl); \
} while (0)
/**
* closure_return_with_destructor - finish execution of a closure,
* with destructor
*
* Works like closure_return(), except @destructor will be called when all
* outstanding refs on @cl have been dropped; @destructor may be used to safely
* free the memory occupied by @cl, and it is called with the ref on the parent
* closure still held - so @destructor could safely return an item to a
* freelist protected by @cl's parent.
*/
#define closure_return_with_destructor(_cl, _destructor) \
do { \
set_closure_fn(_cl, _destructor, NULL); \
bch2_closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
} while (0)
/**
* closure_call - execute @fn out of a new, uninitialized closure
*
* Typically used when running out of one closure, and we want to run @fn
* asynchronously out of a new closure - @parent will then wait for @cl to
* finish.
*/
static inline void closure_call(struct closure *cl, closure_fn fn,
struct workqueue_struct *wq,
struct closure *parent)
{
closure_init(cl, parent);
continue_at_nobarrier(cl, fn, wq);
}
#define __closure_wait_event(waitlist, _cond) \
do { \
struct closure cl; \
\
closure_init_stack(&cl); \
\
while (1) { \
bch2_closure_wait(waitlist, &cl); \
if (_cond) \
break; \
closure_sync(&cl); \
} \
closure_wake_up(waitlist); \
closure_sync(&cl); \
} while (0)
#define closure_wait_event(waitlist, _cond) \
do { \
if (!(_cond)) \
__closure_wait_event(waitlist, _cond); \
} while (0)
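
/*
 * Example (sketch, with a hypothetical waitlist @wl and flag @done): a waiter
 * does
 *
 *	closure_wait_event(&wl, READ_ONCE(done));
 *
 * while the thread making the condition true does
 *
 *	WRITE_ONCE(done, true);
 *	closure_wake_up(&wl);
 */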
#define __closure_wait_event_timeout(waitlist, _cond, _until) \
({ \
struct closure cl; \
long _t; \
\
closure_init_stack(&cl); \
\
while (1) { \
bch2_closure_wait(waitlist, &cl); \
if (_cond) { \
_t = max_t(long, 1L, _until - jiffies); \
break; \
} \
_t = max_t(long, 0L, _until - jiffies); \
if (!_t) \
break; \
closure_sync_timeout(&cl, _t); \
} \
closure_wake_up(waitlist); \
closure_sync(&cl); \
_t; \
})
/*
* Returns 0 if timeout expired, remaining time in jiffies (at least 1) if
* condition became true
*/
#define closure_wait_event_timeout(waitlist, _cond, _timeout) \
({ \
unsigned long _until = jiffies + _timeout; \
(_cond) \
? max_t(long, 1L, _until - jiffies) \
: __closure_wait_event_timeout(waitlist, _cond, _until);\
})
#endif /* _LINUX_CLOSURE_H */
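
A compact sketch of the asynchronous pattern described in the header comment above: struct foo, foo_endio() and complete_some_read() are hypothetical, and error handling is omitted.

struct foo {
	struct closure	cl;
	struct bio	*bio1, *bio2;
};

static void foo_endio(struct bio *bio)
{
	struct foo *f = bio->bi_private;

	closure_put(&f->cl);	/* drop the ref taken before submission */
}

static CLOSURE_CALLBACK(complete_some_read)
{
	closure_type(f, struct foo, cl);

	/* Only runs once both bios have completed. */
	closure_return(&f->cl);	/* drops the parent's ref, if any */
}

static void foo_read(struct foo *f, struct closure *parent)
{
	closure_init(&f->cl, parent);	/* refcount starts at 1 */

	closure_get(&f->cl);		/* ref owned by bio1's endio */
	f->bio1->bi_end_io = foo_endio;
	f->bio1->bi_private = f;
	submit_bio(f->bio1);

	closure_get(&f->cl);		/* ref owned by bio2's endio */
	f->bio2->bi_end_io = foo_endio;
	f->bio2->bi_private = f;
	submit_bio(f->bio2);

	/*
	 * Drop our own ref; complete_some_read() runs out of system_wq once
	 * the refcount hits 0. Nothing after this may touch @f.
	 */
	continue_at(&f->cl, complete_some_read, system_wq);
}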