Update bcachefs sources to a5e71b8200 bcachefs: Allocator startup fixes/refactoring

Kent Overstreet 2019-03-02 14:45:50 -05:00
parent 35ab359897
commit 70bb5ab7a8
10 changed files with 392 additions and 358 deletions

View File

@@ -1 +1 @@
dab980b6628bf07650b8cf1528dad715842fc109
a5e71b82006fdf563190c41955c2b462854af610

View File

@@ -1466,24 +1466,16 @@ int bch2_dev_allocator_start(struct bch_dev *ca)
return 0;
}
static void flush_held_btree_writes(struct bch_fs *c)
static bool flush_held_btree_writes(struct bch_fs *c)
{
struct bucket_table *tbl;
struct rhash_head *pos;
struct btree *b;
bool nodes_blocked;
bool nodes_unwritten;
size_t i;
struct closure cl;
closure_init_stack(&cl);
clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
again:
pr_debug("flushing dirty btree nodes");
cond_resched();
closure_wait(&c->btree_interior_update_wait, &cl);
nodes_blocked = false;
nodes_unwritten = false;
rcu_read_lock();
for_each_cached_btree(b, c, tbl, i, pos)
@@ -1495,24 +1487,18 @@ again:
six_unlock_read(&b->lock);
goto again;
} else {
nodes_blocked = true;
nodes_unwritten = true;
}
}
rcu_read_unlock();
if (c->btree_roots_dirty)
if (c->btree_roots_dirty) {
bch2_journal_meta(&c->journal);
if (nodes_blocked) {
closure_sync(&cl);
goto again;
}
closure_wake_up(&c->btree_interior_update_wait);
closure_sync(&cl);
closure_wait_event(&c->btree_interior_update_wait,
!bch2_btree_interior_updates_nr_pending(c));
return !nodes_unwritten &&
!bch2_btree_interior_updates_nr_pending(c);
}
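
The bool return exists so the caller can sleep on flush_held_btree_writes() with closure_wait_event(), re-checking the condition on every wakeup (see the err path in __bch2_fs_allocator_start() below). A minimal userspace model of that sleep-on-predicate pattern, with illustrative names and a pthread condvar standing in for the closure waitlist (none of this is patch code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock  = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  waitq = PTHREAD_COND_INITIALIZER;
static int nodes_unwritten;		/* protected by lock */

/* stands in for flush_held_btree_writes() reporting "everything flushed": */
static bool flush_done(void)
{
	return !nodes_unwritten;
}

/* stands in for closure_wait_event(&c->btree_interior_update_wait, ...): */
static void wait_for_flush(void)
{
	pthread_mutex_lock(&lock);
	while (!flush_done())
		pthread_cond_wait(&waitq, &lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	nodes_unwritten = 0;	/* nothing dirty, so the condition already holds */
	wait_for_flush();
	return 0;
}
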
static void allocator_start_issue_discards(struct bch_fs *c)
@@ -1546,25 +1532,24 @@ static int resize_free_inc(struct bch_dev *ca)
return 0;
}
static int __bch2_fs_allocator_start(struct bch_fs *c)
static bool bch2_fs_allocator_start_fast(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned dev_iter;
u64 journal_seq = 0;
long bu;
int ret = 0;
bool ret = true;
if (test_alloc_startup(c))
goto not_enough;
return false;
down_read(&c->gc_lock);
/* Scan for buckets that are already invalidated: */
for_each_rw_member(ca, c, dev_iter) {
struct bucket_array *buckets;
struct bucket_mark m;
long bu;
down_read(&ca->bucket_lock);
percpu_down_read_preempt_disable(&c->mark_lock);
buckets = bucket_array(ca);
for (bu = buckets->first_bucket;
@@ -1572,13 +1557,16 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
m = READ_ONCE(buckets->b[bu].mark);
if (!buckets->b[bu].gen_valid ||
!test_bit(bu, ca->buckets_nouse) ||
!is_available_bucket(m) ||
m.cached_sectors)
m.cached_sectors ||
(ca->buckets_nouse &&
test_bit(bu, ca->buckets_nouse)))
continue;
percpu_down_read_preempt_disable(&c->mark_lock);
bch2_mark_alloc_bucket(c, ca, bu, true,
gc_pos_alloc(c, NULL), 0);
percpu_up_read_preempt_enable(&c->mark_lock);
fifo_push(&ca->free_inc, bu);
@@ -1587,19 +1575,28 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
if (fifo_full(&ca->free[RESERVE_BTREE]))
break;
}
percpu_up_read_preempt_enable(&c->mark_lock);
up_read(&ca->bucket_lock);
}
up_read(&c->gc_lock);
/* did we find enough buckets? */
for_each_rw_member(ca, c, dev_iter)
if (!fifo_full(&ca->free[RESERVE_BTREE])) {
percpu_ref_put(&ca->io_ref);
goto not_enough;
}
if (!fifo_full(&ca->free[RESERVE_BTREE]))
ret = false;
return ret;
}
static int __bch2_fs_allocator_start(struct bch_fs *c)
{
struct bch_dev *ca;
unsigned dev_iter;
u64 journal_seq = 0;
bool wrote;
long bu;
int ret = 0;
return 0;
not_enough:
pr_debug("not enough empty buckets; scanning for reclaimable buckets");
/*
@@ -1613,8 +1610,9 @@ not_enough:
*/
set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
while (1) {
bool wrote = false;
down_read(&c->gc_lock);
do {
wrote = false;
for_each_rw_member(ca, c, dev_iter) {
find_reclaimable_buckets(c, ca);
@@ -1624,7 +1622,8 @@ not_enough:
ret = resize_free_inc(ca);
if (ret) {
percpu_ref_put(&ca->io_ref);
return ret;
up_read(&c->gc_lock);
goto err;
}
bch2_invalidate_one_bucket(c, ca, bu,
@@ -1650,27 +1649,26 @@ not_enough:
* enough buckets, so just scan and loop again as long as it
* made some progress:
*/
if (!wrote && ret)
return ret;
if (!wrote && !ret)
break;
}
} while (wrote);
up_read(&c->gc_lock);
if (ret)
goto err;
pr_debug("flushing journal");
ret = bch2_journal_flush(&c->journal);
if (ret)
return ret;
goto err;
pr_debug("issuing discards");
allocator_start_issue_discards(c);
err:
clear_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
closure_wait_event(&c->btree_interior_update_wait,
flush_held_btree_writes(c));
set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
/* now flush dirty btree nodes: */
flush_held_btree_writes(c);
return 0;
return ret;
}
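
The do { ... } while (wrote) structure above is a plain make-progress loop: rescan as long as the previous pass wrote anything, and stop on the first pass that makes no progress. A standalone sketch of just that termination rule, with an invented work_remaining counter standing in for reclaimable buckets (illustrative only, not patch code):

#include <assert.h>
#include <stdbool.h>

static int work_remaining = 3;	/* stands in for not-yet-reclaimed buckets */

static bool one_pass(void)
{
	if (!work_remaining)
		return false;	/* wrote nothing: a full pass made no progress */
	work_remaining--;
	return true;		/* made progress: scan again */
}

int main(void)
{
	bool wrote;

	do {
		wrote = one_pass();
	} while (wrote);

	assert(work_remaining == 0);
	return 0;
}
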
int bch2_fs_allocator_start(struct bch_fs *c)
@@ -1679,13 +1677,13 @@ int bch2_fs_allocator_start(struct bch_fs *c)
unsigned i;
int ret;
down_read(&c->gc_lock);
ret = __bch2_fs_allocator_start(c);
up_read(&c->gc_lock);
ret = bch2_fs_allocator_start_fast(c) ? 0 :
__bch2_fs_allocator_start(c);
if (ret)
return ret;
set_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags);
for_each_rw_member(ca, c, i) {
ret = bch2_dev_allocator_start(ca);
if (ret) {

View File

@@ -84,17 +84,12 @@ void bch2_journal_halt(struct journal *j)
journal_wake(j);
closure_wake_up(&journal_cur_buf(j)->wait);
closure_wake_up(&journal_prev_buf(j)->wait);
}
/* journal entry close/open: */
void __bch2_journal_buf_put(struct journal *j, bool need_write_just_set)
{
struct journal_buf *w = journal_prev_buf(j);
atomic_dec_bug(&journal_seq_pin(j, le64_to_cpu(w->data->seq))->count);
if (!need_write_just_set &&
test_bit(JOURNAL_NEED_WRITE, &j->flags))
bch2_time_stats_update(j->delay_time,
@@ -175,7 +170,6 @@ static bool __journal_entry_close(struct journal *j)
* Hence, we want update/set last_seq on the current journal entry right
* before we open a new one:
*/
bch2_journal_reclaim_fast(j);
buf->data->last_seq = cpu_to_le64(journal_last_seq(j));
if (journal_entry_empty(buf->data))
@@ -189,8 +183,8 @@ static bool __journal_entry_close(struct journal *j)
cancel_delayed_work(&j->write_work);
/* ugh - might be called from __journal_res_get() under wait_event() */
__set_current_state(TASK_RUNNING);
bch2_journal_space_available(j);
bch2_journal_buf_put(j, old.idx, set_need_write);
return true;
}
@@ -220,7 +214,7 @@ static int journal_entry_open(struct journal *j)
{
struct journal_buf *buf = journal_cur_buf(j);
union journal_res_state old, new;
int u64s, ret;
int u64s;
u64 v;
lockdep_assert_held(&j->lock);
@@ -229,12 +223,10 @@ static int journal_entry_open(struct journal *j)
if (j->blocked)
return -EAGAIN;
if (!fifo_free(&j->pin))
return -ENOSPC;
if (j->cur_entry_error)
return j->cur_entry_error;
ret = bch2_journal_space_available(j);
if (ret)
return ret;
BUG_ON(!j->cur_entry_sectors);
buf->u64s_reserved = j->entry_u64s_reserved;
buf->disk_sectors = j->cur_entry_sectors;
@@ -256,6 +248,8 @@ static int journal_entry_open(struct journal *j)
do {
old.v = new.v = v;
EBUG_ON(journal_state_count(new, new.idx));
if (old.cur_entry_offset == JOURNAL_ENTRY_ERROR_VAL)
return -EROFS;
@@ -386,7 +380,8 @@ retry:
* freezing:
*/
trace_journal_full(c);
bch2_journal_reclaim_work(&j->reclaim_work.work);
if (!(flags & JOURNAL_RES_GET_NONBLOCK))
bch2_journal_reclaim_work(&j->reclaim_work.work);
ret = -EAGAIN;
}
@@ -408,7 +403,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
{
int ret;
wait_event(j->wait,
closure_wait_event(&j->async_wait,
(ret = __journal_res_get(j, res, flags)) != -EAGAIN ||
(flags & JOURNAL_RES_GET_NONBLOCK));
return ret;
@@ -429,7 +424,7 @@ void bch2_journal_entry_res_resize(struct journal *j,
if (d <= 0)
goto out;
j->cur_entry_u64s -= d;
j->cur_entry_u64s = max_t(int, 0, j->cur_entry_u64s - d);
smp_mb();
state = READ_ONCE(j->reservations);
@@ -966,6 +961,7 @@ void bch2_fs_journal_start(struct journal *j)
c->last_bucket_seq_cleanup = journal_cur_seq(j);
bch2_journal_space_available(j);
spin_unlock(&j->lock);
/*
@@ -975,7 +971,7 @@ void bch2_fs_journal_start(struct journal *j)
*/
bch2_journal_seq_blacklist_write(j);
queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
}
/* init/exit: */
@@ -1078,35 +1074,54 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
{
struct printbuf out = _PBUF(buf, PAGE_SIZE);
struct bch_fs *c = container_of(j, struct bch_fs, journal);
union journal_res_state *s = &j->reservations;
union journal_res_state s;
struct bch_dev *ca;
unsigned iter;
rcu_read_lock();
spin_lock(&j->lock);
s = READ_ONCE(j->reservations);
pr_buf(&out,
"active journal entries:\t%llu\n"
"seq:\t\t\t%llu\n"
"last_seq:\t\t%llu\n"
"last_seq_ondisk:\t%llu\n"
"reservation count:\t%u\n"
"reservation offset:\t%u\n"
"current entry u64s:\t%u\n"
"io in flight:\t\t%i\n"
"need write:\t\t%i\n"
"dirty:\t\t\t%i\n"
"replay done:\t\t%i\n",
"current entry:\t\t",
fifo_used(&j->pin),
journal_cur_seq(j),
journal_last_seq(j),
j->last_seq_ondisk,
journal_state_count(*s, s->idx),
s->cur_entry_offset,
j->cur_entry_u64s,
s->prev_buf_unwritten,
j->last_seq_ondisk);
switch (s.cur_entry_offset) {
case JOURNAL_ENTRY_ERROR_VAL:
pr_buf(&out, "error\n");
break;
case JOURNAL_ENTRY_CLOSED_VAL:
pr_buf(&out, "closed\n");
break;
default:
pr_buf(&out, "%u/%u\n",
s.cur_entry_offset,
j->cur_entry_u64s);
break;
}
pr_buf(&out,
"current entry refs:\t%u\n"
"prev entry unwritten:\t",
journal_state_count(s, s.idx));
if (s.prev_buf_unwritten)
pr_buf(&out, "yes, ref %u\n",
journal_state_count(s, !s.idx));
else
pr_buf(&out, "no\n");
pr_buf(&out,
"need write:\t\t%i\n"
"replay done:\t\t%i\n",
test_bit(JOURNAL_NEED_WRITE, &j->flags),
journal_entry_is_open(j),
test_bit(JOURNAL_REPLAY_DONE, &j->flags));
for_each_member_device_rcu(ca, c, iter,
@@ -1119,9 +1134,12 @@ ssize_t bch2_journal_print_debug(struct journal *j, char *buf)
pr_buf(&out,
"dev %u:\n"
"\tnr\t\t%u\n"
"\tavailable\t%u:%u\n"
"\tcur_idx\t\t%u (seq %llu)\n"
"\tlast_idx\t%u (seq %llu)\n",
iter, ja->nr,
bch2_journal_dev_buckets_available(j, ja),
ja->sectors_free,
ja->cur_idx, ja->bucket_seq[ja->cur_idx],
ja->last_idx, ja->bucket_seq[ja->last_idx]);
}

View File

@@ -289,6 +289,8 @@ static inline int journal_res_get_fast(struct journal *j,
if (new.cur_entry_offset + res->u64s > j->cur_entry_u64s)
return 0;
EBUG_ON(!journal_state_count(new, new.idx));
if (flags & JOURNAL_RES_GET_CHECK)
return 1;

View File

@@ -825,7 +825,6 @@ fsck_err:
int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
{
struct journal *j = &c->journal;
struct journal_entry_pin_list *pin_list;
struct bkey_i *k, *_n;
struct jset_entry *entry;
struct journal_replay *i, *n;
@@ -867,10 +866,7 @@ int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
cond_resched();
}
pin_list = journal_seq_pin(j, j->replay_journal_seq);
if (atomic_dec_and_test(&pin_list->count))
journal_wake(j);
bch2_journal_pin_put(j, j->replay_journal_seq);
}
j->replay_journal_seq = 0;
@@ -885,101 +881,6 @@ err:
/* journal write: */
static unsigned journal_dev_buckets_available(struct journal *j,
struct journal_device *ja)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
unsigned next = (ja->cur_idx + 1) % ja->nr;
unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;
/*
* Allocator startup needs some journal space before we can do journal
* replay:
*/
if (available &&
test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags))
available--;
/*
* Don't use the last bucket unless writing the new last_seq
* will make another bucket available:
*/
if (available &&
journal_last_seq(j) <= ja->bucket_seq[ja->last_idx])
--available;
return available;
}
int bch2_journal_space_available(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
unsigned sectors_next_entry = UINT_MAX;
unsigned i, nr_online = 0, nr_devs = 0;
unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
? journal_prev_buf(j)->sectors
: 0;
int ret = 0;
lockdep_assert_held(&j->lock);
rcu_read_lock();
for_each_member_device_rcu(ca, c, i,
&c->rw_devs[BCH_DATA_JOURNAL]) {
struct journal_device *ja = &ca->journal;
unsigned buckets_this_device, sectors_this_device;
if (!ja->nr)
continue;
nr_online++;
buckets_this_device = journal_dev_buckets_available(j, ja);
sectors_this_device = ja->sectors_free;
/*
* Note that we don't allocate the space for a journal entry
* until we write it out - thus, account for it here:
*/
if (unwritten_sectors >= sectors_this_device) {
if (!buckets_this_device)
continue;
buckets_this_device--;
sectors_this_device = ca->mi.bucket_size;
}
sectors_this_device -= unwritten_sectors;
if (buckets_this_device)
sectors_this_device = ca->mi.bucket_size;
if (!sectors_this_device)
continue;
sectors_next_entry = min(sectors_next_entry,
sectors_this_device);
nr_devs++;
}
rcu_read_unlock();
if (nr_online < c->opts.metadata_replicas_required) {
ret = -EROFS;
sectors_next_entry = 0;
} else if (!sectors_next_entry ||
nr_devs < min_t(unsigned, nr_online,
c->opts.metadata_replicas)) {
ret = -ENOSPC;
sectors_next_entry = 0;
}
WRITE_ONCE(j->cur_entry_sectors, sectors_next_entry);
return ret;
}
static void __journal_write_alloc(struct journal *j,
struct journal_buf *w,
struct dev_alloc_list *devs_sorted,
@@ -1053,7 +954,6 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w,
devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe,
&c->rw_devs[BCH_DATA_JOURNAL]);
spin_lock(&j->lock);
__journal_write_alloc(j, w, &devs_sorted,
sectors, &replicas, replicas_want);
@@ -1069,7 +969,7 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w,
if (sectors > ja->sectors_free &&
sectors <= ca->mi.bucket_size &&
journal_dev_buckets_available(j, ja)) {
bch2_journal_dev_buckets_available(j, ja)) {
ja->cur_idx = (ja->cur_idx + 1) % ja->nr;
ja->sectors_free = ca->mi.bucket_size;
}
@@ -1078,7 +978,6 @@ static int journal_write_alloc(struct journal *j, struct journal_buf *w,
__journal_write_alloc(j, w, &devs_sorted,
sectors, &replicas, replicas_want);
done:
spin_unlock(&j->lock);
rcu_read_unlock();
return replicas >= c->opts.metadata_replicas_required ? 0 : -EROFS;
@@ -1183,7 +1082,7 @@ static void journal_write_done(struct closure *cl)
* Must come before signaling write completion, for
* bch2_fs_journal_stop():
*/
mod_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
mod_delayed_work(c->journal_reclaim_wq, &j->reclaim_work, 0);
out:
/* also must come before signalling write completion: */
closure_debug_destroy(cl);
@@ -1237,6 +1136,9 @@ void bch2_journal_write(struct closure *cl)
struct bch_extent_ptr *ptr;
bool validate_before_checksum = false;
unsigned i, sectors, bytes, u64s;
int ret;
bch2_journal_pin_put(j, le64_to_cpu(w->data->seq));
journal_buf_realloc(j, w);
jset = w->data;
@@ -1292,7 +1194,23 @@ void bch2_journal_write(struct closure *cl)
bytes = vstruct_bytes(jset);
memset((void *) jset + bytes, 0, (sectors << 9) - bytes);
if (journal_write_alloc(j, w, sectors)) {
spin_lock(&j->lock);
ret = journal_write_alloc(j, w, sectors);
/*
* write is allocated, no longer need to account for it in
* bch2_journal_space_available():
*/
w->sectors = 0;
/*
* journal entry has been compacted and allocated, recalculate space
* available:
*/
bch2_journal_space_available(j);
spin_unlock(&j->lock);
if (ret) {
bch2_journal_halt(j);
bch_err(c, "Unable to allocate journal write");
bch2_fatal_error(c);
@@ -1300,12 +1218,6 @@ void bch2_journal_write(struct closure *cl)
return;
}
/*
* write is allocated, no longer need to account for it in
* bch2_journal_entry_sectors:
*/
w->sectors = 0;
/*
* XXX: we really should just disable the entire journal in nochanges
* mode

View File

@@ -39,7 +39,6 @@ int bch2_journal_read(struct bch_fs *, struct list_head *);
void bch2_journal_entries_free(struct list_head *);
int bch2_journal_replay(struct bch_fs *, struct list_head *);
int bch2_journal_space_available(struct journal *);
void bch2_journal_write(struct closure *);
#endif /* _BCACHEFS_JOURNAL_IO_H */

View File

@@ -1,15 +1,213 @@
#include "bcachefs.h"
#include "journal.h"
#include "journal_io.h"
#include "journal_reclaim.h"
#include "replicas.h"
#include "super.h"
/* Free space calculations: */
unsigned bch2_journal_dev_buckets_available(struct journal *j,
struct journal_device *ja)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
unsigned next = (ja->cur_idx + 1) % ja->nr;
unsigned available = (ja->last_idx + ja->nr - next) % ja->nr;
/*
* Allocator startup needs some journal space before we can do journal
* replay:
*/
if (available &&
test_bit(BCH_FS_ALLOCATOR_STARTED, &c->flags))
available--;
/*
* Don't use the last bucket unless writing the new last_seq
* will make another bucket available:
*/
if (available &&
journal_last_seq(j) <= ja->bucket_seq[ja->last_idx])
--available;
return available;
}
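
The ring arithmetic in bch2_journal_dev_buckets_available() is easiest to verify with concrete numbers. A standalone snippet (not patch code) covering just the base calculation, before the two reservation decrements above:

#include <assert.h>

/* Free journal buckets on a ring of nr buckets: everything from the bucket
 * after cur_idx up to, but not including, last_idx (the oldest dirty one). */
static unsigned buckets_available(unsigned nr, unsigned cur_idx, unsigned last_idx)
{
	unsigned next = (cur_idx + 1) % nr;

	return (last_idx + nr - next) % nr;
}

int main(void)
{
	/* nr = 8, writing bucket 5, oldest dirty bucket 2: free are 6,7,0,1 */
	assert(buckets_available(8, 5, 2) == 4);
	/* an otherwise clean device never reports all nr buckets free,
	 * since cur_idx itself is in use: */
	assert(buckets_available(8, 3, 3) == 7);
	return 0;
}
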
void bch2_journal_space_available(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
unsigned sectors_next_entry = UINT_MAX;
unsigned sectors_total = UINT_MAX;
unsigned max_entry_size = min(j->buf[0].buf_size >> 9,
j->buf[1].buf_size >> 9);
unsigned i, nr_online = 0, nr_devs = 0;
unsigned unwritten_sectors = j->reservations.prev_buf_unwritten
? journal_prev_buf(j)->sectors
: 0;
int ret = 0;
lockdep_assert_held(&j->lock);
rcu_read_lock();
for_each_member_device_rcu(ca, c, i,
&c->rw_devs[BCH_DATA_JOURNAL]) {
struct journal_device *ja = &ca->journal;
unsigned buckets_this_device, sectors_this_device;
if (!ja->nr)
continue;
nr_online++;
buckets_this_device = bch2_journal_dev_buckets_available(j, ja);
sectors_this_device = ja->sectors_free;
/*
* Note that we don't allocate the space for a journal entry
* until we write it out - thus, account for it here:
*/
if (unwritten_sectors >= sectors_this_device) {
if (!buckets_this_device)
continue;
buckets_this_device--;
sectors_this_device = ca->mi.bucket_size;
}
sectors_this_device -= unwritten_sectors;
if (sectors_this_device < ca->mi.bucket_size &&
buckets_this_device) {
buckets_this_device--;
sectors_this_device = ca->mi.bucket_size;
}
if (!sectors_this_device)
continue;
sectors_next_entry = min(sectors_next_entry,
sectors_this_device);
sectors_total = min(sectors_total,
buckets_this_device * ca->mi.bucket_size +
sectors_this_device);
max_entry_size = min_t(unsigned, max_entry_size,
ca->mi.bucket_size);
nr_devs++;
}
rcu_read_unlock();
if (nr_online < c->opts.metadata_replicas_required) {
ret = -EROFS;
sectors_next_entry = 0;
} else if (!sectors_next_entry ||
nr_devs < min_t(unsigned, nr_online,
c->opts.metadata_replicas)) {
ret = -ENOSPC;
sectors_next_entry = 0;
} else if (!fifo_free(&j->pin)) {
ret = -ENOSPC;
sectors_next_entry = 0;
}
j->cur_entry_sectors = sectors_next_entry;
j->cur_entry_error = ret;
if (!ret)
journal_wake(j);
}
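
A standalone model (not patch code, all values invented) of the per-device sizing above: given free buckets, the sectors left in the current journal bucket, and an in-flight journal write not yet accounted on disk, how many sectors can the next entry get?

#include <assert.h>

static unsigned next_entry_sectors(unsigned buckets_free,
				   unsigned sectors_free,
				   unsigned unwritten,
				   unsigned bucket_size)
{
	if (unwritten >= sectors_free) {
		if (!buckets_free)
			return 0;	/* models the "continue" above */
		buckets_free--;
		sectors_free = bucket_size;
	}
	sectors_free -= unwritten;

	if (sectors_free < bucket_size && buckets_free) {
		buckets_free--;
		sectors_free = bucket_size;
	}
	return sectors_free;
}

int main(void)
{
	/* 100 sectors left in the current bucket, a 300 sector write still
	 * in flight, four free 512 sector buckets: the in-flight write
	 * spills into a fresh bucket and the next entry still gets a full
	 * bucket of its own: */
	assert(next_entry_sectors(4, 100, 300, 512) == 512);

	/* no free buckets: the in-flight write leaves nothing usable */
	assert(next_entry_sectors(0, 100, 300, 512) == 0);
	return 0;
}
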
/* Discards - last part of journal reclaim: */
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
bool ret;
spin_lock(&j->lock);
ret = ja->nr &&
ja->last_idx != ja->cur_idx &&
ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk;
spin_unlock(&j->lock);
return ret;
}
/*
* Advance ja->last_idx as long as it points to buckets that are no longer
* dirty, issuing discards if necessary:
*/
static void journal_do_discards(struct journal *j)
{
struct bch_fs *c = container_of(j, struct bch_fs, journal);
struct bch_dev *ca;
unsigned iter;
mutex_lock(&j->reclaim_lock);
for_each_rw_member(ca, c, iter) {
struct journal_device *ja = &ca->journal;
while (should_discard_bucket(j, ja)) {
if (ca->mi.discard &&
blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
blkdev_issue_discard(ca->disk_sb.bdev,
bucket_to_sector(ca,
ja->buckets[ja->last_idx]),
ca->mi.bucket_size, GFP_NOIO, 0);
spin_lock(&j->lock);
ja->last_idx = (ja->last_idx + 1) % ja->nr;
bch2_journal_space_available(j);
spin_unlock(&j->lock);
}
}
mutex_unlock(&j->reclaim_lock);
}
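
The discard condition in should_discard_bucket() reduces to simple sequence-number arithmetic; a standalone illustration (invented helper, not patch code):

#include <assert.h>
#include <stdbool.h>

/* Bucket last_idx is reclaimable once it isn't the bucket being written and
 * every sequence number it contains is older than the last_seq already
 * written out to disk: */
static bool can_discard(unsigned last_idx, unsigned cur_idx,
			unsigned long long bucket_newest_seq,
			unsigned long long last_seq_ondisk)
{
	return last_idx != cur_idx && bucket_newest_seq < last_seq_ondisk;
}

int main(void)
{
	assert( can_discard(2, 5, 99, 100));	/* fully flushed: discardable */
	assert(!can_discard(2, 5, 100, 100));	/* seq 100 may still be needed */
	assert(!can_discard(5, 5, 99, 100));	/* still the write bucket */
	return 0;
}
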
/*
* Journal entry pinning - machinery for holding a reference on a given journal
* entry, holding it open to ensure it gets replayed during recovery:
*/
static void bch2_journal_reclaim_fast(struct journal *j)
{
struct journal_entry_pin_list temp;
bool popped = false;
lockdep_assert_held(&j->lock);
/*
* Unpin journal entries whose reference counts reached zero, meaning
* all btree nodes got written out
*/
while (!fifo_empty(&j->pin) &&
!atomic_read(&fifo_peek_front(&j->pin).count)) {
BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
BUG_ON(!fifo_pop(&j->pin, temp));
popped = true;
}
if (popped)
bch2_journal_space_available(j);
}
void bch2_journal_pin_put(struct journal *j, u64 seq)
{
struct journal_entry_pin_list *pin_list = journal_seq_pin(j, seq);
if (atomic_dec_and_test(&pin_list->count)) {
spin_lock(&j->lock);
bch2_journal_reclaim_fast(j);
spin_unlock(&j->lock);
}
}
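
bch2_journal_pin_put() only frees journal space indirectly: reclaim advances the pin fifo strictly in order, so dropping the last reference on a newer sequence number does nothing until every older entry has also hit zero. A standalone model of that ordering rule (illustrative, not patch code):

#include <assert.h>

#define NR 4

static int count[NR];	/* per-sequence-number pin counts */
static int front;	/* oldest still-pinned sequence number */

/* models bch2_journal_reclaim_fast(): pop fully-released entries, oldest first */
static void reclaim_fast(void)
{
	while (front < NR && !count[front])
		front++;
}

int main(void)
{
	count[0] = 1;
	count[1] = 2;

	count[1]--;		/* seq 1 drops a ref: seq 0 still blocks reclaim */
	reclaim_fast();
	assert(front == 0);

	count[0]--;		/* seq 0 fully released: reclaim stops at seq 1, */
	reclaim_fast();		/* which still holds one ref */
	assert(front == 1);
	return 0;
}
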
static inline void __journal_pin_add(struct journal *j,
u64 seq,
struct journal_entry_pin *pin,
@@ -24,10 +222,7 @@ static inline void __journal_pin_add(struct journal *j,
pin->seq = seq;
pin->flush = flush_fn;
if (flush_fn)
list_add(&pin->list, &pin_list->list);
else
INIT_LIST_HEAD(&pin->list);
list_add(&pin->list, flush_fn ? &pin_list->list : &pin_list->flushed);
/*
* If the journal is currently full, we might want to call flush_fn
@@ -129,88 +324,53 @@ void bch2_journal_pin_flush(struct journal *j, struct journal_entry_pin *pin)
* data off of a specific device:
*/
/**
* bch2_journal_reclaim_fast - do the fast part of journal reclaim
*
* Called from IO submission context, does not block. Cleans up after btree
* write completions by advancing the journal pin and each cache's last_idx,
* kicking off discards and background reclaim as necessary.
*/
void bch2_journal_reclaim_fast(struct journal *j)
{
struct journal_entry_pin_list temp;
bool popped = false;
lockdep_assert_held(&j->lock);
/*
* Unpin journal entries whose reference counts reached zero, meaning
* all btree nodes got written out
*/
while (!fifo_empty(&j->pin) &&
!atomic_read(&fifo_peek_front(&j->pin).count)) {
BUG_ON(!list_empty(&fifo_peek_front(&j->pin).list));
BUG_ON(!fifo_pop(&j->pin, temp));
popped = true;
}
if (popped)
journal_wake(j);
}
static void journal_pin_mark_flushing(struct journal *j,
struct journal_entry_pin *pin,
u64 seq)
{
lockdep_assert_held(&j->reclaim_lock);
list_move(&pin->list, &journal_seq_pin(j, seq)->flushed);
BUG_ON(j->flush_in_progress);
j->flush_in_progress = pin;
}
static void journal_pin_flush(struct journal *j,
struct journal_entry_pin *pin,
u64 seq)
{
pin->flush(j, pin, seq);
BUG_ON(j->flush_in_progress != pin);
j->flush_in_progress = NULL;
wake_up(&j->pin_flush_wait);
}
static struct journal_entry_pin *
journal_get_next_pin(struct journal *j, u64 seq_to_flush, u64 *seq)
journal_get_next_pin(struct journal *j, u64 max_seq, u64 *seq)
{
struct journal_entry_pin_list *pin_list;
struct journal_entry_pin *ret = NULL;
/* no need to iterate over empty fifo entries: */
bch2_journal_reclaim_fast(j);
spin_lock(&j->lock);
fifo_for_each_entry_ptr(pin_list, &j->pin, *seq)
if (*seq > seq_to_flush ||
if (*seq > max_seq ||
(ret = list_first_entry_or_null(&pin_list->list,
struct journal_entry_pin, list)))
break;
return ret;
}
if (ret) {
list_move(&ret->list, &pin_list->flushed);
BUG_ON(j->flush_in_progress);
j->flush_in_progress = ret;
j->last_flushed = jiffies;
}
static bool should_discard_bucket(struct journal *j, struct journal_device *ja)
{
bool ret;
spin_lock(&j->lock);
ret = ja->nr &&
(ja->last_idx != ja->cur_idx &&
ja->bucket_seq[ja->last_idx] < j->last_seq_ondisk);
spin_unlock(&j->lock);
return ret;
}
static void journal_flush_pins(struct journal *j, u64 seq_to_flush,
unsigned min_nr)
{
struct journal_entry_pin *pin;
u64 seq;
lockdep_assert_held(&j->reclaim_lock);
while ((pin = journal_get_next_pin(j, min_nr
? U64_MAX : seq_to_flush, &seq))) {
if (min_nr)
min_nr--;
pin->flush(j, pin, seq);
BUG_ON(j->flush_in_progress != pin);
j->flush_in_progress = NULL;
wake_up(&j->pin_flush_wait);
}
}
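
Note the cap journal_flush_pins() computes: while min_nr forced flushes are still owed, the seq_to_flush limit is ignored entirely. Spelled out as a standalone helper (illustrative only, not patch code):

#include <assert.h>
#include <stdint.h>

static uint64_t flush_cap(uint64_t seq_to_flush, unsigned min_nr)
{
	return min_nr ? UINT64_MAX : seq_to_flush;
}

int main(void)
{
	assert(flush_cap(10, 1) == UINT64_MAX);	/* still owe a forced flush */
	assert(flush_cap(10, 0) == 10);		/* only pins at or below 10 */
	return 0;
}
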
/**
* bch2_journal_reclaim_work - free up journal buckets
*
@@ -235,104 +395,44 @@ void bch2_journal_reclaim_work(struct work_struct *work)
struct bch_fs, journal.reclaim_work);
struct journal *j = &c->journal;
struct bch_dev *ca;
struct journal_entry_pin *pin;
u64 seq, seq_to_flush = 0;
unsigned iter, bucket_to_flush;
unsigned long next_flush;
bool reclaim_lock_held = false, need_flush;
unsigned iter, bucket_to_flush, min_nr = 0;
u64 seq_to_flush = 0;
journal_do_discards(j);
mutex_lock(&j->reclaim_lock);
spin_lock(&j->lock);
/*
* Advance last_idx to point to the oldest journal entry containing
* btree node updates that have not yet been written out
*/
for_each_rw_member(ca, c, iter) {
struct journal_device *ja = &ca->journal;
if (!ja->nr)
continue;
while (should_discard_bucket(j, ja)) {
if (!reclaim_lock_held) {
/*
* ugh:
* might be called from __journal_res_get()
* under wait_event() - have to go back to
* TASK_RUNNING before doing something that
* would block, but only if we're doing work:
*/
__set_current_state(TASK_RUNNING);
mutex_lock(&j->reclaim_lock);
reclaim_lock_held = true;
/* recheck under reclaim_lock: */
continue;
}
if (ca->mi.discard &&
blk_queue_discard(bdev_get_queue(ca->disk_sb.bdev)))
blkdev_issue_discard(ca->disk_sb.bdev,
bucket_to_sector(ca,
ja->buckets[ja->last_idx]),
ca->mi.bucket_size, GFP_NOIO, 0);
spin_lock(&j->lock);
ja->last_idx = (ja->last_idx + 1) % ja->nr;
spin_unlock(&j->lock);
journal_wake(j);
}
/*
* Write out enough btree nodes to free up 50% journal
* buckets
*/
spin_lock(&j->lock);
/* Try to keep the journal at most half full: */
bucket_to_flush = (ja->cur_idx + (ja->nr >> 1)) % ja->nr;
seq_to_flush = max_t(u64, seq_to_flush,
ja->bucket_seq[bucket_to_flush]);
spin_unlock(&j->lock);
}
/* Also flush if the pin fifo is more than half full */
spin_lock(&j->lock);
seq_to_flush = max_t(s64, seq_to_flush,
(s64) journal_cur_seq(j) -
(j->pin.size >> 1));
spin_unlock(&j->lock);
/*
* If it's been longer than j->reclaim_delay_ms since we last flushed,
* make sure to flush at least one journal pin:
*/
next_flush = j->last_flushed + msecs_to_jiffies(j->reclaim_delay_ms);
need_flush = time_after(jiffies, next_flush);
if (time_after(jiffies, j->last_flushed +
msecs_to_jiffies(j->reclaim_delay_ms)))
min_nr = 1;
while ((pin = journal_get_next_pin(j, need_flush
? U64_MAX
: seq_to_flush, &seq))) {
if (!reclaim_lock_held) {
spin_unlock(&j->lock);
__set_current_state(TASK_RUNNING);
mutex_lock(&j->reclaim_lock);
reclaim_lock_held = true;
spin_lock(&j->lock);
continue;
}
journal_flush_pins(j, seq_to_flush, min_nr);
journal_pin_mark_flushing(j, pin, seq);
spin_unlock(&j->lock);
journal_pin_flush(j, pin, seq);
need_flush = false;
j->last_flushed = jiffies;
spin_lock(&j->lock);
}
spin_unlock(&j->lock);
if (reclaim_lock_held)
mutex_unlock(&j->reclaim_lock);
mutex_unlock(&j->reclaim_lock);
if (!test_bit(BCH_FS_RO, &c->flags))
queue_delayed_work(c->journal_reclaim_wq, &j->reclaim_work,
@@ -341,8 +441,6 @@ void bch2_journal_reclaim_work(struct work_struct *work)
static int journal_flush_done(struct journal *j, u64 seq_to_flush)
{
struct journal_entry_pin *pin;
u64 pin_seq;
int ret;
ret = bch2_journal_error(j);
@@ -350,16 +448,10 @@ static int journal_flush_done(struct journal *j, u64 seq_to_flush)
return ret;
mutex_lock(&j->reclaim_lock);
journal_flush_pins(j, seq_to_flush, 0);
spin_lock(&j->lock);
while ((pin = journal_get_next_pin(j, seq_to_flush, &pin_seq))) {
journal_pin_mark_flushing(j, pin, pin_seq);
spin_unlock(&j->lock);
journal_pin_flush(j, pin, pin_seq);
spin_lock(&j->lock);
}
/*
* If journal replay hasn't completed, the unreplayed journal entries
* hold refs on their corresponding sequence numbers

View File

@@ -3,6 +3,10 @@
#define JOURNAL_PIN (32 * 1024)
unsigned bch2_journal_dev_buckets_available(struct journal *,
struct journal_device *);
void bch2_journal_space_available(struct journal *);
static inline bool journal_pin_active(struct journal_entry_pin *pin)
{
return pin->seq != 0;
@@ -16,6 +20,8 @@ journal_seq_pin(struct journal *j, u64 seq)
return &j->pin.data[seq & j->pin.mask];
}
void bch2_journal_pin_put(struct journal *, u64);
void bch2_journal_pin_add(struct journal *, u64, struct journal_entry_pin *,
journal_pin_flush_fn);
void bch2_journal_pin_update(struct journal *, u64, struct journal_entry_pin *,
@@ -27,7 +33,6 @@ void bch2_journal_pin_add_if_older(struct journal *,
journal_pin_flush_fn);
void bch2_journal_pin_flush(struct journal *, struct journal_entry_pin *);
void bch2_journal_reclaim_fast(struct journal *);
void bch2_journal_reclaim_work(struct work_struct *);
void bch2_journal_flush_pins(struct journal *, u64);

View File

@@ -135,6 +135,12 @@ struct journal {
unsigned cur_entry_u64s;
unsigned cur_entry_sectors;
/*
* 0, or -ENOSPC if waiting on journal reclaim, or -EROFS if
* insufficient devices:
*/
int cur_entry_error;
/* Reserved space in journal entry to be used just prior to write */
unsigned entry_u64s_reserved;

View File

@@ -412,11 +412,13 @@ int bch2_fs_initialize(struct bch_fs *c)
bch2_btree_root_alloc(c, i);
err = "unable to allocate journal buckets";
for_each_online_member(ca, c, i)
if (bch2_dev_journal_alloc(ca)) {
for_each_online_member(ca, c, i) {
ret = bch2_dev_journal_alloc(ca);
if (ret) {
percpu_ref_put(&ca->io_ref);
goto err;
}
}
/*
* journal_res_get() will crash if called before this has