mirror of https://github.com/koverstreet/bcachefs-tools.git
synced 2025-02-23 00:00:02 +03:00
Update bcachefs sources to ae6e8a59d3 bcachefs: quota limit enforcement
This commit is contained in:
parent 4de98a2712
commit fe2d5ef75f
@@ -1 +1 @@
-02ae70070acc3bc4740d221efa5ff5425cf6fce5
+ae6e8a59d33008f46bb801850840dbd0a7608bbc
@@ -266,7 +266,8 @@ static void write_data(struct bch_fs *c,
 	op.write_point = writepoint_hashed(0);
 	op.pos = POS(dst_inode->bi_inum, dst_offset >> 9);

-	int ret = bch2_disk_reservation_get(c, &op.res, len >> 9, 0);
+	int ret = bch2_disk_reservation_get(c, &op.res, len >> 9,
+					    c->opts.data_replicas, 0);
 	if (ret)
 		die("error reserving space in new filesystem: %s", strerror(-ret));

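The two migrate-tool call sites in this commit (write_data() above and link_data() below) show the caller side of the API change being pulled in: bch2_disk_reservation_get() now takes the replica count as an explicit argument rather than deriving it from a reservation flag. A minimal, compilable sketch of the pattern with simplified stand-in types (not the real bcachefs structures):

    #include <stdio.h>

    struct disk_reservation {
            unsigned sectors;       /* scaled by the replica count */
            unsigned nr_replicas;
    };

    /* New-style call: the replica count is an explicit parameter. */
    static int disk_reservation_get(struct disk_reservation *res,
                                    unsigned sectors, unsigned nr_replicas)
    {
            res->nr_replicas = nr_replicas;
            res->sectors = sectors * nr_replicas;
            return 0;               /* the real call can fail with -ENOSPC */
    }

    int main(void)
    {
            struct disk_reservation res;

            disk_reservation_get(&res, 8, 2);       /* 8 sectors, 2 replicas */
            printf("reserved %u sectors\n", res.sectors);   /* prints 16 */
            return 0;
    }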
@@ -328,7 +329,7 @@ static void link_data(struct bch_fs *c, struct bch_inode_unpacked *dst,
 			.gen = bucket(ca, b)->mark.gen,
 		});

-		ret = bch2_disk_reservation_get(c, &res, sectors,
+		ret = bch2_disk_reservation_get(c, &res, sectors, 1,
 						BCH_DISK_RESERVATION_NOFAIL);
 		if (ret)
 			die("error reserving space in new filesystem: %s",
@@ -1859,6 +1859,27 @@ int bch2_dev_allocator_start(struct bch_dev *ca)
 	return 0;
 }

+static void allocator_start_issue_discards(struct bch_fs *c)
+{
+	struct bch_dev *ca;
+	unsigned i, dev_iter;
+	size_t bu;
+
+	for_each_rw_member(ca, c, dev_iter) {
+		unsigned done = 0;
+
+		fifo_for_each_entry(bu, &ca->free_inc, i) {
+			if (done == ca->nr_invalidated)
+				break;
+
+			blkdev_issue_discard(ca->disk_sb.bdev,
+					     bucket_to_sector(ca, bu),
+					     ca->mi.bucket_size, GFP_NOIO, 0);
+			done++;
+		}
+	}
+}
+
 static int __bch2_fs_allocator_start(struct bch_fs *c)
 {
 	struct bch_dev *ca;
@@ -1938,6 +1959,8 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 	 */
 	if (invalidating_data)
 		set_bit(BCH_FS_HOLD_BTREE_WRITES, &c->flags);
+	else
+		allocator_start_issue_discards(c);

 	/*
 	 * XXX: it's possible for this to deadlock waiting on journal reclaim,
@@ -1959,12 +1982,12 @@ static int __bch2_fs_allocator_start(struct bch_fs *c)
 			return ret;
 	}

+	if (invalidating_data)
+		allocator_start_issue_discards(c);
+
 	for_each_rw_member(ca, c, dev_iter)
 		while (ca->nr_invalidated) {
 			BUG_ON(!fifo_pop(&ca->free_inc, bu));
-			blkdev_issue_discard(ca->disk_sb.bdev,
-					     bucket_to_sector(ca, bu),
-					     ca->mi.bucket_size, GFP_NOIO, 0);
 			ca->nr_invalidated--;
 		}

@@ -1983,7 +2006,7 @@ again:
 		if (btree_node_dirty(b) && (!b->written || b->level)) {
 			rcu_read_unlock();
 			six_lock_read(&b->lock);
-			bch2_btree_node_write(c, b, NULL, SIX_LOCK_read);
+			bch2_btree_node_write(c, b, SIX_LOCK_read);
 			six_unlock_read(&b->lock);
 			goto again;
 		}
@@ -178,9 +178,9 @@ static int __btree_node_reclaim(struct bch_fs *c, struct btree *b, bool flush)
 		 * the post write cleanup:
 		 */
 		if (verify_btree_ondisk(c))
-			bch2_btree_node_write(c, b, NULL, SIX_LOCK_intent);
+			bch2_btree_node_write(c, b, SIX_LOCK_intent);
 		else
-			__bch2_btree_node_write(c, b, NULL, SIX_LOCK_read);
+			__bch2_btree_node_write(c, b, SIX_LOCK_read);

 		/* wait for any in flight btree write */
 		btree_node_wait_on_io(b);
@@ -626,7 +626,9 @@ struct btree *bch2_btree_node_get(struct bch_fs *c, struct btree_iter *iter,
 	struct btree *b;
 	struct bset_tree *t;

-	BUG_ON(level >= BTREE_MAX_DEPTH);
+	/* btree_node_fill() requires parent to be locked: */
+	EBUG_ON(!btree_node_locked(iter, level + 1));
+	EBUG_ON(level >= BTREE_MAX_DEPTH);
 retry:
 	rcu_read_lock();
 	b = btree_cache_find(bc, k);
@@ -763,6 +765,12 @@ struct btree *bch2_btree_node_get_sibling(struct bch_fs *c,

 	if (IS_ERR(ret) && PTR_ERR(ret) == -EINTR) {
 		btree_node_unlock(iter, level);
+
+		if (!bch2_btree_node_relock(iter, level + 1)) {
+			bch2_btree_iter_set_locks_want(iter, level + 2);
+			return ERR_PTR(-EINTR);
+		}
+
 		ret = bch2_btree_node_get(c, iter, &tmp.k, level, SIX_LOCK_intent);
 	}

@@ -734,7 +734,7 @@ static void bch2_coalesce_nodes(struct bch_fs *c, struct btree_iter *iter,
 		bch2_btree_build_aux_trees(n);
 		six_unlock_write(&n->lock);

-		bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
+		bch2_btree_node_write(c, n, SIX_LOCK_intent);
 	}

 	/*
@@ -1425,6 +1425,19 @@ err:
 void bch2_btree_complete_write(struct bch_fs *c, struct btree *b,
 			       struct btree_write *w)
 {
+	unsigned long old, new, v = READ_ONCE(b->will_make_reachable);
+
+	do {
+		old = new = v;
+		if (!(old & 1))
+			break;
+
+		new &= ~1UL;
+	} while ((v = cmpxchg(&b->will_make_reachable, old, new)) != old);
+
+	if (old & 1)
+		closure_put(&((struct btree_update *) new)->cl);
+
 	bch2_journal_pin_drop(&c->journal, &w->journal);
 	closure_wake_up(&w->wait);
 }
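The block added to bch2_btree_complete_write() atomically clears the low tag bit of b->will_make_reachable and, if the bit was set, drops the closure reference it stood for. A compilable userspace sketch of that tagged-pointer release, using C11 atomics in place of the kernel's cmpxchg() (names here are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    struct update { int refs; };            /* stand-in for btree_update */

    static void put_ref(struct update *u) { u->refs--; }

    static void complete_write(_Atomic uintptr_t *word)
    {
            uintptr_t old = atomic_load(word), new;

            do {
                    if (!(old & 1))                 /* tag already clear */
                            break;
                    new = old & ~(uintptr_t) 1;     /* clear only the tag bit */
            } while (!atomic_compare_exchange_weak(word, &old, new));

            if (old & 1)                    /* we cleared it: drop the ref */
                    put_ref((struct update *) (old & ~(uintptr_t) 1));
    }

    int main(void)
    {
            struct update u = { .refs = 1 };
            _Atomic uintptr_t w = (uintptr_t) &u | 1;

            complete_write(&w);
            printf("refs = %d\n", u.refs);  /* prints 0 */
            return 0;
    }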
@@ -1441,7 +1454,6 @@ static void bch2_btree_node_write_error(struct bch_fs *c,
 					struct btree_write_bio *wbio)
 {
 	struct btree *b = wbio->wbio.bio.bi_private;
-	struct closure *cl = wbio->cl;
 	__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
 	struct bkey_i_extent *new_key;
 	struct bkey_s_extent e;
@@ -1488,8 +1500,6 @@ out:
 	bch2_btree_iter_unlock(&iter);
 	bio_put(&wbio->wbio.bio);
 	btree_node_write_done(c, b);
-	if (cl)
-		closure_put(cl);
 	return;
 err:
 	set_btree_node_noevict(b);
@@ -1520,7 +1530,6 @@ static void btree_node_write_work(struct work_struct *work)
 {
 	struct btree_write_bio *wbio =
 		container_of(work, struct btree_write_bio, work);
-	struct closure *cl = wbio->cl;
 	struct bch_fs *c = wbio->wbio.c;
 	struct btree *b = wbio->wbio.bio.bi_private;

@@ -1542,8 +1551,6 @@ static void btree_node_write_work(struct work_struct *work)

 	bio_put(&wbio->wbio.bio);
 	btree_node_write_done(c, b);
-	if (cl)
-		closure_put(cl);
 }

 static void btree_node_write_endio(struct bio *bio)
@@ -1598,7 +1605,6 @@ static int validate_bset_for_write(struct bch_fs *c, struct btree *b,
 }

 void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
-			     struct closure *parent,
 			     enum six_lock_type lock_type_held)
 {
 	struct btree_write_bio *wbio;
@@ -1651,7 +1657,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,

 	BUG_ON(btree_node_fake(b));
 	BUG_ON(!list_empty(&b->write_blocked));
-	BUG_ON((b->will_make_reachable != NULL) != !b->written);
+	BUG_ON((b->will_make_reachable != 0) != !b->written);

 	BUG_ON(b->written >= c->opts.btree_node_size);
 	BUG_ON(bset_written(b, btree_bset_last(b)));
@@ -1786,7 +1792,6 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 			    struct btree_write_bio, wbio.bio);
 	wbio_init(&wbio->wbio.bio);
 	wbio->data = data;
-	wbio->cl = parent;
 	wbio->wbio.order = order;
 	wbio->wbio.used_mempool = used_mempool;
 	wbio->wbio.bio.bi_opf = REQ_OP_WRITE|REQ_META|REQ_FUA;
@@ -1794,9 +1799,6 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 	wbio->wbio.bio.bi_end_io = btree_node_write_endio;
 	wbio->wbio.bio.bi_private = b;

-	if (parent)
-		closure_get(parent);
-
 	bch2_bio_map(&wbio->wbio.bio, data);

 	/*
@@ -1893,7 +1895,6 @@ bool bch2_btree_post_write_cleanup(struct bch_fs *c, struct btree *b)
  * Use this one if the node is intent locked:
  */
 void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
-			   struct closure *parent,
 			   enum six_lock_type lock_type_held)
 {
 	BUG_ON(lock_type_held == SIX_LOCK_write);
@@ -1901,7 +1902,7 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 	if (lock_type_held == SIX_LOCK_intent ||
 	    six_trylock_convert(&b->lock, SIX_LOCK_read,
 				SIX_LOCK_intent)) {
-		__bch2_btree_node_write(c, b, parent, SIX_LOCK_intent);
+		__bch2_btree_node_write(c, b, SIX_LOCK_intent);

 		/* don't cycle lock unnecessarily: */
 		if (btree_node_just_written(b)) {
@@ -1913,7 +1914,7 @@ void bch2_btree_node_write(struct bch_fs *c, struct btree *b,
 		if (lock_type_held == SIX_LOCK_read)
 			six_lock_downgrade(&b->lock);
 	} else {
-		__bch2_btree_node_write(c, b, parent, SIX_LOCK_read);
+		__bch2_btree_node_write(c, b, SIX_LOCK_read);
 	}
 }

@@ -19,7 +19,6 @@ struct btree_read_bio {
 };

 struct btree_write_bio {
-	struct closure		*cl;
 	void			*data;
 	struct work_struct	work;
 	struct bch_write_bio	wbio;
@@ -91,22 +90,41 @@ void bch2_btree_complete_write(struct bch_fs *, struct btree *,
 void bch2_btree_write_error_work(struct work_struct *);

 void __bch2_btree_node_write(struct bch_fs *, struct btree *,
-			     struct closure *, enum six_lock_type);
+			     enum six_lock_type);
 bool bch2_btree_post_write_cleanup(struct bch_fs *, struct btree *);

 void bch2_btree_node_write(struct bch_fs *, struct btree *,
-			   struct closure *, enum six_lock_type);
+			   enum six_lock_type);

-#define bch2_btree_node_write_dirty(_c, _b, _cl, cond)		\
+/*
+ * btree_node_dirty() can be cleared with only a read lock,
+ * and for bch2_btree_node_write_cond() we want to set need_write iff it's
+ * still dirty:
+ */
+static inline void set_btree_node_need_write_if_dirty(struct btree *b)
+{
+	unsigned long old, new, v = READ_ONCE(b->flags);
+
+	do {
+		old = new = v;
+
+		if (!(old & (1 << BTREE_NODE_dirty)))
+			return;
+
+		new |= (1 << BTREE_NODE_need_write);
+	} while ((v = cmpxchg(&b->flags, old, new)) != old);
+}
+
+#define bch2_btree_node_write_cond(_c, _b, cond)		\
 do {								\
 	while ((_b)->written && btree_node_dirty(_b) && (cond)) {	\
-		set_btree_node_need_write(_b);			\
-								\
-		if (!btree_node_may_write(_b))			\
+		if (!btree_node_may_write(_b)) {		\
+			set_btree_node_need_write_if_dirty(_b);	\
 			break;					\
+		}						\
 								\
 		if (!btree_node_write_in_flight(_b)) {		\
-			bch2_btree_node_write(_c, _b, _cl, SIX_LOCK_read);\
+			bch2_btree_node_write(_c, _b, SIX_LOCK_read);	\
 			break;					\
 		}						\
 								\
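The comment above explains the need for set_btree_node_need_write_if_dirty(): the dirty bit can be cleared by someone holding only a read lock, so "set need_write" must be conditional on "still dirty" within a single atomic update rather than two separate steps. A self-contained illustration of the idiom with C11 atomics (flag names are placeholders, not the kernel's):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { NODE_DIRTY = 1 << 0, NODE_NEED_WRITE = 1 << 1 };

    /* Set NEED_WRITE only while DIRTY is still set; the loop re-reads the
     * flags on every retry, so a concurrent clear of DIRTY can never be
     * overtaken by a stale NEED_WRITE. */
    static void set_need_write_if_dirty(_Atomic unsigned *flags)
    {
            unsigned old = atomic_load(flags), new;

            do {
                    if (!(old & NODE_DIRTY))
                            return;
                    new = old | NODE_NEED_WRITE;
            } while (!atomic_compare_exchange_weak(flags, &old, new));
    }

    int main(void)
    {
            _Atomic unsigned flags = NODE_DIRTY;

            set_need_write_if_dirty(&flags);
            printf("flags = %#x\n", atomic_load(&flags));   /* 0x3 */
            return 0;
    }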
@@ -126,7 +126,7 @@ struct btree {
 	 * another write - because that write also won't yet be reachable and
 	 * marking it as completed before it's reachable would be incorrect:
 	 */
-	struct btree_update	*will_make_reachable;
+	unsigned long		will_make_reachable;

 	struct btree_ob_ref	ob;

@@ -270,6 +270,17 @@ void bch2_btree_node_free_never_inserted(struct bch_fs *c, struct btree *b)
 void bch2_btree_node_free_inmem(struct bch_fs *c, struct btree *b,
 				struct btree_iter *iter)
 {
+	/*
+	 * Is this a node that isn't reachable on disk yet?
+	 *
+	 * Nodes that aren't reachable yet have writes blocked until they're
+	 * reachable - now that we've cancelled any pending writes and moved
+	 * things waiting on that write to wait on this update, we can drop this
+	 * node from the list of nodes that the other update is making
+	 * reachable, prior to freeing it:
+	 */
+	btree_update_drop_new_node(c, b);
+
 	bch2_btree_iter_node_drop_linked(iter, b);

 	__btree_node_free(c, b, iter);
@@ -503,8 +514,7 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
 	struct btree *b;
 	struct disk_reservation disk_res = { 0, 0 };
 	unsigned sectors = nr_nodes * c->opts.btree_node_size;
-	int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD|
-		BCH_DISK_RESERVATION_METADATA;
+	int ret, disk_res_flags = BCH_DISK_RESERVATION_GC_LOCK_HELD;

 	if (flags & BTREE_INSERT_NOFAIL)
 		disk_res_flags |= BCH_DISK_RESERVATION_NOFAIL;
@@ -517,7 +527,9 @@ static struct btree_reserve *bch2_btree_reserve_get(struct bch_fs *c,
 	if (ret)
 		return ERR_PTR(ret);

-	if (bch2_disk_reservation_get(c, &disk_res, sectors, disk_res_flags))
+	if (bch2_disk_reservation_get(c, &disk_res, sectors,
+				      c->opts.metadata_replicas,
+				      disk_res_flags))
 		return ERR_PTR(-ENOSPC);

 	BUG_ON(nr_nodes > BTREE_RESERVE_MAX);
@@ -597,12 +609,17 @@ static void btree_update_nodes_reachable(struct closure *cl)
 	while (as->nr_new_nodes) {
 		struct btree *b = as->new_nodes[--as->nr_new_nodes];

-		BUG_ON(b->will_make_reachable != as);
-		b->will_make_reachable = NULL;
+		BUG_ON(b->will_make_reachable &&
+		       (struct btree_update *) b->will_make_reachable != as);
+		b->will_make_reachable = 0;
 		mutex_unlock(&c->btree_interior_update_lock);

+		/*
+		 * b->will_make_reachable prevented it from being written, so
+		 * write it now if it needs to be written:
+		 */
 		six_lock_read(&b->lock);
-		bch2_btree_node_write_dirty(c, b, NULL, btree_node_need_write(b));
+		bch2_btree_node_write_cond(c, b, btree_node_need_write(b));
 		six_unlock_read(&b->lock);
 		mutex_lock(&c->btree_interior_update_lock);
 	}
@@ -651,8 +668,11 @@ retry:
 		list_del(&as->write_blocked_list);
 		mutex_unlock(&c->btree_interior_update_lock);

-		bch2_btree_node_write_dirty(c, b, NULL,
-					    btree_node_need_write(b));
+		/*
+		 * b->write_blocked prevented it from being written, so
+		 * write it now if it needs to be written:
+		 */
+		bch2_btree_node_write_cond(c, b, btree_node_need_write(b));
 		six_unlock_read(&b->lock);
 		break;

@@ -851,17 +871,25 @@ static void btree_node_will_make_reachable(struct btree_update *as,
 	BUG_ON(b->will_make_reachable);

 	as->new_nodes[as->nr_new_nodes++] = b;
-	b->will_make_reachable = as;
+	b->will_make_reachable = 1UL|(unsigned long) as;

+	closure_get(&as->cl);
 	mutex_unlock(&c->btree_interior_update_lock);
 }

-static void __btree_interior_update_drop_new_node(struct btree *b)
+static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
 {
-	struct btree_update *as = b->will_make_reachable;
+	struct btree_update *as;
+	unsigned long v;
 	unsigned i;

-	BUG_ON(!as);
+	if (!b->will_make_reachable)
+		return;
+
+	mutex_lock(&c->btree_interior_update_lock);
+	v = xchg(&b->will_make_reachable, 0);
+
+	as = (struct btree_update *) (v & ~1UL);
 	for (i = 0; i < as->nr_new_nodes; i++)
 		if (as->new_nodes[i] == b)
 			goto found;
@@ -869,14 +897,10 @@ static void __btree_interior_update_drop_new_node(struct btree *b)
 	BUG();
 found:
 	array_remove_item(as->new_nodes, as->nr_new_nodes, i);
-	b->will_make_reachable = NULL;
-}
-
-static void btree_update_drop_new_node(struct bch_fs *c, struct btree *b)
-{
-	mutex_lock(&c->btree_interior_update_lock);
-	__btree_interior_update_drop_new_node(b);
 	mutex_unlock(&c->btree_interior_update_lock);
+
+	if (v & 1)
+		closure_put(&as->cl);
 }

 static void btree_interior_update_add_node_reference(struct btree_update *as,
@@ -952,6 +976,12 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
 	clear_btree_node_need_write(b);
 	w = btree_current_write(b);

+	/*
+	 * Does this node have any btree_update operations waiting on this node
+	 * to be written?
+	 *
+	 * If so, wake them up when this btree_update operation is reachable:
+	 */
 	llist_for_each_entry_safe(cl, cl_n, llist_del_all(&w->wait.list), list)
 		llist_add(&cl->list, &as->wait.list);

@@ -972,9 +1002,6 @@ void bch2_btree_interior_update_will_free_node(struct btree_update *as,
 				      &as->journal, interior_update_flush);
 	bch2_journal_pin_drop(&c->journal, &w->journal);

-	if (b->will_make_reachable)
-		__btree_interior_update_drop_new_node(b);
-
 	mutex_unlock(&c->btree_interior_update_lock);
 }

@@ -1338,7 +1365,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
 		six_unlock_write(&n2->lock);
 		six_unlock_write(&n1->lock);

-		bch2_btree_node_write(c, n2, &as->cl, SIX_LOCK_intent);
+		bch2_btree_node_write(c, n2, SIX_LOCK_intent);

 		/*
 		 * Note that on recursive parent_keys == keys, so we
@@ -1356,7 +1383,8 @@ static void btree_split(struct btree_update *as, struct btree *b,
 			n3->sib_u64s[1] = U16_MAX;

 			btree_split_insert_keys(as, n3, iter, &as->parent_keys);
-			bch2_btree_node_write(c, n3, &as->cl, SIX_LOCK_intent);
+
+			bch2_btree_node_write(c, n3, SIX_LOCK_intent);
 		}
 	} else {
 		trace_btree_compact(c, b);
@@ -1367,7 +1395,7 @@ static void btree_split(struct btree_update *as, struct btree *b,
 			bch2_keylist_add(&as->parent_keys, &n1->key);
 	}

-	bch2_btree_node_write(c, n1, &as->cl, SIX_LOCK_intent);
+	bch2_btree_node_write(c, n1, SIX_LOCK_intent);

 	/* New nodes all written, now make them visible: */

@@ -1668,7 +1696,7 @@ retry:
 	bch2_keylist_add(&as->parent_keys, &delete);
 	bch2_keylist_add(&as->parent_keys, &n->key);

-	bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
+	bch2_btree_node_write(c, n, SIX_LOCK_intent);

 	bch2_btree_insert_node(as, parent, iter, &as->parent_keys);

@@ -1726,7 +1754,7 @@ static int __btree_node_rewrite(struct bch_fs *c, struct btree_iter *iter,

 	trace_btree_gc_rewrite_node(c, b);

-	bch2_btree_node_write(c, n, &as->cl, SIX_LOCK_intent);
+	bch2_btree_node_write(c, n, SIX_LOCK_intent);

 	if (parent) {
 		bch2_btree_insert_node(as, parent, iter,
@@ -109,7 +109,7 @@ static void __btree_node_flush(struct journal *j, struct journal_entry_pin *pin,
 	struct btree *b = container_of(w, struct btree, writes[i]);

 	six_lock_read(&b->lock);
-	bch2_btree_node_write_dirty(c, b, NULL,
+	bch2_btree_node_write_cond(c, b,
 		(btree_current_write(b) == w &&
 		 w->journal.pin_list == journal_seq_pin(j, seq)));
 	six_unlock_read(&b->lock);
@@ -713,8 +713,6 @@ int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 	s64 sectors_available;
 	int ret;

-	sectors *= res->nr_replicas;
-
 	lg_local_lock(&c->usage_lock);
 	stats = this_cpu_ptr(c->usage_percpu);

@@ -788,19 +786,6 @@ recalculate:
 	return ret;
 }

-int bch2_disk_reservation_get(struct bch_fs *c,
-			      struct disk_reservation *res,
-			      unsigned sectors, int flags)
-{
-	res->sectors = 0;
-	res->gen = c->capacity_gen;
-	res->nr_replicas = (flags & BCH_DISK_RESERVATION_METADATA)
-		? c->opts.metadata_replicas
-		: c->opts.data_replicas;
-
-	return bch2_disk_reservation_add(c, res, sectors, flags);
-}
-
 /* Startup/shutdown: */

 static void buckets_free_rcu(struct rcu_head *rcu)
@@ -230,16 +230,36 @@ static inline void bch2_disk_reservation_put(struct bch_fs *c,
 }

 #define BCH_DISK_RESERVATION_NOFAIL		(1 << 0)
-#define BCH_DISK_RESERVATION_METADATA		(1 << 1)
-#define BCH_DISK_RESERVATION_GC_LOCK_HELD	(1 << 2)
-#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD	(1 << 3)
+#define BCH_DISK_RESERVATION_GC_LOCK_HELD	(1 << 1)
+#define BCH_DISK_RESERVATION_BTREE_LOCKS_HELD	(1 << 2)

 int bch2_disk_reservation_add(struct bch_fs *,
 			      struct disk_reservation *,
 			      unsigned, int);
-int bch2_disk_reservation_get(struct bch_fs *,
-			      struct disk_reservation *,
-			      unsigned, int);
+
+static inline struct disk_reservation
+bch2_disk_reservation_init(struct bch_fs *c, unsigned nr_replicas)
+{
+	return (struct disk_reservation) {
+		.sectors	= 0,
+#if 0
+		/* not used yet: */
+		.gen		= c->capacity_gen,
+#endif
+		.nr_replicas	= nr_replicas,
+	};
+}
+
+static inline int bch2_disk_reservation_get(struct bch_fs *c,
+					    struct disk_reservation *res,
+					    unsigned sectors,
+					    unsigned nr_replicas,
+					    int flags)
+{
+	*res = bch2_disk_reservation_init(c, nr_replicas);
+
+	return bch2_disk_reservation_add(c, res, sectors * nr_replicas, flags);
+}

 int bch2_dev_buckets_resize(struct bch_fs *, struct bch_dev *, u64);
 void bch2_dev_buckets_free(struct bch_dev *);
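Note the contract this hunk establishes: bch2_disk_reservation_get() is now an inline wrapper that scales sectors by nr_replicas itself, and bch2_disk_reservation_add() (which, per the buckets.c hunk above, no longer multiplies by res->nr_replicas internally) expects already-scaled sectors. That is also why the extents hunk below passes sectors * bch2_extent_nr_dirty_ptrs(k). A simplified, compilable sketch of the division of labour (illustrative types, not the real ones):

    #include <stdio.h>

    struct reservation { unsigned sectors, nr_replicas; };

    /* add() consumes sectors that are already scaled by the replica count */
    static int reservation_add(struct reservation *res, unsigned sectors)
    {
            res->sectors += sectors;
            return 0;
    }

    /* get() does the scaling, so most callers never think about it */
    static int reservation_get(struct reservation *res,
                               unsigned sectors, unsigned nr_replicas)
    {
            *res = (struct reservation) { 0, nr_replicas };
            return reservation_add(res, sectors * nr_replicas);
    }

    int main(void)
    {
            struct reservation res;

            reservation_get(&res, 100, 3);
            printf("%u sectors reserved\n", res.sectors);   /* 300 */
            return 0;
    }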
@@ -1264,7 +1264,8 @@ extent_insert_check_split_compressed(struct extent_insert_state *s,

 		switch (bch2_disk_reservation_add(c,
 				s->trans->disk_res,
-				sectors, flags)) {
+				sectors * bch2_extent_nr_dirty_ptrs(k),
+				flags)) {
 		case 0:
 			break;
 		case -ENOSPC:
File diff suppressed because it is too large
@@ -173,7 +173,8 @@ static int bch2_set_projid(struct bch_fs *c,
 	qid.q[QTYP_PRJ] = projid;

 	ret = bch2_quota_transfer(c, 1 << QTYP_PRJ, qid, inode->ei_qid,
-				  inode->v.i_blocks);
+				  inode->v.i_blocks +
+				  inode->ei_quota_reserved);
 	if (ret)
 		return ret;

@@ -217,10 +217,8 @@ static struct bch_inode_info *bch2_vfs_inode_create(struct bch_fs *c,

 #ifdef CONFIG_BCACHEFS_POSIX_ACL
 	ret = posix_acl_create(&dir->v, &inode->v.i_mode, &default_acl, &acl);
-	if (ret) {
-		make_bad_inode(&inode->v);
+	if (ret)
 		goto err_make_bad;
-	}
 #endif

 	bch2_inode_init(c, &inode_u,
@@ -232,20 +230,17 @@ static struct bch_inode_info *bch2_vfs_inode_create(struct bch_fs *c,
 	inode_u.bi_project = dir->ei_qid.q[QTYP_PRJ];

 	ret = bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, 1, BCH_QUOTA_PREALLOC);
-	if (ret) {
-		make_bad_inode(&inode->v);
+	if (ret)
 		goto err_make_bad;
-	}

 	ret = bch2_inode_create(c, &inode_u,
 				BLOCKDEV_INODE_MAX, 0,
 				&c->unused_inode_hint);
-	if (unlikely(ret)) {
-		bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1, BCH_QUOTA_WARN);
-		goto err_make_bad;
-	}
+	if (unlikely(ret))
+		goto err_acct_quota;

 	bch2_vfs_inode_init(c, inode, &inode_u);
+	atomic_long_inc(&c->nr_inodes);

 	if (default_acl) {
 		ret = bch2_set_acl(&inode->v, default_acl, ACL_TYPE_DEFAULT);
@@ -260,11 +255,12 @@ static struct bch_inode_info *bch2_vfs_inode_create(struct bch_fs *c,
 	}

 	insert_inode_hash(&inode->v);
-	atomic_long_inc(&c->nr_inodes);
 out:
 	posix_acl_release(default_acl);
 	posix_acl_release(acl);
 	return inode;
+err_acct_quota:
+	bch2_quota_acct(c, bch_qid(&inode_u), Q_INO, -1, BCH_QUOTA_WARN);
 err_make_bad:
 	/*
 	 * indicate to bch_evict_inode that the inode was never actually
@@ -643,7 +639,8 @@ static int bch2_setattr_nonsize(struct bch_inode_info *inode, struct iattr *iatt

 	if (qtypes) {
 		ret = bch2_quota_transfer(c, qtypes, qid, inode->ei_qid,
-					  inode->v.i_blocks);
+					  inode->v.i_blocks +
+					  inode->ei_quota_reserved);
 		if (ret)
 			goto out_unlock;
 	}
@@ -953,6 +950,7 @@ static void bch2_vfs_inode_init(struct bch_fs *c,
 	inode->v.i_ctime = bch2_time_to_timespec(c, bi->bi_ctime);

 	inode->ei_journal_seq = 0;
+	inode->ei_quota_reserved = 0;
 	inode->ei_qid = bch_qid(bi);
 	inode->ei_str_hash = bch2_hash_info_init(c, bi);
 	inode->ei_inode = *bi;
@@ -1038,6 +1036,8 @@ static void bch2_evict_inode(struct inode *vinode)

 	clear_inode(&inode->v);

+	BUG_ON(!is_bad_inode(&inode->v) && inode->ei_quota_reserved);
+
 	if (!inode->v.i_nlink && !is_bad_inode(&inode->v)) {
 		bch2_quota_acct(c, inode->ei_qid, Q_SPC, -((s64) inode->v.i_blocks),
 				BCH_QUOTA_WARN);
@@ -13,6 +13,7 @@ struct bch_inode_info {

 	struct mutex		ei_update_lock;
 	u64			ei_journal_seq;
+	u64			ei_quota_reserved;
 	unsigned long		ei_last_dirtied;
 	struct bch_qid		ei_qid;

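The new ei_quota_reserved field records space that has been charged against the quota (BCH_QUOTA_PREALLOC) but not yet turned into allocated blocks, which is why the quota-transfer call sites earlier in this diff now move inode->v.i_blocks + inode->ei_quota_reserved rather than i_blocks alone. A tiny sketch of the accounting identity (illustrative types):

    #include <stdio.h>

    struct inode_acct {
            long long i_blocks;        /* space already allocated */
            long long quota_reserved;  /* charged to quota, not yet written */
    };

    /* moving an inode between quotas must move both components */
    static long long quota_transfer_amount(const struct inode_acct *a)
    {
            return a->i_blocks + a->quota_reserved;
    }

    int main(void)
    {
            struct inode_acct a = { .i_blocks = 120, .quota_reserved = 8 };

            printf("transfer %lld blocks\n", quota_transfer_amount(&a));
            return 0;
    }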
@@ -1469,6 +1469,8 @@ void bch2_journal_start(struct bch_fs *c)
 	journal_pin_new_entry(j, 1);
 	bch2_journal_buf_init(j);

+	spin_unlock(&j->lock);
+
 	/*
 	 * Adding entries to the next journal entry before allocating space on
 	 * disk for the next journal entry - this is ok, because these entries
@@ -1487,8 +1489,6 @@ void bch2_journal_start(struct bch_fs *c)
 			bl->written = true;
 		}

-	spin_unlock(&j->lock);
-
 	queue_delayed_work(system_freezable_wq, &j->reclaim_work, 0);
 }

@@ -1505,7 +1505,6 @@ int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
 			journal_seq_pin(j, le64_to_cpu(i->j.seq));

 		for_each_jset_key(k, _n, entry, &i->j) {
-			struct disk_reservation disk_res;

 			if (entry->btree_id == BTREE_ID_ALLOC) {
 				/*
@@ -1514,19 +1513,18 @@ int bch2_journal_replay(struct bch_fs *c, struct list_head *list)
 				 */
 				ret = bch2_alloc_replay_key(c, k->k.p);
 			} else {

 				/*
 				 * We might cause compressed extents to be
 				 * split, so we need to pass in a
 				 * disk_reservation:
 				 */
-				BUG_ON(bch2_disk_reservation_get(c, &disk_res, 0, 0));
+				struct disk_reservation disk_res =
+					bch2_disk_reservation_init(c, 0);
+
 				ret = bch2_btree_insert(c, entry->btree_id, k,
 						&disk_res, NULL, NULL,
 						BTREE_INSERT_NOFAIL|
 						BTREE_INSERT_JOURNAL_REPLAY);
-				bch2_disk_reservation_put(c, &disk_res);
 			}

 			if (ret) {
@@ -1580,7 +1578,7 @@ static int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
 	 */

 	if (bch2_disk_reservation_get(c, &disk_res,
-				      bucket_to_sector(ca, nr - ja->nr), 0))
+				      bucket_to_sector(ca, nr - ja->nr), 1, 0))
 		return -ENOSPC;

 	mutex_lock(&c->sb_lock);
@@ -749,14 +749,14 @@ static int bch2_set_quota(struct super_block *sb, struct kqid qid,
 	}

 	if (qdq->d_fieldmask & QC_SPC_SOFT)
-		new_quota.v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit);
+		new_quota.v.c[Q_SPC].softlimit = cpu_to_le64(qdq->d_spc_softlimit >> 9);
 	if (qdq->d_fieldmask & QC_SPC_HARD)
-		new_quota.v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit);
+		new_quota.v.c[Q_SPC].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit >> 9);

 	if (qdq->d_fieldmask & QC_INO_SOFT)
-		new_quota.v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_spc_softlimit);
+		new_quota.v.c[Q_INO].softlimit = cpu_to_le64(qdq->d_ino_softlimit);
 	if (qdq->d_fieldmask & QC_INO_HARD)
-		new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_spc_hardlimit);
+		new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);

 	ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
 				   BTREE_INSERT_ENTRY(&iter, &new_quota.k_i));
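Two separate fixes land in this hunk: the space limits arriving from the VFS quotactl interface are in bytes while bcachefs stores them in 512-byte sectors (hence the new >> 9), and the inode limits were being copied from the d_spc_* fields rather than d_ino_* -- an apparent copy-paste slip. The unit conversion itself, as a trivial standalone check:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long bytes = 1ULL << 20;      /* 1 MiB from quotactl */
            unsigned long long sectors = bytes >> 9;    /* 512-byte sectors */

            printf("%llu bytes = %llu sectors\n", bytes, sectors);  /* 2048 */
            return 0;
    }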
|
@ -36,8 +36,19 @@ extern const struct quotactl_ops bch2_quotactl_operations;
|
|||||||
|
|
||||||
#else
|
#else
|
||||||
|
|
||||||
#define bch2_quota_acct(_c, _uid, _gid, _counter, _v) (0)
|
static inline int bch2_quota_acct(struct bch_fs *c, struct bch_qid qid,
|
||||||
#define bch2_quota_transfer(_c, _type, _src, _dst, _v) (0)
|
enum quota_counters counter, s64 v,
|
||||||
|
enum quota_acct_mode mode)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int bch2_quota_transfer(struct bch_fs *c, unsigned qtypes,
|
||||||
|
struct bch_qid dst,
|
||||||
|
struct bch_qid src, u64 space)
|
||||||
|
{
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static inline void bch2_fs_quota_exit(struct bch_fs *c) {}
|
static inline void bch2_fs_quota_exit(struct bch_fs *c) {}
|
||||||
static inline void bch2_fs_quota_init(struct bch_fs *c) {}
|
static inline void bch2_fs_quota_init(struct bch_fs *c) {}
|
||||||
|
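Replacing the no-quota stub macros with static inline functions keeps the build working when quotas are compiled out while restoring compiler type checking: a macro stub compiles no matter what types its arguments have, whereas the inline stubs fail to compile if a call site drifts out of sync with the real signatures. A minimal illustration of the difference (hypothetical names, not the bcachefs API):

    /* compiles no matter what types the arguments have */
    #define quota_acct_stub(c, qid, counter, v, mode)       (0)

    /* same behaviour, but every argument is type-checked */
    static inline int quota_acct_inline(void *c, unsigned qid,
                                        int counter, long long v, int mode)
    {
            return 0;
    }

    int main(void)
    {
            /* both calls compile here; only the inline checks argument types */
            return quota_acct_stub(0, 0, 0, 0, 0) +
                   quota_acct_inline(0, 0, 0, 0, 0);
    }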