mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to 7e03c1ab0e bcachefs: Kill bchfs_extent_update()
commit 7f3557f57e
parent 62f5e4fa67
@@ -1 +1 @@
-ce9293e9d063f7f1a22209f9cc2f5cb7478e886c
+7e03c1ab0ef2e3148ba70656eab67471c85a0419
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SIGNAL_H
+#define _LINUX_SCHED_SIGNAL_H
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+	return 0;
+}
+
+#endif /* _LINUX_SCHED_SIGNAL_H */
+
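The new header is a userspace shim: bcachefs-tools compiles imported kernel code directly, and headers under include/linux/ stand in for kernel interfaces, so fatal_signal_pending() is stubbed to always return 0 (the reflink.c hunk below includes <linux/sched/signal.h>). A reduced, standalone sketch of the pattern — the opaque struct and the caller are illustrative assumptions, not part of the commit:

#include <stdio.h>

struct task_struct;			/* opaque here; the real type lives elsewhere */

/* Userspace stub: there is never a fatal signal to observe. */
static inline int fatal_signal_pending(struct task_struct *p)
{
	(void) p;
	return 0;
}

/* Kernel-style cancellation check that now compiles unchanged. */
static int do_units_of_work(struct task_struct *me, int units)
{
	while (units--)
		if (fatal_signal_pending(me))
			return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", do_units_of_work(NULL, 3));	/* prints 0 */
	return 0;
}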
@@ -1510,7 +1510,7 @@ LE32_BITMASK(JSET_BIG_ENDIAN, struct jset, flags, 4, 5);
 	x(XATTRS, 3, "xattrs") \
 	x(ALLOC, 4, "alloc") \
 	x(QUOTAS, 5, "quotas") \
-	x(EC, 6, "erasure_coding") \
+	x(EC, 6, "stripes") \
 	x(REFLINK, 7, "reflink")
 
 enum btree_id {
@@ -216,7 +216,7 @@ static int bch2_gc_btree(struct bch_fs *c, enum btree_id btree_id,
 		: expensive_debug_checks(c) ? 0
 		: !btree_node_type_needs_gc(btree_id) ? 1
 		: 0;
-	u8 max_stale;
+	u8 max_stale = 0;
 	int ret = 0;
 
 	bch2_trans_init(&trans, c, 0, 0);
@@ -640,12 +640,7 @@ static int bch2_gc_start(struct bch_fs *c,
 {
 	struct bch_dev *ca;
 	unsigned i;
-
-	/*
-	 * indicate to stripe code that we need to allocate for the gc stripes
-	 * radix tree, too
-	 */
-	gc_pos_set(c, gc_phase(GC_PHASE_START));
+	int ret;
 
 	BUG_ON(c->usage_gc);
 
@@ -673,6 +668,18 @@ static int bch2_gc_start(struct bch_fs *c,
 		}
 	}
 
+	ret = bch2_ec_mem_alloc(c, true);
+	if (ret)
+		return ret;
+
+	percpu_down_write(&c->mark_lock);
+
+	/*
+	 * indicate to stripe code that we need to allocate for the gc stripes
+	 * radix tree, too
+	 */
+	gc_pos_set(c, gc_phase(GC_PHASE_START));
+
 	for_each_member_device(ca, c, i) {
 		struct bucket_array *dst = __bucket_array(ca, 1);
 		struct bucket_array *src = __bucket_array(ca, 0);
@@ -697,7 +704,9 @@ static int bch2_gc_start(struct bch_fs *c,
 		}
 	};
 
-	return bch2_ec_mem_alloc(c, true);
+	percpu_up_write(&c->mark_lock);
+
+	return 0;
 }
 
 /**
@@ -730,10 +739,7 @@ int bch2_gc(struct bch_fs *c, struct journal_keys *journal_keys,
 
 	down_write(&c->gc_lock);
 again:
-	percpu_down_write(&c->mark_lock);
 	ret = bch2_gc_start(c, metadata_only);
-	percpu_up_write(&c->mark_lock);
-
 	if (ret)
 		goto out;
 
@@ -1440,6 +1440,14 @@ struct bkey_s_c bch2_btree_iter_next(struct btree_iter *iter)
 		return bch2_btree_iter_peek(iter);
 	}
 
+	if (unlikely(bkey_deleted(&iter->k))) {
+		/*
+		 * we're currently pointed at a hole, because previously we were
+		 * iterating over slots:
+		 */
+		return bch2_btree_iter_peek(iter);
+	}
+
 	do {
 		bch2_btree_node_iter_advance(&l->iter, l->b);
 		p = bch2_btree_node_iter_peek_all(&l->iter, l->b);
@@ -451,6 +451,7 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 	struct bch_fs *c = trans->c;
 	struct bch_fs_usage *fs_usage = NULL;
 	struct btree_insert_entry *i;
+	struct btree_iter *iter;
 	unsigned mark_flags = trans->flags & BTREE_INSERT_BUCKET_INVALIDATE
 		? BCH_BUCKET_MARK_BUCKET_INVALIDATE
 		: 0;
@@ -473,6 +474,14 @@ static inline int do_btree_insert_at(struct btree_trans *trans,
 		goto out_clear_replicas;
 	}
 
+	trans_for_each_iter(trans, iter) {
+		if (iter->nodes_locked != iter->nodes_intent_locked) {
+			BUG_ON(iter->flags & BTREE_ITER_KEEP_UNTIL_COMMIT);
+			BUG_ON(trans->iters_live & (1ULL << iter->idx));
+			__bch2_btree_iter_unlock(iter);
+		}
+	}
+
 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG))
 		trans_for_each_update(trans, i)
 			btree_insert_entry_checks(trans, i);
@@ -807,26 +807,42 @@ void bch2_mark_metadata_bucket(struct bch_fs *c, struct bch_dev *ca,
 	preempt_enable();
 }
 
+static s64 disk_sectors_scaled(unsigned n, unsigned d, unsigned sectors)
+{
+	return DIV_ROUND_UP(sectors * n, d);
+}
+
+static s64 __ptr_disk_sectors_delta(unsigned old_size,
+				    unsigned offset, s64 delta,
+				    unsigned flags,
+				    unsigned n, unsigned d)
+{
+	BUG_ON(!n || !d);
+
+	if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
+		BUG_ON(offset + -delta > old_size);
+
+		return -disk_sectors_scaled(n, d, old_size) +
+			disk_sectors_scaled(n, d, offset) +
+			disk_sectors_scaled(n, d, old_size - offset + delta);
+	} else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
+		BUG_ON(offset + -delta > old_size);
+
+		return -disk_sectors_scaled(n, d, old_size) +
+			disk_sectors_scaled(n, d, old_size + delta);
+	} else {
+		return disk_sectors_scaled(n, d, delta);
+	}
+}
+
 static s64 ptr_disk_sectors_delta(struct extent_ptr_decoded p,
 				  unsigned offset, s64 delta,
 				  unsigned flags)
 {
-	if (flags & BCH_BUCKET_MARK_OVERWRITE_SPLIT) {
-		BUG_ON(offset + -delta > p.crc.live_size);
-
-		return -((s64) ptr_disk_sectors(p)) +
-			__ptr_disk_sectors(p, offset) +
-			__ptr_disk_sectors(p, p.crc.live_size -
-					   offset + delta);
-	} else if (flags & BCH_BUCKET_MARK_OVERWRITE) {
-		BUG_ON(offset + -delta > p.crc.live_size);
-
-		return -((s64) ptr_disk_sectors(p)) +
-			__ptr_disk_sectors(p, p.crc.live_size +
-					   delta);
-	} else {
-		return ptr_disk_sectors(p);
-	}
+	return __ptr_disk_sectors_delta(p.crc.live_size,
+					offset, delta, flags,
+					p.crc.compressed_size,
+					p.crc.uncompressed_size);
 }
 
 static void bucket_set_stripe(struct bch_fs *c,
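disk_sectors_scaled() rounds sectors * n / d up, and __ptr_disk_sectors_delta() uses it so that overwrites of compressed extents charge disk space at the compression ratio n/d (and, further down, so parity sectors scale by nr_parity/nr_data). A standalone sketch of that arithmetic with invented numbers — a 128-sector extent compressed to 48 sectors; none of these values come from the commit:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Same shape as disk_sectors_scaled() above. */
static long long scaled(unsigned n, unsigned d, unsigned sectors)
{
	return DIV_ROUND_UP((long long) sectors * n, d);
}

int main(void)
{
	unsigned n = 48, d = 128;	/* compressed/uncompressed size (assumed) */
	unsigned old_size = 128;
	long long delta = -32;		/* overwrite shrinks live size by 32 */

	/* plain allocation of 64 new sectors charges 24 on disk */
	printf("alloc:     %+lld\n", scaled(n, d, 64));

	/* the OVERWRITE case: -scaled(old) + scaled(old + delta) = -12 */
	printf("overwrite: %+lld\n",
	       -scaled(n, d, old_size) + scaled(n, d, (unsigned) (old_size + delta)));
	return 0;
}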
@@ -964,15 +980,15 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
 			       struct bch_extent_stripe_ptr p,
 			       enum bch_data_type data_type,
 			       struct bch_fs_usage *fs_usage,
-			       s64 sectors, unsigned flags)
+			       s64 sectors, unsigned flags,
+			       struct bch_replicas_padded *r,
+			       unsigned *nr_data,
+			       unsigned *nr_parity)
 {
 	bool gc = flags & BCH_BUCKET_MARK_GC;
 	struct stripe *m;
-	unsigned old, new, nr_data;
+	unsigned old, new;
 	int blocks_nonempty_delta;
-	s64 parity_sectors;
-
-	BUG_ON(!sectors);
 
 	m = genradix_ptr(&c->stripes[gc], p.idx);
 
@@ -987,13 +1003,9 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
 
 	BUG_ON(m->r.e.data_type != data_type);
 
-	nr_data = m->nr_blocks - m->nr_redundant;
-
-	parity_sectors = DIV_ROUND_UP(abs(sectors) * m->nr_redundant, nr_data);
-
-	if (sectors < 0)
-		parity_sectors = -parity_sectors;
-	sectors += parity_sectors;
+	*nr_data = m->nr_blocks - m->nr_redundant;
+	*nr_parity = m->nr_redundant;
+	*r = m->r;
 
 	old = m->block_sectors[p.block];
 	m->block_sectors[p.block] += sectors;
@@ -1011,8 +1023,6 @@ static int bch2_mark_stripe_ptr(struct bch_fs *c,
 
 	spin_unlock(&c->ec_stripes_heap_lock);
 
-	update_replicas(c, fs_usage, &m->r.e, sectors);
-
 	return 0;
 }
 
@@ -1027,7 +1037,6 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
 	struct extent_ptr_decoded p;
 	struct bch_replicas_padded r;
 	s64 dirty_sectors = 0;
-	unsigned i;
 	int ret;
 
 	r.e.data_type = data_type;
@@ -1047,22 +1056,39 @@ static int bch2_mark_extent(struct bch_fs *c, struct bkey_s_c k,
 			if (!stale)
 				update_cached_sectors(c, fs_usage, p.ptr.dev,
 						      disk_sectors);
-		} else if (!p.ec_nr) {
+		} else if (!p.has_ec) {
 			dirty_sectors += disk_sectors;
 			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
 		} else {
-			for (i = 0; i < p.ec_nr; i++) {
-				ret = bch2_mark_stripe_ptr(c, p.ec[i],
-						data_type, fs_usage,
-						disk_sectors, flags);
+			struct bch_replicas_padded ec_r;
+			unsigned nr_data, nr_parity;
+			s64 parity_sectors;
+
+			ret = bch2_mark_stripe_ptr(c, p.ec, data_type,
+					fs_usage, disk_sectors, flags,
+					&ec_r, &nr_data, &nr_parity);
 			if (ret)
 				return ret;
-			}
+
+			parity_sectors =
+				__ptr_disk_sectors_delta(p.crc.live_size,
+					offset, sectors, flags,
+					p.crc.compressed_size * nr_parity,
+					p.crc.uncompressed_size * nr_data);
+
+			update_replicas(c, fs_usage, &ec_r.e,
+					disk_sectors + parity_sectors);
 
+			/*
+			 * There may be other dirty pointers in this extent, but
+			 * if so they're not required for mounting if we have an
+			 * erasure coded pointer in this extent:
+			 */
 			r.e.nr_required = 0;
 		}
 	}
 
+	if (r.e.nr_devs)
 	update_replicas(c, fs_usage, &r.e, dirty_sectors);
 
 	return 0;
@@ -1501,16 +1527,16 @@ out:
 
 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
 			struct bch_extent_stripe_ptr p,
-			s64 sectors, enum bch_data_type data_type)
+			s64 sectors, enum bch_data_type data_type,
+			struct bch_replicas_padded *r,
+			unsigned *nr_data,
+			unsigned *nr_parity)
 {
 	struct bch_fs *c = trans->c;
-	struct bch_replicas_padded r;
 	struct btree_iter *iter;
 	struct bkey_i *new_k;
 	struct bkey_s_c k;
 	struct bkey_s_stripe s;
-	unsigned nr_data;
-	s64 parity_sectors;
 	int ret = 0;
 
 	ret = trans_get_key(trans, BTREE_ID_EC, POS(0, p.idx), &iter, &k);
@@ -1533,20 +1559,13 @@ static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
 	bkey_reassemble(new_k, k);
 	s = bkey_i_to_s_stripe(new_k);
 
-	nr_data = s.v->nr_blocks - s.v->nr_redundant;
-
-	parity_sectors = DIV_ROUND_UP(abs(sectors) * s.v->nr_redundant, nr_data);
-
-	if (sectors < 0)
-		parity_sectors = -parity_sectors;
-
 	stripe_blockcount_set(s.v, p.block,
 		stripe_blockcount_get(s.v, p.block) +
-		sectors + parity_sectors);
+		sectors);
 
-	bch2_bkey_to_replicas(&r.e, s.s_c);
-
-	update_replicas_list(trans, &r.e, sectors);
+	*nr_data = s.v->nr_blocks - s.v->nr_redundant;
+	*nr_parity = s.v->nr_redundant;
+	bch2_bkey_to_replicas(&r->e, s.s_c);
 out:
 	bch2_trans_iter_put(trans, iter);
 	return ret;
@@ -1563,7 +1582,6 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
 	struct bch_replicas_padded r;
 	s64 dirty_sectors = 0;
 	bool stale;
-	unsigned i;
 	int ret;
 
 	r.e.data_type = data_type;
@@ -1588,21 +1606,34 @@ static int bch2_trans_mark_extent(struct btree_trans *trans,
 			if (!stale)
 				update_cached_sectors_list(trans, p.ptr.dev,
 							   disk_sectors);
-		} else if (!p.ec_nr) {
+		} else if (!p.has_ec) {
 			dirty_sectors += disk_sectors;
 			r.e.devs[r.e.nr_devs++] = p.ptr.dev;
 		} else {
-			for (i = 0; i < p.ec_nr; i++) {
-				ret = bch2_trans_mark_stripe_ptr(trans, p.ec[i],
-						disk_sectors, data_type);
+			struct bch_replicas_padded ec_r;
+			unsigned nr_data, nr_parity;
+			s64 parity_sectors;
+
+			ret = bch2_trans_mark_stripe_ptr(trans, p.ec,
+					disk_sectors, data_type,
+					&ec_r, &nr_data, &nr_parity);
 			if (ret)
 				return ret;
-			}
 
+			parity_sectors =
+				__ptr_disk_sectors_delta(p.crc.live_size,
+					offset, sectors, flags,
+					p.crc.compressed_size * nr_parity,
+					p.crc.uncompressed_size * nr_data);
+
+			update_replicas_list(trans, &ec_r.e,
+					     disk_sectors + parity_sectors);
+
 			r.e.nr_required = 0;
 		}
 	}
 
+	if (r.e.nr_devs)
 	update_replicas_list(trans, &r.e, dirty_sectors);
 
 	return 0;
@@ -137,7 +137,7 @@ static inline u8 ptr_stale(struct bch_dev *ca,
 	return gen_after(ptr_bucket_mark(ca, ptr).gen, ptr->gen);
 }
 
-static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
+static inline s64 __ptr_disk_sectors(struct extent_ptr_decoded p,
 					  unsigned live_size)
 {
 	return live_size && p.crc.compression_type
@@ -146,7 +146,7 @@ static inline unsigned __ptr_disk_sectors(struct extent_ptr_decoded p,
 		: live_size;
 }
 
-static inline unsigned ptr_disk_sectors(struct extent_ptr_decoded p)
+static inline s64 ptr_disk_sectors(struct extent_ptr_decoded p)
 {
 	return __ptr_disk_sectors(p, p.crc.live_size);
 }
@@ -296,10 +296,10 @@ int bch2_dirent_delete(struct bch_fs *c, u64 dir_inum,
 struct btree_iter *
 __bch2_dirent_lookup_trans(struct btree_trans *trans, u64 dir_inum,
 			   const struct bch_hash_info *hash_info,
-			   const struct qstr *name)
+			   const struct qstr *name, unsigned flags)
 {
 	return bch2_hash_lookup(trans, bch2_dirent_hash_desc,
-				hash_info, dir_inum, name, 0);
+				hash_info, dir_inum, name, flags);
 }
 
 u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
@@ -313,7 +313,8 @@ u64 bch2_dirent_lookup(struct bch_fs *c, u64 dir_inum,
 
 	bch2_trans_init(&trans, c, 0, 0);
 
-	iter = __bch2_dirent_lookup_trans(&trans, dir_inum, hash_info, name);
+	iter = __bch2_dirent_lookup_trans(&trans, dir_inum,
+					  hash_info, name, 0);
 	if (IS_ERR(iter)) {
 		BUG_ON(PTR_ERR(iter) == -EINTR);
 		goto out;
@@ -353,36 +354,31 @@ int bch2_readdir(struct bch_fs *c, u64 inum, struct dir_context *ctx)
 	struct btree_iter *iter;
 	struct bkey_s_c k;
 	struct bkey_s_c_dirent dirent;
-	unsigned len;
 	int ret;
 
 	bch2_trans_init(&trans, c, 0, 0);
 
 	for_each_btree_key(&trans, iter, BTREE_ID_DIRENTS,
 			   POS(inum, ctx->pos), 0, k, ret) {
+		if (k.k->p.inode > inum)
+			break;
+
 		if (k.k->type != KEY_TYPE_dirent)
 			continue;
 
 		dirent = bkey_s_c_to_dirent(k);
 
-		if (bkey_cmp(k.k->p, POS(inum, ctx->pos)) < 0)
-			continue;
-
-		if (k.k->p.inode > inum)
-			break;
-
-		len = bch2_dirent_name_bytes(dirent);
-
 		/*
 		 * XXX: dir_emit() can fault and block, while we're holding
 		 * locks
 		 */
-		if (!dir_emit(ctx, dirent.v->d_name, len,
+		ctx->pos = dirent.k->p.offset;
+		if (!dir_emit(ctx, dirent.v->d_name,
+			      bch2_dirent_name_bytes(dirent),
 			      le64_to_cpu(dirent.v->d_inum),
 			      dirent.v->d_type))
 			break;
-
-		ctx->pos = k.k->p.offset + 1;
+		ctx->pos = dirent.k->p.offset + 1;
 	}
 	ret = bch2_trans_exit(&trans) ?: ret;
 
@@ -55,7 +55,7 @@ int bch2_dirent_rename(struct btree_trans *,
 struct btree_iter *
 __bch2_dirent_lookup_trans(struct btree_trans *, u64,
 			   const struct bch_hash_info *,
-			   const struct qstr *);
+			   const struct qstr *, unsigned);
 u64 bch2_dirent_lookup(struct bch_fs *, u64, const struct bch_hash_info *,
 		       const struct qstr *);
 
@@ -135,8 +135,6 @@ void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
 		pr_buf(out, " %u:%llu:%u", s->ptrs[i].dev,
 		       (u64) s->ptrs[i].offset,
 		       stripe_blockcount_get(s, i));
-
-	bch2_bkey_ptrs_to_text(out, c, k);
 }
 
 static int ptr_matches_stripe(struct bch_fs *c,
@@ -433,10 +431,9 @@ int bch2_ec_read_extent(struct bch_fs *c, struct bch_read_bio *rbio)
 
 	closure_init_stack(&cl);
 
-	BUG_ON(!rbio->pick.idx ||
-	       rbio->pick.idx - 1 >= rbio->pick.ec_nr);
+	BUG_ON(!rbio->pick.has_ec);
 
-	stripe_idx = rbio->pick.ec[rbio->pick.idx - 1].idx;
+	stripe_idx = rbio->pick.ec.idx;
 
 	buf = kzalloc(sizeof(*buf), GFP_NOIO);
 	if (!buf)
@@ -561,7 +558,7 @@ static int ec_stripe_mem_alloc(struct bch_fs *c,
 	size_t idx = iter->pos.offset;
 	int ret = 0;
 
-	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT))
+	if (!__ec_stripe_mem_alloc(c, idx, GFP_NOWAIT|__GFP_NOWARN))
 		return ret;
 
 	bch2_trans_unlock(iter->trans);
@@ -1278,7 +1275,7 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	struct btree_trans trans;
 	struct btree_iter *btree_iter;
 	struct journal_iter journal_iter;
-	struct bkey_s_c btree_k, journal_k, k;
+	struct bkey_s_c btree_k, journal_k;
 	int ret;
 
 	ret = bch2_fs_ec_start(c);
@@ -1294,33 +1291,31 @@ int bch2_stripes_read(struct bch_fs *c, struct journal_keys *journal_keys)
 	journal_k = bch2_journal_iter_peek(&journal_iter);
 
 	while (1) {
+		bool btree;
+
 		if (btree_k.k && journal_k.k) {
 			int cmp = bkey_cmp(btree_k.k->p, journal_k.k->p);
 
-			if (cmp < 0) {
-				k = btree_k;
+			if (!cmp)
 				btree_k = bch2_btree_iter_next(btree_iter);
-			} else if (cmp == 0) {
-				btree_k = bch2_btree_iter_next(btree_iter);
-				k = journal_k;
-				journal_k = bch2_journal_iter_next(&journal_iter);
-			} else {
-				k = journal_k;
-				journal_k = bch2_journal_iter_next(&journal_iter);
-			}
+
+			btree = cmp < 0;
 		} else if (btree_k.k) {
-			k = btree_k;
-			btree_k = bch2_btree_iter_next(btree_iter);
+			btree = true;
 		} else if (journal_k.k) {
-			k = journal_k;
-			journal_k = bch2_journal_iter_next(&journal_iter);
+			btree = false;
 		} else {
 			break;
 		}
 
-		bch2_mark_key(c, k, 0, 0, NULL, 0,
+		bch2_mark_key(c, btree ? btree_k : journal_k,
+			      0, 0, NULL, 0,
 			      BCH_BUCKET_MARK_ALLOC_READ|
 			      BCH_BUCKET_MARK_NOATOMIC);
+
+		if (btree)
+			btree_k = bch2_btree_iter_next(btree_iter);
+		else
+			journal_k = bch2_journal_iter_next(&journal_iter);
 	}
 
 	ret = bch2_trans_exit(&trans) ?: ret;
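The rewritten loop replaces per-branch key/advance bookkeeping with a single bool: compare the heads of the two sorted streams, advance the btree side on a tie (the journal key supersedes it), mark the chosen key, then consume that side. The same merge in miniature over two sorted integer arrays — the arrays are purely illustrative, not from the source:

#include <stdio.h>

int main(void)
{
	int btree[]   = { 1, 3, 5 };
	int journal[] = { 3, 4 };
	unsigned nb = sizeof(btree) / sizeof(btree[0]);
	unsigned nj = sizeof(journal) / sizeof(journal[0]);
	unsigned bi = 0, ji = 0;

	while (bi < nb || ji < nj) {
		int use_btree;

		if (bi < nb && ji < nj) {
			int cmp = btree[bi] - journal[ji];

			if (!cmp)
				bi++;		/* journal entry supersedes it */

			use_btree = cmp < 0;
		} else {
			use_btree = bi < nb;
		}

		if (use_btree)
			printf("btree   %d\n", btree[bi++]);
		else
			printf("journal %d\n", journal[ji++]);
	}
	return 0;
}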
@@ -67,7 +67,7 @@ unsigned bch2_bkey_nr_dirty_ptrs(struct bkey_s_c k)
 static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
 					   struct extent_ptr_decoded p)
 {
-	unsigned i, durability = 0;
+	unsigned durability = 0;
 	struct bch_dev *ca;
 
 	if (p.ptr.cached)
@@ -78,16 +78,16 @@ static unsigned bch2_extent_ptr_durability(struct bch_fs *c,
 	if (ca->mi.state != BCH_MEMBER_STATE_FAILED)
 		durability = max_t(unsigned, durability, ca->mi.durability);
 
-	for (i = 0; i < p.ec_nr; i++) {
+	if (p.has_ec) {
 		struct stripe *s =
-			genradix_ptr(&c->stripes[0], p.idx);
+			genradix_ptr(&c->stripes[0], p.ec.idx);
 
 		if (WARN_ON(!s))
-			continue;
+			goto out;
 
 		durability = max_t(unsigned, durability, s->nr_redundant);
 	}
-
+out:
 	return durability;
 }
 
@@ -206,10 +206,10 @@ int bch2_bkey_pick_read_device(struct bch_fs *c, struct bkey_s_c k,
 			p.idx++;
 
 		if (force_reconstruct_read(c) &&
-		    !p.idx && p.ec_nr)
+		    !p.idx && p.has_ec)
 			p.idx++;
 
-		if (p.idx >= p.ec_nr + 1)
+		if (p.idx >= (unsigned) p.has_ec + 1)
 			continue;
 
 		if (ret > 0 && !ptr_better(c, p, *pick))
@@ -1011,13 +1011,19 @@ int bch2_extent_atomic_end(struct btree_iter *iter,
 			   struct bpos *end)
 {
 	struct btree_trans *trans = iter->trans;
-	struct btree *b = iter->l[0].b;
-	struct btree_node_iter node_iter = iter->l[0].iter;
+	struct btree *b;
+	struct btree_node_iter node_iter;
 	struct bkey_packed *_k;
 	unsigned nr_iters = 0;
 	int ret;
 
-	BUG_ON(iter->uptodate > BTREE_ITER_NEED_PEEK);
+	ret = bch2_btree_iter_traverse(iter);
+	if (ret)
+		return ret;
+
+	b = iter->l[0].b;
+	node_iter = iter->l[0].iter;
 
 	BUG_ON(bkey_cmp(bkey_start_pos(&insert->k), b->data->min_key) < 0);
 
 	*end = bpos_min(insert->k.p, b->key.k.p);
@@ -1538,7 +1544,6 @@ void bch2_extent_ptr_decoded_append(struct bkey_i *k,
 	struct bch_extent_crc_unpacked crc =
 		bch2_extent_crc_unpack(&k->k, NULL);
 	union bch_extent_entry *pos;
-	unsigned i;
 
 	if (!bch2_crc_unpacked_cmp(crc, p->crc)) {
 		pos = ptrs.start;
@@ -1557,9 +1562,9 @@ found:
 	p->ptr.type = 1 << BCH_EXTENT_ENTRY_ptr;
 	__extent_entry_insert(k, pos, to_entry(&p->ptr));
 
-	for (i = 0; i < p->ec_nr; i++) {
-		p->ec[i].type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
-		__extent_entry_insert(k, pos, to_entry(&p->ec[i]));
+	if (p->has_ec) {
+		p->ec.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr;
+		__extent_entry_insert(k, pos, to_entry(&p->ec));
 	}
 }
 
@@ -228,7 +228,7 @@ struct bkey_ptrs {
 	__label__ out;						\
 								\
 	(_ptr).idx = 0;						\
-	(_ptr).ec_nr = 0;					\
+	(_ptr).has_ec = false;					\
 								\
 	__bkey_extent_entry_for_each_from(_entry, _end, _entry)	\
 		switch (extent_entry_type(_entry)) {		\
@@ -242,7 +242,8 @@ struct bkey_ptrs {
 				entry_to_crc(_entry));		\
 			break;					\
 		case BCH_EXTENT_ENTRY_stripe_ptr:		\
-			(_ptr).ec[(_ptr).ec_nr++] = _entry->stripe_ptr;	\
+			(_ptr).ec = _entry->stripe_ptr;		\
+			(_ptr).has_ec = true;			\
 			break;					\
 		}						\
 out:								\
@@ -21,10 +21,10 @@ struct bch_extent_crc_unpacked {
 
 struct extent_ptr_decoded {
 	unsigned			idx;
-	unsigned			ec_nr;
+	bool				has_ec;
 	struct bch_extent_crc_unpacked	crc;
 	struct bch_extent_ptr		ptr;
-	struct bch_extent_stripe_ptr	ec[4];
+	struct bch_extent_stripe_ptr	ec;
 };
 
 struct bch_io_failures {
@@ -24,8 +24,7 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
 	u64 now = bch2_current_time(trans->c);
 	int ret;
 
-	dir_iter = bch2_inode_peek(trans, dir_u, dir_inum,
-				   name ? BTREE_ITER_INTENT : 0);
+	dir_iter = bch2_inode_peek(trans, dir_u, dir_inum, BTREE_ITER_INTENT);
 	if (IS_ERR(dir_iter))
 		return PTR_ERR(dir_iter);
 
@@ -76,8 +75,7 @@ int bch2_create_trans(struct btree_trans *trans, u64 dir_inum,
 	return 0;
 }
 
-int bch2_link_trans(struct btree_trans *trans,
-		    u64 dir_inum,
+int bch2_link_trans(struct btree_trans *trans, u64 dir_inum,
 		    u64 inum, struct bch_inode_unpacked *inode_u,
 		    const struct qstr *name)
 {
@@ -86,19 +84,22 @@ int bch2_link_trans(struct btree_trans *trans, u64 dir_inum,
 	struct bch_hash_info dir_hash;
 	u64 now = bch2_current_time(trans->c);
 
-	dir_iter = bch2_inode_peek(trans, &dir_u, dir_inum, 0);
-	if (IS_ERR(dir_iter))
-		return PTR_ERR(dir_iter);
-
 	inode_iter = bch2_inode_peek(trans, inode_u, inum, BTREE_ITER_INTENT);
 	if (IS_ERR(inode_iter))
 		return PTR_ERR(inode_iter);
 
-	dir_hash = bch2_hash_info_init(trans->c, &dir_u);
-
 	inode_u->bi_ctime = now;
 	bch2_inode_nlink_inc(inode_u);
 
+	dir_iter = bch2_inode_peek(trans, &dir_u, dir_inum, 0);
+	if (IS_ERR(dir_iter))
+		return PTR_ERR(dir_iter);
+
+	/* XXX: shouldn't we be updating mtime/ctime on the directory? */
+
+	dir_hash = bch2_hash_info_init(trans->c, &dir_u);
+	bch2_trans_iter_put(trans, dir_iter);
+
 	return bch2_dirent_create(trans, dir_inum, &dir_hash,
 				  mode_to_type(inode_u->bi_mode),
 				  name, inum, BCH_HASH_SET_MUST_CREATE) ?:
@@ -121,8 +122,8 @@ int bch2_unlink_trans(struct btree_trans *trans,
 
 	dir_hash = bch2_hash_info_init(trans->c, dir_u);
 
-	dirent_iter = __bch2_dirent_lookup_trans(trans, dir_inum,
-						 &dir_hash, name);
+	dirent_iter = __bch2_dirent_lookup_trans(trans, dir_inum, &dir_hash,
+						 name, BTREE_ITER_INTENT);
 	if (IS_ERR(dirent_iter))
 		return PTR_ERR(dirent_iter);
 
@@ -12,8 +12,7 @@ int bch2_create_trans(struct btree_trans *, u64,
 		      struct posix_acl *,
 		      struct posix_acl *);
 
-int bch2_link_trans(struct btree_trans *,
-		    u64,
+int bch2_link_trans(struct btree_trans *, u64,
 		    u64, struct bch_inode_unpacked *,
 		    const struct qstr *);
 
File diff suppressed because it is too large
@@ -11,16 +11,6 @@
 
 struct quota_res;
 
-int bch2_extent_update(struct btree_trans *,
-		       struct bch_inode_info *,
-		       struct disk_reservation *,
-		       struct quota_res *,
-		       struct btree_iter *,
-		       struct bkey_i *,
-		       u64, bool, bool, s64 *);
-int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
-		   struct bpos, struct bch_inode_info *, u64);
-
 int __must_check bch2_write_inode_size(struct bch_fs *,
 				       struct bch_inode_info *,
 				       loff_t, unsigned);
@@ -49,34 +49,6 @@ static void journal_seq_copy(struct bch_inode_info *dst,
 	} while ((v = cmpxchg(&dst->ei_journal_seq, old, journal_seq)) != old);
 }
 
-/*
- * I_SIZE_DIRTY requires special handling:
- *
- * To the recovery code, the flag means that there is stale data past i_size
- * that needs to be deleted; it's used for implementing atomic appends and
- * truncates.
- *
- * On append, we set I_SIZE_DIRTY before doing the write, then after the write
- * we clear I_SIZE_DIRTY atomically with updating i_size to the new larger size
- * that exposes the data we just wrote.
- *
- * On truncate, it's the reverse: We set I_SIZE_DIRTY atomically with setting
- * i_size to the new smaller size, then we delete the data that we just made
- * invisible, and then we clear I_SIZE_DIRTY.
- *
- * Because there can be multiple appends in flight at a time, we need a refcount
- * (i_size_dirty_count) instead of manipulating the flag directly. Nonzero
- * refcount means I_SIZE_DIRTY is set, zero means it's cleared.
- *
- * Because write_inode() can be called at any time, i_size_dirty_count means
- * something different to the runtime code - it means to write_inode() "don't
- * update i_size yet".
- *
- * We don't clear I_SIZE_DIRTY directly, we let write_inode() clear it when
- * i_size_dirty_count is zero - but the reverse is not true, I_SIZE_DIRTY must
- * be set explicitly.
- */
-
 void bch2_inode_update_after_write(struct bch_fs *c,
 				   struct bch_inode_info *inode,
 				   struct bch_inode_unpacked *bi,
@@ -111,6 +111,15 @@ static inline u64 bch2_inode_opt_get(struct bch_inode_unpacked *inode,
 	}
 }
 
+static inline struct bch_io_opts
+io_opts(struct bch_fs *c, struct bch_inode_unpacked *inode)
+{
+	struct bch_io_opts opts = bch2_opts_to_inode_opts(c->opts);
+
+	bch2_io_opts_apply(&opts, bch2_inode_opts_get(inode));
+	return opts;
+}
+
 static inline u8 mode_to_type(umode_t mode)
 {
 	return (mode >> 12) & 15;
libbcachefs/io.c
@@ -19,6 +19,7 @@
 #include "ec.h"
 #include "error.h"
 #include "extents.h"
+#include "inode.h"
 #include "io.h"
 #include "journal.h"
 #include "keylist.h"
@@ -168,6 +169,258 @@ void bch2_bio_alloc_pages_pool(struct bch_fs *c, struct bio *bio,
 	mutex_unlock(&c->bio_bounce_pages_lock);
 }
 
+/* Extent update path: */
+
+static int sum_sector_overwrites(struct btree_trans *trans,
+				 struct btree_iter *extent_iter,
+				 struct bkey_i *new,
+				 bool may_allocate,
+				 bool *maybe_extending,
+				 s64 *delta)
+{
+	struct btree_iter *iter;
+	struct bkey_s_c old;
+	int ret = 0;
+
+	*maybe_extending = true;
+	*delta = 0;
+
+	iter = bch2_trans_copy_iter(trans, extent_iter);
+	if (IS_ERR(iter))
+		return PTR_ERR(iter);
+
+	for_each_btree_key_continue(iter, BTREE_ITER_SLOTS, old, ret) {
+		if (!may_allocate &&
+		    bch2_bkey_nr_ptrs_allocated(old) <
+		    bch2_bkey_nr_dirty_ptrs(bkey_i_to_s_c(new))) {
+			ret = -ENOSPC;
+			break;
+		}
+
+		*delta += (min(new->k.p.offset,
+			       old.k->p.offset) -
+			   max(bkey_start_offset(&new->k),
+			       bkey_start_offset(old.k))) *
+			(bkey_extent_is_allocation(&new->k) -
+			 bkey_extent_is_allocation(old.k));
+
+		if (bkey_cmp(old.k->p, new->k.p) >= 0) {
+			/*
+			 * Check if there's already data above where we're
+			 * going to be writing to - this means we're definitely
+			 * not extending the file:
+			 *
+			 * Note that it's not sufficient to check if there's
+			 * data up to the sector offset we're going to be
+			 * writing to, because i_size could be up to one block
+			 * less:
+			 */
+			if (!bkey_cmp(old.k->p, new->k.p))
+				old = bch2_btree_iter_next(iter);
+
+			if (old.k && !bkey_err(old) &&
+			    old.k->p.inode == extent_iter->pos.inode &&
+			    bkey_extent_is_data(old.k))
+				*maybe_extending = false;
+
+			break;
+		}
+	}
+
+	bch2_trans_iter_put(trans, iter);
+	return ret;
+}
+
+int bch2_extent_update(struct btree_trans *trans,
+		       struct btree_iter *iter,
+		       struct bkey_i *k,
+		       struct disk_reservation *disk_res,
+		       u64 *journal_seq,
+		       u64 new_i_size,
+		       s64 *i_sectors_delta)
+{
+	/* this must live until after bch2_trans_commit(): */
+	struct bkey_inode_buf inode_p;
+	bool extending = false;
+	s64 delta = 0;
+	int ret;
+
+	ret = bch2_extent_trim_atomic(k, iter);
+	if (ret)
+		return ret;
+
+	ret = sum_sector_overwrites(trans, iter, k,
+			disk_res && disk_res->sectors != 0,
+			&extending, &delta);
+	if (ret)
+		return ret;
+
+	new_i_size = extending
+		? min(k->k.p.offset << 9, new_i_size)
+		: 0;
+
+	if (delta || new_i_size) {
+		struct btree_iter *inode_iter;
+		struct bch_inode_unpacked inode_u;
+
+		inode_iter = bch2_inode_peek(trans, &inode_u,
+				k->k.p.inode, BTREE_ITER_INTENT);
+		if (IS_ERR(inode_iter))
+			return PTR_ERR(inode_iter);
+
+		/*
+		 * XXX:
+		 * writeback can race a bit with truncate, because truncate
+		 * first updates the inode then truncates the pagecache. This is
+		 * ugly, but lets us preserve the invariant that the in memory
+		 * i_size is always >= the on disk i_size.
+		 *
+		BUG_ON(new_i_size > inode_u.bi_size &&
+		       (inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY));
+		 */
+		BUG_ON(new_i_size > inode_u.bi_size && !extending);
+
+		if (!(inode_u.bi_flags & BCH_INODE_I_SIZE_DIRTY) &&
+		    new_i_size > inode_u.bi_size)
+			inode_u.bi_size = new_i_size;
+		else
+			new_i_size = 0;
+
+		inode_u.bi_sectors += delta;
+
+		if (delta || new_i_size) {
+			bch2_inode_pack(&inode_p, &inode_u);
+			bch2_trans_update(trans, inode_iter,
+					  &inode_p.inode.k_i);
+		}
+
+		bch2_trans_iter_put(trans, inode_iter);
+	}
+
+	bch2_trans_update(trans, iter, k);
+
+	ret = bch2_trans_commit(trans, disk_res, journal_seq,
+				BTREE_INSERT_NOFAIL|
+				BTREE_INSERT_ATOMIC|
+				BTREE_INSERT_USE_RESERVE);
+	if (!ret && i_sectors_delta)
+		*i_sectors_delta += delta;
+
+	return ret;
+}
+
+int bch2_fpunch_at(struct btree_trans *trans, struct btree_iter *iter,
+		   struct bpos end, u64 *journal_seq,
+		   s64 *i_sectors_delta)
+{
+	struct bch_fs *c = trans->c;
+	unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
+	struct bkey_s_c k;
+	int ret = 0, ret2 = 0;
+
+	while ((k = bch2_btree_iter_peek(iter)).k &&
+	       bkey_cmp(iter->pos, end) < 0) {
+		struct disk_reservation disk_res =
+			bch2_disk_reservation_init(c, 0);
+		struct bkey_i delete;
+
+		ret = bkey_err(k);
+		if (ret)
+			goto btree_err;
+
+		bkey_init(&delete.k);
+		delete.k.p = iter->pos;
+
+		/* create the biggest key we can */
+		bch2_key_resize(&delete.k, max_sectors);
+		bch2_cut_back(end, &delete.k);
+
+		bch2_trans_begin_updates(trans);
+
+		ret = bch2_extent_update(trans, iter, &delete,
+				&disk_res, journal_seq,
+				0, i_sectors_delta);
+		bch2_disk_reservation_put(c, &disk_res);
+btree_err:
+		if (ret == -EINTR) {
+			ret2 = ret;
+			ret = 0;
+		}
+		if (ret)
+			break;
+	}
+
+	if (bkey_cmp(iter->pos, end) > 0) {
+		bch2_btree_iter_set_pos(iter, end);
+		ret = bch2_btree_iter_traverse(iter);
+	}
+
+	return ret ?: ret2;
+}
+
+int bch2_fpunch(struct bch_fs *c, u64 inum, u64 start, u64 end,
+		u64 *journal_seq, s64 *i_sectors_delta)
+{
+	struct btree_trans trans;
+	struct btree_iter *iter;
+	int ret = 0;
+
+	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+				   POS(inum, start),
+				   BTREE_ITER_INTENT);
+
+	ret = bch2_fpunch_at(&trans, iter, POS(inum, end),
+			     journal_seq, i_sectors_delta);
+	bch2_trans_exit(&trans);
+
+	if (ret == -EINTR)
+		ret = 0;
+
+	return ret;
+}
+
+int bch2_write_index_default(struct bch_write_op *op)
+{
+	struct bch_fs *c = op->c;
+	struct keylist *keys = &op->insert_keys;
+	struct bkey_i *k = bch2_keylist_front(keys);
+	struct btree_trans trans;
+	struct btree_iter *iter;
+	int ret;
+
+	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 1024);
+
+	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
+				   bkey_start_pos(&k->k),
+				   BTREE_ITER_SLOTS|BTREE_ITER_INTENT);
+
+	do {
+		BKEY_PADDED(k) tmp;
+
+		bkey_copy(&tmp.k, bch2_keylist_front(keys));
+
+		bch2_trans_begin_updates(&trans);
+
+		ret = bch2_extent_update(&trans, iter, &tmp.k,
+				&op->res, op_journal_seq(op),
+				op->new_i_size, &op->i_sectors_delta);
+		if (ret == -EINTR)
+			continue;
+		if (ret)
+			break;
+
+		if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
+			bch2_cut_front(iter->pos, bch2_keylist_front(keys));
+		else
+			bch2_keylist_pop_front(keys);
+	} while (!bch2_keylist_empty(keys));
+
+	bch2_trans_exit(&trans);
+
+	return ret;
+}
+
 /* Writes */
 
 void bch2_submit_wbio_replicas(struct bch_write_bio *wbio, struct bch_fs *c,
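A note on the accounting in sum_sector_overwrites() above: each key overlapping the new extent contributes overlap length multiplied by the change in "is this an allocation", so data written over a hole adds sectors, data over data nets zero, and a hole punched into data subtracts. The formula with concrete numbers — the values are invented for illustration:

#include <stdio.h>

static long long min64(long long a, long long b) { return a < b ? a : b; }
static long long max64(long long a, long long b) { return a > b ? a : b; }

/* delta += (min(new_end, old_end) - max(new_start, old_start)) *
 *	    (alloc(new) - alloc(old)) */
static long long overlap_delta(long long ns, long long ne, int na,
			       long long os, long long oe, int oa)
{
	return (min64(ne, oe) - max64(ns, os)) * (na - oa);
}

int main(void)
{
	/* write data [0,64) over a hole [0,128): +64 sectors */
	printf("%+lld\n", overlap_delta(0, 64, 1, 0, 128, 0));
	/* write data [0,64) over existing data [0,64): net 0 */
	printf("%+lld\n", overlap_delta(0, 64, 1, 0, 64, 1));
	/* punch a hole [32,64) into data [0,128): -32 sectors */
	printf("%+lld\n", overlap_delta(32, 64, 0, 0, 128, 1));
	return 0;
}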
@@ -246,58 +499,6 @@ static void bch2_write_done(struct closure *cl)
 	closure_return(cl);
 }
 
-int bch2_write_index_default(struct bch_write_op *op)
-{
-	struct bch_fs *c = op->c;
-	struct btree_trans trans;
-	struct btree_iter *iter;
-	struct keylist *keys = &op->insert_keys;
-	int ret;
-
-	BUG_ON(bch2_keylist_empty(keys));
-	bch2_verify_keylist_sorted(keys);
-
-	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 256);
-retry:
-	bch2_trans_begin(&trans);
-
-	iter = bch2_trans_get_iter(&trans, BTREE_ID_EXTENTS,
-				   bkey_start_pos(&bch2_keylist_front(keys)->k),
-				   BTREE_ITER_INTENT);
-
-	do {
-		BKEY_PADDED(k) split;
-
-		bkey_copy(&split.k, bch2_keylist_front(keys));
-
-		ret = bch2_extent_trim_atomic(&split.k, iter);
-		if (ret)
-			break;
-
-		bch2_trans_update(&trans, iter, &split.k);
-
-		ret = bch2_trans_commit(&trans, &op->res, op_journal_seq(op),
-					BTREE_INSERT_NOFAIL|
-					BTREE_INSERT_USE_RESERVE);
-		if (ret)
-			break;
-
-		if (bkey_cmp(iter->pos, bch2_keylist_front(keys)->k.p) < 0)
-			bch2_cut_front(iter->pos, bch2_keylist_front(keys));
-		else
-			bch2_keylist_pop_front(keys);
-	} while (!bch2_keylist_empty(keys));
-
-	if (ret == -EINTR) {
-		ret = 0;
-		goto retry;
-	}
-
-	bch2_trans_exit(&trans);
-
-	return ret;
-}
-
 /**
  * bch_write_index - after a write, update index to point to new data
  */
@@ -54,6 +54,13 @@ static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
 		: op->c->wq;
 }
 
+int bch2_extent_update(struct btree_trans *, struct btree_iter *,
+		       struct bkey_i *, struct disk_reservation *,
+		       u64 *, u64, s64 *);
+int bch2_fpunch_at(struct btree_trans *, struct btree_iter *,
+		   struct bpos, u64 *, s64 *);
+int bch2_fpunch(struct bch_fs *c, u64, u64, u64, u64 *, s64 *);
+
 int bch2_write_index_default(struct bch_write_op *);
 
 static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
@@ -78,6 +85,8 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
 	op->write_point = (struct write_point_specifier) { 0 };
 	op->res = (struct disk_reservation) { 0 };
 	op->journal_seq = 0;
+	op->new_i_size = U64_MAX;
+	op->i_sectors_delta = 0;
 	op->index_update_fn = bch2_write_index_default;
 }
 
@@ -132,6 +132,8 @@ struct bch_write_op {
 		u64			*journal_seq_p;
 		u64			journal_seq;
 	};
+	u64			new_i_size;
+	s64			i_sectors_delta;
 
 	int			(*index_update_fn)(struct bch_write_op *);
 
@@ -272,6 +272,8 @@ retry:
 		if (ret)
 			goto err;
 
+		atomic_end = bpos_min(k->k.p, iter->l[0].b->key.k.p);
+
 		split_iter = bch2_trans_copy_iter(&trans, iter);
 		ret = PTR_ERR_OR_ZERO(split_iter);
 		if (ret)
@@ -282,10 +284,6 @@ retry:
 		if (ret)
 			goto err;
 
-		ret = bch2_extent_atomic_end(split_iter, k, &atomic_end);
-		if (ret)
-			goto err;
-
 		if (!remark &&
 		    remark_if_split &&
 		    bkey_cmp(atomic_end, k->k.p) < 0) {
@@ -2,8 +2,8 @@
 #include "bcachefs.h"
 #include "btree_update.h"
 #include "extents.h"
-#include "fs.h"
-#include "fs-io.h"
+#include "inode.h"
+#include "io.h"
 #include "reflink.h"
 
 #include <linux/sched/signal.h>
@@ -70,12 +70,6 @@ void bch2_reflink_v_to_text(struct printbuf *out, struct bch_fs *c,
 	bch2_bkey_ptrs_to_text(out, c, k);
 }
 
-/*
- * bch2_remap_range() depends on bch2_extent_update(), which depends on various
- * things tied to the linux vfs for inode updates, for now:
- */
-#ifndef NO_BCACHEFS_FS
-
 static int bch2_make_extent_indirect(struct btree_trans *trans,
 				     struct btree_iter *extent_iter,
 				     struct bkey_i_extent *e)
@@ -144,26 +138,24 @@ err:
 static struct bkey_s_c get_next_src(struct btree_iter *iter, struct bpos end)
 {
 	struct bkey_s_c k = bch2_btree_iter_peek(iter);
+	int ret;
 
-	while (1) {
-		if (bkey_err(k))
-			return k;
-
+	for_each_btree_key_continue(iter, 0, k, ret) {
 		if (bkey_cmp(iter->pos, end) >= 0)
 			return bkey_s_c_null;
 
 		if (k.k->type == KEY_TYPE_extent ||
 		    k.k->type == KEY_TYPE_reflink_p)
-			return k;
-
-		k = bch2_btree_iter_next(iter);
+			break;
 	}
 
+	return k;
 }
 
 s64 bch2_remap_range(struct bch_fs *c,
-		     struct bch_inode_info *dst_inode,
 		     struct bpos dst_start, struct bpos src_start,
-		     u64 remap_sectors, u64 new_i_size)
+		     u64 remap_sectors, u64 *journal_seq,
+		     u64 new_i_size, s64 *i_sectors_delta)
 {
 	struct btree_trans trans;
 	struct btree_iter *dst_iter, *src_iter;
@@ -172,7 +164,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 	struct bpos dst_end = dst_start, src_end = src_start;
 	struct bpos dst_want, src_want;
 	u64 src_done, dst_done;
-	int ret = 0;
+	int ret = 0, ret2 = 0;
 
 	if (!(c->sb.features & (1ULL << BCH_FEATURE_REFLINK))) {
 		mutex_lock(&c->sb_lock);
@@ -215,7 +207,7 @@ s64 bch2_remap_range(struct bch_fs *c,
 
 		if (bkey_cmp(dst_iter->pos, dst_want) < 0) {
 			ret = bch2_fpunch_at(&trans, dst_iter, dst_want,
-					     dst_inode, new_i_size);
+					     journal_seq, i_sectors_delta);
 			if (ret)
 				goto btree_err;
 			continue;
@@ -261,9 +253,9 @@ s64 bch2_remap_range(struct bch_fs *c,
 				min(src_k.k->p.offset - src_iter->pos.offset,
 				    dst_end.offset - dst_iter->pos.offset));
 
-		ret = bch2_extent_update(&trans, dst_inode, NULL, NULL,
-					 dst_iter, &new_dst.k,
-					 new_i_size, false, true, NULL);
+		ret = bch2_extent_update(&trans, dst_iter, &new_dst.k,
+					 NULL, journal_seq,
+					 new_i_size, i_sectors_delta);
 		if (ret)
 			goto btree_err;
 
@@ -284,17 +276,24 @@ err:
 	dst_done = dst_iter->pos.offset - dst_start.offset;
 	new_i_size = min(dst_iter->pos.offset << 9, new_i_size);
 
+	bch2_trans_begin(&trans);
+
+	do {
+		struct bch_inode_unpacked inode_u;
+		struct btree_iter *inode_iter;
+
+		inode_iter = bch2_inode_peek(&trans, &inode_u,
+				dst_start.inode, BTREE_ITER_INTENT);
+		ret2 = PTR_ERR_OR_ZERO(inode_iter);
+
+		if (!ret2 &&
+		    inode_u.bi_size < new_i_size)
+			ret2 = bch2_inode_write(&trans, inode_iter, &inode_u) ?:
+				bch2_trans_commit(&trans, NULL, journal_seq,
+						  BTREE_INSERT_ATOMIC);
+	} while (ret2 == -EINTR);
+
 	ret = bch2_trans_exit(&trans) ?: ret;
 
-	mutex_lock(&dst_inode->ei_update_lock);
-	if (dst_inode->v.i_size < new_i_size) {
-		i_size_write(&dst_inode->v, new_i_size);
-		ret = bch2_write_inode_size(c, dst_inode, new_i_size,
-					    ATTR_MTIME|ATTR_CTIME);
-	}
-	mutex_unlock(&dst_inode->ei_update_lock);
-
-	return dst_done ?: ret;
+	return dst_done ?: ret ?: ret2;
 }
-
-#endif /* NO_BCACHEFS_FS */
@@ -24,9 +24,7 @@ void bch2_reflink_v_to_text(struct printbuf *, struct bch_fs *,
 	.val_to_text	= bch2_reflink_v_to_text,	\
 }
 
-#ifndef NO_BCACHEFS_FS
-s64 bch2_remap_range(struct bch_fs *, struct bch_inode_info *,
-		     struct bpos, struct bpos, u64, u64);
-#endif /* NO_BCACHEFS_FS */
+s64 bch2_remap_range(struct bch_fs *, struct bpos, struct bpos,
+		     u64, u64 *, u64, s64 *);
 
 #endif /* _BCACHEFS_REFLINK_H */
@@ -84,10 +84,8 @@ static void extent_to_replicas(struct bkey_s_c k,
 		if (p.ptr.cached)
 			continue;
 
-		if (p.ec_nr) {
+		if (p.has_ec)
 			r->nr_required = 0;
-			break;
-		}
 
 		r->devs[r->nr_devs++] = p.ptr.dev;
 	}
@@ -1030,9 +1030,10 @@ bch2_journal_super_entries_add_common(struct bch_fs *c,
 		struct jset_entry_data_usage *u =
 			container_of(entry, struct jset_entry_data_usage, entry);
 
-		memset(u, 0, sizeof(*u));
-		u->entry.u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
+		int u64s = DIV_ROUND_UP(sizeof(*u) + e->nr_devs,
 					     sizeof(u64)) - 1;
+		memset(u, 0, u64s * sizeof(u64));
+		u->entry.u64s = u64s;
 		u->entry.type = BCH_JSET_ENTRY_data_usage;
 		u->v = cpu_to_le64(c->usage_base->replicas[i]);
 		memcpy(&u->r, e, replicas_entry_bytes(e));
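The fix above sizes both the memset and entry.u64s from the replicas entry's real length instead of a fixed sizeof(*u): u64s counts the 8-byte words after the entry's own header word (hence the "- 1"), so it grows with the device list. A standalone sketch of that arithmetic — the 24-byte sizeof(*u) is an assumption for illustration, not taken from the source:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned sizeof_u = 24;		/* assumed struct size, for illustration */

	for (unsigned nr_devs = 0; nr_devs <= 8; nr_devs += 4) {
		unsigned u64s = DIV_ROUND_UP(sizeof_u + nr_devs, 8) - 1;

		printf("nr_devs=%u -> entry.u64s=%u, %u bytes zeroed\n",
		       nr_devs, u64s, u64s * 8);
	}
	return 0;
}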