Mirror of https://github.com/koverstreet/bcachefs-tools.git, synced 2025-12-08 00:00:12 +03:00
Update bcachefs sources to 553ac0f97573 bcachefs: kill gotos in bch2_disk_accounting_validate_late()
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
This commit is contained in:
parent fa1882de61
commit a8a7967dd6
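The diff below applies one refactoring pattern throughout: goto-based error paths become early returns, with cleanup moved into scope-based guards (CLASS(), guard(), __cleanup) and error propagation into the tree's try()/bkey_try() macros. A minimal standalone sketch of the idea follows — hypothetical names only; this try() is a simplified stand-in for the macro used in bcachefs, not its actual definition:

```c
#include <stdlib.h>

/* Simplified stand-in for bcachefs's try(): on a nonzero error code,
 * return it immediately, so callers need no 'goto err' label. */
#define try(_expr)				\
	do {					\
		int _ret = (_expr);		\
		if (_ret)			\
			return _ret;		\
	} while (0)

struct resource { int dummy; };

static void resource_release(struct resource **r) { free(*r); }

static int step_one(struct resource *r) { return r->dummy ? -1 : 0; }
static int step_two(struct resource *r) { return r->dummy ? -1 : 0; }

/* Before: every error path funnels through a shared cleanup label */
static int do_work_old(void)
{
	struct resource *r = calloc(1, sizeof(*r));
	int ret = 0;

	if (!r)
		return -1;
	ret = step_one(r);
	if (ret)
		goto err;
	ret = step_two(r);
	if (ret)
		goto err;
err:
	free(r);
	return ret;
}

/* After: the cleanup attribute (the mechanism behind CLASS()/guard())
 * frees the resource when it goes out of scope, so try() can turn each
 * error path into a plain early return. */
static int do_work_new(void)
{
	struct resource *r __attribute__((cleanup(resource_release))) =
		calloc(1, sizeof(*r));

	if (!r)
		return -1;
	try(step_one(r));
	try(step_two(r));
	return 0;
}
```

With cleanup tied to scope, the late-validation function in this commit can return straight out of the switch (via the new disk_accounting_invalid_dev() helper) instead of funneling through the old invalid_device:/fsck_err: labels.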
@@ -1 +1 @@
e3e6e947d0c9af7dce749a5d9a88ef5d6cc60311
553ac0f9757322932398e1a36bf44fc787d06ba8

@@ -150,7 +150,7 @@ impl<'t> BtreeNodeIter<'t> {
) -> BtreeNodeIter<'t> {
unsafe {
let mut iter: MaybeUninit<c::btree_iter> = MaybeUninit::uninit();
c::bch2_trans_node_iter_init(
c::__bch2_trans_node_iter_init(
trans.raw,
iter.as_mut_ptr(),
btree,

@@ -212,7 +212,8 @@ int bch2_accounting_validate(struct bch_fs *c, struct bkey_s_c k,
"accounting key replicas entry with bad nr_required");

for (unsigned i = 0; i + 1 < acc_k.replicas.nr_devs; i++)
bkey_fsck_err_on(acc_k.replicas.devs[i] >= acc_k.replicas.devs[i + 1],
bkey_fsck_err_on(acc_k.replicas.devs[i] != BCH_SB_MEMBER_INVALID &&
acc_k.replicas.devs[i] >= acc_k.replicas.devs[i + 1],
c, accounting_key_replicas_devs_unsorted,
"accounting key replicas entry with unsorted devs");

@@ -663,13 +664,38 @@ static int accounting_read_key(struct btree_trans *trans, struct bkey_s_c k)
BCH_ACCOUNTING_read, false);
}

static int disk_accounting_invalid_dev(struct btree_trans *trans,
struct disk_accounting_pos *acc,
u64 *v, unsigned nr,
unsigned dev)
{
CLASS(printbuf, buf)();
bch2_accounting_key_to_text(&buf, acc);
int ret = 0;

if (fsck_err(trans, accounting_to_invalid_device,
"accounting entry points to invalid device %u\n%s",
dev, buf.buf)) {
bch2_u64s_neg(v, nr);

return bch2_disk_accounting_mod(trans, acc, v, nr, false) ?:
bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
-BCH_ERR_remove_disk_accounting_entry;
} else {
return bch_err_throw(trans->c, remove_disk_accounting_entry);
}
fsck_err:
return ret;
}


static int bch2_disk_accounting_validate_late(struct btree_trans *trans,
struct disk_accounting_pos *acc,
u64 *v, unsigned nr)
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
int ret = 0, invalid_dev = -1;
int ret = 0;

switch (acc->type) {
case BCH_DISK_ACCOUNTING_replicas: {
@@ -678,10 +704,8 @@ static int bch2_disk_accounting_validate_late(struct btree_trans *trans,

for (unsigned i = 0; i < r.e.nr_devs; i++)
if (r.e.devs[i] != BCH_SB_MEMBER_INVALID &&
!bch2_dev_exists(c, r.e.devs[i])) {
invalid_dev = r.e.devs[i];
goto invalid_device;
}
!bch2_dev_exists(c, r.e.devs[i]))
return disk_accounting_invalid_dev(trans, acc, v, nr, r.e.devs[i]);

/*
* All replicas entry checks except for invalid device are done
@@ -694,47 +718,20 @@ static int bch2_disk_accounting_validate_late(struct btree_trans *trans,
"accounting not marked in superblock replicas\n%s",
(printbuf_reset(&buf),
bch2_accounting_key_to_text(&buf, acc),
buf.buf))) {
/*
* We're not RW yet and still single threaded, dropping
* and retaking lock is ok:
*/
percpu_up_write(&c->mark_lock);
ret = bch2_mark_replicas(c, &r.e);
if (ret)
goto fsck_err;
percpu_down_write(&c->mark_lock);
}
buf.buf)))
try(bch2_mark_replicas(c, &r.e));
break;
}

case BCH_DISK_ACCOUNTING_dev_data_type:
if (!bch2_dev_exists(c, acc->dev_data_type.dev)) {
invalid_dev = acc->dev_data_type.dev;
goto invalid_device;
}
if (!bch2_dev_exists(c, acc->dev_data_type.dev))
return disk_accounting_invalid_dev(trans, acc, v, nr,
acc->dev_data_type.dev);
break;
}

fsck_err:
return ret;
invalid_device:
if (fsck_err(trans, accounting_to_invalid_device,
"accounting entry points to invalid device %i\n%s",
invalid_dev,
(printbuf_reset(&buf),
bch2_accounting_key_to_text(&buf, acc),
buf.buf))) {
for (unsigned i = 0; i < nr; i++)
v[i] = -v[i];

ret = commit_do(trans, NULL, NULL, 0,
bch2_disk_accounting_mod(trans, acc, v, nr, false)) ?:
-BCH_ERR_remove_disk_accounting_entry;
} else {
ret = bch_err_throw(c, remove_disk_accounting_entry);
}
goto fsck_err;
}

static struct journal_key *accumulate_newer_accounting_keys(struct btree_trans *trans, struct journal_key *i)
@@ -789,100 +786,99 @@ static int accounting_read_mem_fixups(struct btree_trans *trans)
struct bch_accounting_mem *acc = &c->accounting;
CLASS(printbuf, underflow_err)();

scoped_guard(percpu_write, &c->mark_lock) {
darray_for_each_reverse(acc->k, i) {
struct disk_accounting_pos acc_k;
bpos_to_disk_accounting_pos(&acc_k, i->pos);
darray_for_each_reverse(acc->k, i) {
struct disk_accounting_pos acc_k;
bpos_to_disk_accounting_pos(&acc_k, i->pos);

u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
memset(v, 0, sizeof(v));
u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
memset(v, 0, sizeof(v));

for (unsigned j = 0; j < i->nr_counters; j++)
v[j] = percpu_u64_get(i->v[0] + j);
for (unsigned j = 0; j < i->nr_counters; j++)
v[j] = percpu_u64_get(i->v[0] + j);

/*
* If the entry counters are zeroed, it should be treated as
* nonexistent - it might point to an invalid device.
*
* Remove it, so that if it's re-added it gets re-marked in the
* superblock:
*/
int ret = bch2_is_zero(v, sizeof(v[0]) * i->nr_counters)
? -BCH_ERR_remove_disk_accounting_entry
: bch2_disk_accounting_validate_late(trans, &acc_k, v, i->nr_counters);
/*
* If the entry counters are zeroed, it should be treated as
* nonexistent - it might point to an invalid device.
*
* Remove it, so that if it's re-added it gets re-marked in the
* superblock:
*/
int ret = bch2_is_zero(v, sizeof(v[0]) * i->nr_counters)
? -BCH_ERR_remove_disk_accounting_entry
: lockrestart_do(trans,
bch2_disk_accounting_validate_late(trans, &acc_k, v, i->nr_counters));

if (ret == -BCH_ERR_remove_disk_accounting_entry) {
free_percpu(i->v[0]);
free_percpu(i->v[1]);
darray_remove_item(&acc->k, i);
ret = 0;
continue;
}

if (ret)
return ret;
if (ret == -BCH_ERR_remove_disk_accounting_entry) {
free_percpu(i->v[0]);
free_percpu(i->v[1]);
darray_remove_item(&acc->k, i);
ret = 0;
continue;
}

eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
accounting_pos_cmp, NULL);
if (ret)
return ret;
}

for (unsigned i = 0; i < acc->k.nr; i++) {
struct disk_accounting_pos k;
bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos);
eytzinger0_sort(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
accounting_pos_cmp, NULL);

u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);
for (unsigned i = 0; i < acc->k.nr; i++) {
struct disk_accounting_pos k;
bpos_to_disk_accounting_pos(&k, acc->k.data[i].pos);

u64 v[BCH_ACCOUNTING_MAX_COUNTERS];
bch2_accounting_mem_read_counters(acc, i, v, ARRAY_SIZE(v), false);

/*
* Check for underflow, schedule check_allocations
* necessary:
*
* XXX - see if we can factor this out to run on a bkey
* so we can check everything lazily, right now we don't
* check the non in-mem counters at all
*/
bool underflow = false;
for (unsigned j = 0; j < acc->k.data[i].nr_counters; j++)
underflow |= (s64) v[j] < 0;

if (underflow) {
if (!underflow_err.pos) {
bch2_log_msg_start(c, &underflow_err);
prt_printf(&underflow_err, "Accounting underflow for\n");
}
bch2_accounting_key_to_text(&underflow_err, &k);

/*
* Check for underflow, schedule check_allocations
* necessary:
*
* XXX - see if we can factor this out to run on a bkey
* so we can check everything lazily, right now we don't
* check the non in-mem counters at all
*/
bool underflow = false;
for (unsigned j = 0; j < acc->k.data[i].nr_counters; j++)
underflow |= (s64) v[j] < 0;
prt_printf(&underflow_err, " %lli", v[j]);
prt_newline(&underflow_err);
}

if (underflow) {
if (!underflow_err.pos) {
bch2_log_msg_start(c, &underflow_err);
prt_printf(&underflow_err, "Accounting underflow for\n");
}
bch2_accounting_key_to_text(&underflow_err, &k);
guard(preempt)();
struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage);

for (unsigned j = 0; j < acc->k.data[i].nr_counters; j++)
prt_printf(&underflow_err, " %lli", v[j]);
prt_newline(&underflow_err);
}

guard(preempt)();
struct bch_fs_usage_base *usage = this_cpu_ptr(c->usage);

switch (k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved:
usage->reserved += v[0] * k.persistent_reserved.nr_replicas;
break;
case BCH_DISK_ACCOUNTING_replicas:
fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]);
break;
case BCH_DISK_ACCOUNTING_dev_data_type: {
guard(rcu)();
struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.dev_data_type.dev);
if (ca) {
struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type];
percpu_u64_set(&d->buckets, v[0]);
percpu_u64_set(&d->sectors, v[1]);
percpu_u64_set(&d->fragmented, v[2]);

if (k.dev_data_type.data_type == BCH_DATA_sb ||
k.dev_data_type.data_type == BCH_DATA_journal)
usage->hidden += v[0] * ca->mi.bucket_size;
}
break;
}
switch (k.type) {
case BCH_DISK_ACCOUNTING_persistent_reserved:
usage->reserved += v[0] * k.persistent_reserved.nr_replicas;
break;
case BCH_DISK_ACCOUNTING_replicas:
fs_usage_data_type_to_base(usage, k.replicas.data_type, v[0]);
break;
case BCH_DISK_ACCOUNTING_dev_data_type: {
guard(rcu)();
struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.dev_data_type.dev);
if (ca) {
struct bch_dev_usage_type __percpu *d = &ca->usage->d[k.dev_data_type.data_type];
percpu_u64_set(&d->buckets, v[0]);
percpu_u64_set(&d->sectors, v[1]);
percpu_u64_set(&d->fragmented, v[2]);

if (k.dev_data_type.data_type == BCH_DATA_sb ||
k.dev_data_type.data_type == BCH_DATA_journal)
usage->hidden += v[0] * ca->mi.bucket_size;
}
break;
}
}
}


@@ -483,16 +483,9 @@ bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_i
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
int ret = bkey_err(k);
if (unlikely(ret))
goto err;
return ERR_PTR(ret);

struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
ret = PTR_ERR_OR_ZERO(a);
if (unlikely(ret))
goto err;
return a;
err:
bch2_trans_iter_exit(iter);
return ERR_PTR(ret);
return bch2_alloc_to_v4_mut_inlined(trans, k);
}

__flatten
@@ -1357,35 +1350,30 @@ again:
return k;
}

static void bch2_do_invalidates_work(struct work_struct *work)
static void __bch2_do_invalidates(struct bch_dev *ca)
{
struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
struct bch_fs *c = ca->fs;
CLASS(btree_trans, trans)(c);
int ret = 0;

struct wb_maybe_flush last_flushed __cleanup(wb_maybe_flush_exit);
wb_maybe_flush_init(&last_flushed);

ret = bch2_btree_write_buffer_tryflush(trans);
if (ret)
goto err;
bch2_btree_write_buffer_tryflush(trans);

s64 nr_to_invalidate =
should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
struct btree_iter iter;
bool wrapped = false;

bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
lru_pos(ca->dev_idx, 0,
((bch2_current_io_time(c, READ) + U32_MAX) &
LRU_TIME_MAX)), 0);
CLASS(btree_iter, iter)(trans, BTREE_ID_lru,
lru_pos(ca->dev_idx, 0,
((bch2_current_io_time(c, READ) + U32_MAX) &
LRU_TIME_MAX)), 0);

while (true) {
bch2_trans_begin(trans);

struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
ret = bkey_err(k);
int ret = bkey_err(k);
if (ret)
goto restart_err;
if (!k.k)
@@ -1401,8 +1389,15 @@ restart_err:
wb_maybe_flush_inc(&last_flushed);
bch2_btree_iter_advance(&iter);
}
bch2_trans_iter_exit(&iter);
err:
}

static void bch2_do_invalidates_work(struct work_struct *work)
{
struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
struct bch_fs *c = ca->fs;

__bch2_do_invalidates(ca);

enumerated_ref_put(&ca->io_ref[WRITE], BCH_DEV_WRITE_REF_do_invalidates);
enumerated_ref_put(&c->writes, BCH_WRITE_REF_invalidate);
}

@@ -343,7 +343,6 @@ static struct bkey_s_c __bch2_backpointer_get_key(struct btree_trans *trans,
int ret = backpointer_target_not_found(trans, bp, k, last_flushed, commit);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
} else {
bch2_trans_iter_exit(iter);
struct btree *b = __bch2_backpointer_get_node(trans, bp, iter, last_flushed, commit);
if (b == ERR_PTR(-BCH_ERR_backpointer_to_overwritten_btree_node))
return bkey_s_c_null;

@@ -327,6 +327,41 @@ static void check_discard_freespace_key_work(struct work_struct *work)
kfree(w);
}

static int delete_discard_freespace_key(struct btree_trans *trans,
struct btree_iter *iter,
bool async_repair)
{
struct bch_fs *c = trans->c;

if (!async_repair) {
try(bch2_btree_bit_mod_iter(trans, iter, false));
try(bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc));
return bch_err_throw(c, transaction_restart_commit);
} else {
/*
* We can't repair here when called from the allocator path: the
* commit will recurse back into the allocator
*
* Returning 1 indicates to the caller of
* check_discard_freespace_key() - "don't allocate this bucket"
*/
struct check_discard_freespace_key_async *w = kzalloc(sizeof(*w), GFP_KERNEL);
if (!w)
return 1;

if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_check_discard_freespace_key)) {
kfree(w);
return 1;
}

INIT_WORK(&w->work, check_discard_freespace_key_work);
w->c = c;
w->pos = BBPOS(iter->btree_id, iter->pos);
queue_work(c->write_ref_wq, &w->work);
return 1;
}
}

int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter, u8 *gen,
enum bch_fsck_flags fsck_flags)
{
@@ -344,18 +379,18 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i
bucket.offset &= ~(~0ULL << 56);
u64 genbits = iter->pos.offset & (~0ULL << 56);

struct btree_iter alloc_iter;
struct bkey_s_c alloc_k = bkey_try(bch2_bkey_get_iter(trans, &alloc_iter,
BTREE_ID_alloc, bucket,
async_repair ? BTREE_ITER_cached : 0));
CLASS(btree_iter, alloc_iter)(trans, BTREE_ID_alloc, bucket,
async_repair ? BTREE_ITER_cached : 0);
struct bkey_s_c alloc_k = bkey_try(bch2_btree_iter_peek_slot(&alloc_iter));

if (!bch2_dev_bucket_exists(c, bucket)) {
if (__fsck_err(trans, fsck_flags,
need_discard_freespace_key_to_invalid_dev_bucket,
"entry in %s btree for nonexistant dev:bucket %llu:%llu",
bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
goto delete;
ret = 1;
ret = delete_discard_freespace_key(trans, iter, async_repair);
else
ret = 1;
goto out;
}

@@ -374,8 +409,9 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i
iter->pos.offset,
a->data_type == state,
genbits >> 56, alloc_freespace_genbits(*a) >> 56))
goto delete;
ret = 1;
ret = delete_discard_freespace_key(trans, iter, async_repair);
else
ret = 1;
goto out;
}

@@ -383,38 +419,7 @@ int __bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_i
out:
fsck_err:
bch2_set_btree_iter_dontneed(&alloc_iter);
bch2_trans_iter_exit(&alloc_iter);
return ret;
delete:
if (!async_repair) {
ret = bch2_btree_bit_mod_iter(trans, iter, false) ?:
bch2_trans_commit(trans, NULL, NULL,
BCH_TRANS_COMMIT_no_enospc) ?:
bch_err_throw(c, transaction_restart_commit);
goto out;
} else {
/*
* We can't repair here when called from the allocator path: the
* commit will recurse back into the allocator
*/
struct check_discard_freespace_key_async *w =
kzalloc(sizeof(*w), GFP_KERNEL);
if (!w)
goto out;

if (!enumerated_ref_tryget(&c->writes, BCH_WRITE_REF_check_discard_freespace_key)) {
kfree(w);
goto out;
}

INIT_WORK(&w->work, check_discard_freespace_key_work);
w->c = c;
w->pos = BBPOS(iter->btree_id, iter->pos);
queue_work(c->write_ref_wq, &w->work);

ret = 1; /* don't allocate from this bucket */
goto out;
}
}

static int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter)
@@ -489,33 +494,24 @@ fsck_err:
return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
static int check_btree_alloc(struct btree_trans *trans)
{
struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
struct bch_dev *ca = NULL;
struct bkey hole;
struct bkey_s_c k;
struct progress_indicator_state progress;
bch2_progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_alloc));

CLASS(btree_iter, iter)(trans, BTREE_ID_alloc, POS_MIN, BTREE_ITER_prefetch);
CLASS(btree_iter, discard_iter)(trans, BTREE_ID_need_discard, POS_MIN, BTREE_ITER_prefetch);
CLASS(btree_iter, freespace_iter)(trans, BTREE_ID_freespace, POS_MIN, BTREE_ITER_prefetch);
CLASS(btree_iter, bucket_gens_iter)(trans, BTREE_ID_bucket_gens, POS_MIN, BTREE_ITER_prefetch);

struct bch_dev *ca __free(bch2_dev_put) = NULL;
int ret = 0;

struct progress_indicator_state progress;
bch2_progress_init(&progress, c, BIT_ULL(BTREE_ID_alloc));

CLASS(btree_trans, trans)(c);
bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
BTREE_ITER_prefetch);
bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
BTREE_ITER_prefetch);
bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
BTREE_ITER_prefetch);
bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
BTREE_ITER_prefetch);

while (1) {
struct bpos next;

bch2_trans_begin(trans);

k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
struct bkey hole;
struct bkey_s_c k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
ret = bkey_err(k);
if (ret)
goto bkey_err;
@@ -527,6 +523,7 @@ int bch2_check_alloc_info(struct bch_fs *c)
if (ret)
break;

struct bpos next;
if (k.k->type) {
next = bpos_nosnap_successor(k.k->p);

@@ -566,55 +563,58 @@ bkey_err:
if (ret)
break;
}
bch2_trans_iter_exit(&bucket_gens_iter);
bch2_trans_iter_exit(&freespace_iter);
bch2_trans_iter_exit(&discard_iter);
bch2_trans_iter_exit(&iter);
bch2_dev_put(ca);
ca = NULL;

if (ret < 0)
return ret;
return min(0, ret);
}

int bch2_check_alloc_info(struct bch_fs *c)
{
CLASS(btree_trans, trans)(c);

try(check_btree_alloc(trans));

try(for_each_btree_key(trans, iter,
BTREE_ID_need_discard, POS_MIN,
BTREE_ITER_prefetch, k,
bch2_check_discard_freespace_key(trans, &iter)));

bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
BTREE_ITER_prefetch);
while (1) {
bch2_trans_begin(trans);
k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;
{
/*
* Check freespace btree: we're iterating over every individual
* pos of the freespace keys
*/
CLASS(btree_iter, iter)(trans, BTREE_ID_freespace, POS_MIN,
BTREE_ITER_prefetch);
while (1) {
bch2_trans_begin(trans);
struct bkey_s_c k = bch2_btree_iter_peek(&iter);
if (!k.k)
break;

ret = bkey_err(k) ?:
bch2_check_discard_freespace_key(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
ret = 0;
continue;
}
if (ret) {
CLASS(printbuf, buf)();
bch2_bkey_val_to_text(&buf, c, k);
bch_err(c, "while checking %s", buf.buf);
break;
}
int ret = bkey_err(k) ?:
bch2_check_discard_freespace_key(trans, &iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
ret = 0;
continue;
}
if (ret) {
CLASS(printbuf, buf)();
bch2_bkey_val_to_text(&buf, c, k);
bch_err(c, "while checking %s", buf.buf);
return ret;
}

bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
}
}
bch2_trans_iter_exit(&iter);
if (ret)
return ret;

ret = for_each_btree_key_commit(trans, iter,
try(for_each_btree_key_commit(trans, iter,
BTREE_ID_bucket_gens, POS_MIN,
BTREE_ITER_prefetch, k,
NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
bch2_check_bucket_gens_key(trans, &iter, k));
bch2_check_bucket_gens_key(trans, &iter, k)));

return ret;
return 0;
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,

@@ -1800,7 +1800,6 @@ static int bch2_btree_insert_node(struct btree_update *as, struct btree_trans *t
goto split;
}


ret = bch2_btree_node_check_topology(trans, b) ?:
bch2_btree_insert_keys_interior(as, trans, path, b,
path->l[b->c.level].iter, keys);

@@ -3179,13 +3179,13 @@ void bch2_trans_iter_init_outlined(struct btree_trans *trans,
ip);
}

void bch2_trans_node_iter_init(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree_id,
struct bpos pos,
unsigned locks_want,
unsigned depth,
enum btree_iter_update_trigger_flags flags)
void __bch2_trans_node_iter_init(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree_id,
struct bpos pos,
unsigned locks_want,
unsigned depth,
enum btree_iter_update_trigger_flags flags)
{
flags |= BTREE_ITER_not_extents;
flags |= BTREE_ITER_snapshot_field;

@@ -541,7 +541,7 @@ void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
enum btree_iter_update_trigger_flags,
unsigned long ip);

static inline void bch2_trans_iter_init(struct btree_trans *trans,
static inline void __bch2_trans_iter_init(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree, struct bpos pos,
enum btree_iter_update_trigger_flags flags)
@@ -555,10 +555,19 @@ static inline void bch2_trans_iter_init(struct btree_trans *trans,
bch2_trans_iter_init_outlined(trans, iter, btree, pos, flags, _RET_IP_);
}

static inline void bch2_trans_iter_init(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree, struct bpos pos,
enum btree_iter_update_trigger_flags flags)
{
bch2_trans_iter_exit(iter);
__bch2_trans_iter_init(trans, iter, btree, pos, flags);
}

#define bch2_trans_iter_class_init(_trans, _btree, _pos, _flags) \
({ \
struct btree_iter iter; \
bch2_trans_iter_init(_trans, &iter, (_btree), (_pos), (_flags)); \
__bch2_trans_iter_init(_trans, &iter, (_btree), (_pos), (_flags)); \
iter; \
})

@@ -594,16 +603,28 @@ DEFINE_CLASS(btree_iter_copy, struct btree_iter,
bch2_trans_iter_copy_class_init(src),
struct btree_iter *src)

void bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos,
unsigned, unsigned,
enum btree_iter_update_trigger_flags);
void __bch2_trans_node_iter_init(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos,
unsigned, unsigned,
enum btree_iter_update_trigger_flags);

static inline void bch2_trans_node_iter_init(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree,
struct bpos pos,
unsigned locks_want,
unsigned depth,
enum btree_iter_update_trigger_flags flags)
{
bch2_trans_iter_exit(iter);
__bch2_trans_node_iter_init(trans, iter, btree, pos, locks_want, depth, flags);
}

#define bch2_trans_node_iter_class_init(_trans, _btree, _pos, _locks_want, _depth, _flags)\
({ \
struct btree_iter iter; \
bch2_trans_node_iter_init(_trans, &iter, (_btree), (_pos), \
(_locks_want), (_depth), (_flags)); \
__bch2_trans_node_iter_init(_trans, &iter, (_btree), (_pos), \
(_locks_want), (_depth), (_flags)); \
iter; \
})

@@ -684,30 +705,6 @@ static __always_inline void *bch2_trans_kmalloc_nomemzero(struct btree_trans *tr
return bch2_trans_kmalloc_nomemzero_ip(trans, size, _THIS_IP_);
}

static inline struct bkey_s_c __bch2_bkey_get_iter(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree, struct bpos pos,
enum btree_iter_update_trigger_flags flags,
enum bch_bkey_type type)
{
bch2_trans_iter_init(trans, iter, btree, pos, flags);
struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

if (!bkey_err(k) && type && k.k->type != type)
k = bkey_s_c_err(bch_err_throw(trans->c, ENOENT_bkey_type_mismatch));
if (unlikely(bkey_err(k)))
bch2_trans_iter_exit(iter);
return k;
}

static inline struct bkey_s_c bch2_bkey_get_iter(struct btree_trans *trans,
struct btree_iter *iter,
enum btree_id btree, struct bpos pos,
enum btree_iter_update_trigger_flags flags)
{
return __bch2_bkey_get_iter(trans, iter, btree, pos, flags, 0);
}

static inline struct bkey_s_c __bch2_bkey_get_typed(struct btree_iter *iter,
enum bch_bkey_type type)
{
@@ -762,9 +759,8 @@ u32 bch2_trans_begin(struct btree_trans *);
({ \
bch2_trans_begin((_trans)); \
\
struct btree_iter _iter; \
bch2_trans_node_iter_init((_trans), &_iter, (_btree_id), \
_start, _locks_want, _depth, _flags); \
CLASS(btree_node_iter, _iter)((_trans), (_btree_id), _start, \
_locks_want, _depth, _flags); \
int _ret3 = 0; \
do { \
_ret3 = lockrestart_do((_trans), ({ \
@@ -778,7 +774,6 @@ u32 bch2_trans_begin(struct btree_trans *);
PTR_ERR_OR_ZERO(bch2_btree_iter_next_node(&_iter))); \
} while (!_ret3); \
\
bch2_trans_iter_exit(&(_iter)); \
_ret3; \
})


@@ -555,31 +555,23 @@ void *__bch2_trans_subbuf_alloc(struct btree_trans *trans,
}

int bch2_bkey_get_empty_slot(struct btree_trans *trans, struct btree_iter *iter,
enum btree_id btree, struct bpos end)
enum btree_id btree, struct bpos start, struct bpos end)
{
bch2_trans_iter_init(trans, iter, btree, end, BTREE_ITER_intent);
struct bkey_s_c k = bch2_btree_iter_peek_prev(iter);
int ret = bkey_err(k);
if (ret)
goto err;
struct bkey_s_c k = bkey_try(bch2_btree_iter_peek_prev(iter));

bch2_btree_iter_advance(iter);
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret)
goto err;
if (bpos_lt(iter->pos, start))
bch2_btree_iter_set_pos(iter, start);
else
bch2_btree_iter_advance(iter);

k = bkey_try(bch2_btree_iter_peek_slot(iter));
BUG_ON(k.k->type != KEY_TYPE_deleted);

if (bkey_gt(k.k->p, end)) {
ret = bch_err_throw(trans->c, ENOSPC_btree_slot);
goto err;
}
if (bkey_gt(k.k->p, end))
return bch_err_throw(trans->c, ENOSPC_btree_slot);

return 0;
err:
bch2_trans_iter_exit(iter);
return ret;
}

void bch2_trans_commit_hook(struct btree_trans *trans,

@@ -131,7 +131,7 @@ int bch2_trans_update_extent_overwrite(struct btree_trans *, struct btree_iter *
struct bkey_s_c, struct bkey_s_c);

int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
enum btree_id, struct bpos);
enum btree_id, struct bpos, struct bpos);

int __must_check bch2_trans_update_ip(struct btree_trans *, struct btree_iter *,
struct bkey_i *, enum btree_iter_update_trigger_flags,

@@ -393,11 +393,9 @@ static int bch2_btree_write_buffer_flush_locked(struct btree_trans *trans)
}
}

if (!iter.path || iter.btree_id != k->btree) {
bch2_trans_iter_exit(&iter);
if (!iter.path || iter.btree_id != k->btree)
bch2_trans_iter_init(trans, &iter, k->btree, k->k.k.p,
BTREE_ITER_intent|BTREE_ITER_all_snapshots);
}

bch2_btree_iter_set_pos(&iter, k->k.k.p);
btree_iter_path(trans, &iter)->preserve = false;

@@ -352,7 +352,6 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
u64 *i_sectors_delta)
{
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_i_logged_op_finsert *op = bkey_i_to_logged_op_finsert(op_k);
subvol_inum inum = { le32_to_cpu(op->v.subvol), le64_to_cpu(op->v.inum) };
u64 dst_offset = le64_to_cpu(op->v.dst_offset);
@@ -371,9 +370,7 @@ static int __bch2_resume_logged_op_finsert(struct btree_trans *trans,
*/
try(lockrestart_do(trans, __bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot, warn_errors)));

bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
POS(inum.inum, 0),
BTREE_ITER_intent);
CLASS(btree_iter, iter)(trans, BTREE_ID_extents, POS(inum.inum, 0), BTREE_ITER_intent);

switch (op->v.state) {
case LOGGED_OP_FINSERT_start:
@@ -484,7 +481,6 @@ case LOGGED_OP_FINSERT_finish:
break;
}
err:
bch2_trans_iter_exit(&iter);
if (warn_errors)
bch_err_fn(c, ret);
return ret;

@@ -177,6 +177,7 @@ static int bch2_dev_metadata_drop(struct bch_fs *c,
bch2_btree_iter_next_node(&iter);
}

bch2_trans_unlock(trans);
bch2_btree_interior_updates_flush(c);

BUG_ON(bch2_err_matches(ret, BCH_ERR_transaction_restart));

@@ -413,11 +413,11 @@ int bch2_move_data_btree(struct moving_context *ctxt,
{
struct btree_trans *trans = ctxt->trans;
struct bch_fs *c = trans->c;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;

CLASS(per_snapshot_io_opts, snapshot_io_opts)(c);
CLASS(btree_iter_uninit, iter)(trans);

if (ctxt->stats) {
ctxt->stats->data_type = BCH_DATA_user;
@@ -437,24 +437,19 @@ retry_root:
if (ret)
goto root_err;

if (b != btree_node_root(c, b)) {
bch2_trans_iter_exit(&iter);
if (b != btree_node_root(c, b))
goto retry_root;
}

k = bkey_i_to_s_c(&b->key);
ret = bch2_move_extent(ctxt, NULL, &snapshot_io_opts, pred, arg, &iter, level, k);
root_err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
bch2_trans_iter_exit(&iter);
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry_root;
}
if (bch2_err_matches(ret, BCH_ERR_data_update_fail))
ret = 0; /* failure for this extent, keep going */
if (bch2_err_matches(ret, EROFS))
goto out;
WARN_ONCE(ret, "unhandled error from move_extent: %s", bch2_err_str(ret));
goto out;
WARN_ONCE(ret && !bch2_err_matches(ret, EROFS),
"unhandled error from move_extent: %s", bch2_err_str(ret));
return ret;
}

bch2_trans_node_iter_init(trans, &iter, btree_id, start, 0, level,
@@ -499,8 +494,6 @@ next_nondata:
if (!bch2_btree_iter_advance(&iter))
break;
}
out:
bch2_trans_iter_exit(&iter);

return ret;
}
@@ -736,7 +729,6 @@ static int bch2_move_btree(struct bch_fs *c,
struct bch_move_stats *stats)
{
bool kthread = (current->flags & PF_KTHREAD) != 0;
struct btree_iter iter;
struct btree *b;
enum btree_id btree;
int ret = 0;
@@ -748,6 +740,8 @@ static int bch2_move_btree(struct bch_fs *c,
bch2_moving_ctxt_init(&ctxt, c, NULL, stats, writepoint_ptr(&c->btree_write_point), true);
struct btree_trans *trans = ctxt.trans;

CLASS(btree_iter_uninit, iter)(trans);

stats->data_type = BCH_DATA_btree;

for (btree = start.btree;
@@ -779,8 +773,6 @@ retry:
goto next;

ret = bch2_btree_node_rewrite(trans, &iter, b, 0, 0) ?: ret;
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
continue;
if (ret)
break;
next:
@@ -789,12 +781,11 @@ next:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;

bch2_trans_iter_exit(&iter);

if (kthread && kthread_should_stop())
break;
}

bch2_trans_unlock(trans);
bch2_btree_interior_updates_flush(c);
bch_err_fn(c, ret);


@@ -534,12 +534,10 @@ static void bch2_rbio_done(struct bch_read_bio *rbio)

static int get_rbio_extent(struct btree_trans *trans, struct bch_read_bio *rbio, struct bkey_buf *sk)
{
CLASS(btree_iter_uninit, iter)(trans);
CLASS(btree_iter, iter)(trans, rbio->data_btree, rbio->data_pos, 0);
struct bkey_s_c k;

try(lockrestart_do(trans,
bkey_err(k = bch2_bkey_get_iter(trans, &iter,
rbio->data_btree, rbio->data_pos, 0))));
try(lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))));

struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
bkey_for_each_ptr(ptrs, ptr)
@@ -603,13 +601,10 @@ static noinline int bch2_read_retry_nodecode(struct btree_trans *trans,
do {
bch2_trans_begin(trans);

CLASS(btree_iter_uninit, iter)(trans);
CLASS(btree_iter, iter)(trans, u->btree_id, bkey_start_pos(&u->k.k->k), 0);
struct bkey_s_c k;

try(lockrestart_do(trans,
bkey_err(k = bch2_bkey_get_iter(trans, &iter,
u->btree_id, bkey_start_pos(&u->k.k->k),
0))));
try(lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter))));

if (!bkey_and_val_eq(k, bkey_i_to_s_c(u->k.k))) {
/* extent we wanted to read no longer exists: */

@@ -920,7 +920,6 @@ static int check_rebalance_work_one(struct btree_trans *trans,
extent_iter->btree_id == BTREE_ID_reflink &&
(!rebalance_k.k ||
rebalance_k.k->p.inode >= BCACHEFS_ROOT_INO)) {
bch2_trans_iter_exit(extent_iter);
bch2_trans_iter_init(trans, extent_iter,
BTREE_ID_extents, POS_MIN,
BTREE_ITER_prefetch|

@@ -499,7 +499,6 @@ int bch2_update_unwritten_extent(struct btree_trans *trans,
struct bkey_i_extent *e;
struct write_point *wp;
struct closure cl;
struct btree_iter iter;
struct bkey_s_c k;
int ret = 0;

@@ -512,16 +511,13 @@ int bch2_update_unwritten_extent(struct btree_trans *trans,

bch2_trans_begin(trans);

bch2_trans_iter_init(trans, &iter, update->btree_id, update->op.pos,
BTREE_ITER_slots);
ret = lockrestart_do(trans, ({
k = bch2_btree_iter_peek_slot(&iter);
bkey_err(k);
}));
bch2_trans_iter_exit(&iter);

if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
break;
{
CLASS(btree_iter, iter)(trans, update->btree_id, update->op.pos,
BTREE_ITER_slots);
ret = lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek_slot(&iter)));
if (ret || !bch2_extents_match(k, bkey_i_to_s_c(update->k.k)))
break;
}

e = bkey_extent_init(update->op.insert_keys.top);
e->k.p = update->op.pos;

@@ -1020,6 +1020,8 @@ static int bch2_write_extent(struct bch_write_op *op, struct write_point *wp,
struct bversion version = op->version;
size_t dst_len = 0, src_len = 0;

BUG_ON(src->bi_iter.bi_size & (block_bytes(c) - 1));

if (page_alloc_failed &&
dst->bi_iter.bi_size < (wp->sectors_free << 9) &&
dst->bi_iter.bi_size < c->opts.encoded_extent_max)
@@ -1314,7 +1316,7 @@ static void bch2_nocow_write(struct bch_write_op *op)
{
struct bch_fs *c = op->c;
struct btree_trans *trans;
struct btree_iter iter;
struct btree_iter iter = {};
struct bkey_s_c k;
struct bkey_ptrs_c ptrs;
u32 snapshot;
@@ -1676,6 +1678,7 @@ CLOSURE_CALLBACK(bch2_write)
if (unlikely(bio->bi_iter.bi_size & (c->opts.block_size - 1))) {
bch2_write_op_error(op, op->pos.offset, "misaligned write");
op->error = bch_err_throw(c, data_write_misaligned);
__WARN();
goto err;
}


@@ -175,7 +175,7 @@
x(BCH_ERR_btree_insert_fail, btree_insert_need_journal_reclaim) \
x(0, backpointer_to_overwritten_btree_node) \
x(0, journal_reclaim_would_deadlock) \
x(EINVAL, fsck) \
x(EROFS, fsck) \
x(BCH_ERR_fsck, fsck_ask) \
x(BCH_ERR_fsck, fsck_fix) \
x(BCH_ERR_fsck, fsck_delete_bkey) \

@@ -273,36 +273,27 @@ struct posix_acl *bch2_get_acl(struct inode *vinode, int type, bool rcu)
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct bch_hash_info hash = bch2_hash_info_init(c, &inode->ei_inode);
struct xattr_search_key search = X_SEARCH(acl_to_xattr_type(type), "", 0);
struct posix_acl *acl = NULL;

if (rcu)
return ERR_PTR(-ECHILD);

CLASS(btree_trans, trans)(c);
CLASS(btree_iter_uninit, iter)(trans);
retry:
bch2_trans_begin(trans);

struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
&hash, inode_inum(inode), &search, 0);
int ret = bkey_err(k);
struct bkey_s_c k;
int ret = lockrestart_do(trans,
bkey_err(k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
&hash, inode_inum(inode), &search, 0)));
if (ret)
goto err;
return bch2_err_matches(ret, ENOENT) ? NULL : ERR_PTR(ret);

struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);
acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
struct posix_acl *acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
ret = PTR_ERR_OR_ZERO(acl);
err:
if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;

if (ret)
acl = !bch2_err_matches(ret, ENOENT) ? ERR_PTR(ret) : NULL;

if (!IS_ERR_OR_NULL(acl))
set_cached_acl(&inode->v, type, acl);
return ERR_PTR(ret);

set_cached_acl(&inode->v, type, acl);
return acl;
}

@@ -336,60 +327,45 @@ int bch2_set_acl_trans(struct btree_trans *trans, subvol_inum inum,
return bch2_err_matches(ret, ENOENT) ? 0 : ret;
}

static int __bch2_set_acl(struct btree_trans *trans,
struct mnt_idmap *idmap,
struct bch_inode_info *inode,
struct posix_acl *acl, int type)
{
try(bch2_subvol_is_ro_trans(trans, inode->ei_inum.subvol));

CLASS(btree_iter_uninit, inode_iter)(trans);
struct bch_inode_unpacked inode_u;
try(bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode), BTREE_ITER_intent));

umode_t mode = inode_u.bi_mode;

if (type == ACL_TYPE_ACCESS)
try(posix_acl_update_mode(idmap, &inode->v, &mode, &acl));

try(bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type));

inode_u.bi_ctime = bch2_current_time(trans->c);
inode_u.bi_mode = mode;

try(bch2_inode_write(trans, &inode_iter, &inode_u));
try(bch2_trans_commit(trans, NULL, NULL, 0));

bch2_inode_update_after_write(trans, inode, &inode_u, ATTR_CTIME|ATTR_MODE);
set_cached_acl(&inode->v, type, acl);
return 0;
}

int bch2_set_acl(struct mnt_idmap *idmap,
struct dentry *dentry,
struct posix_acl *_acl, int type)
struct posix_acl *acl, int type)
{
struct bch_inode_info *inode = to_bch_ei(dentry->d_inode);
struct bch_fs *c = inode->v.i_sb->s_fs_info;
struct btree_iter inode_iter = { NULL };
struct bch_inode_unpacked inode_u;
struct posix_acl *acl;
umode_t mode;
int ret;

guard(mutex)(&inode->ei_update_lock);
CLASS(btree_trans, trans)(c);
retry:
bch2_trans_begin(trans);
acl = _acl;

ret = bch2_subvol_is_ro_trans(trans, inode->ei_inum.subvol) ?:
bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
BTREE_ITER_intent);
if (ret)
goto btree_err;

mode = inode_u.bi_mode;

if (type == ACL_TYPE_ACCESS) {
ret = posix_acl_update_mode(idmap, &inode->v, &mode, &acl);
if (ret)
goto btree_err;
}

ret = bch2_set_acl_trans(trans, inode_inum(inode), &inode_u, acl, type);
if (ret)
goto btree_err;

inode_u.bi_ctime = bch2_current_time(c);
inode_u.bi_mode = mode;

ret = bch2_inode_write(trans, &inode_iter, &inode_u) ?:
bch2_trans_commit(trans, NULL, NULL, 0);
btree_err:
bch2_trans_iter_exit(&inode_iter);

if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
goto retry;
if (unlikely(ret))
return ret;

bch2_inode_update_after_write(trans, inode, &inode_u,
ATTR_CTIME|ATTR_MODE);

set_cached_acl(&inode->v, type, acl);
return 0;
return lockrestart_do(trans, __bch2_set_acl(trans, idmap, inode, acl, type));
}

int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
@@ -399,9 +375,8 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,
{
struct bch_hash_info hash_info = bch2_hash_info_init(trans->c, inode);
struct xattr_search_key search = X_SEARCH(KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS, "", 0);
struct btree_iter iter;
struct posix_acl *acl = NULL;

CLASS(btree_iter_uninit, iter)(trans);
struct bkey_s_c k = bch2_hash_lookup(trans, &iter, bch2_xattr_hash_desc,
&hash_info, inum, &search, BTREE_ITER_intent);
int ret = bkey_err(k);
@@ -410,30 +385,19 @@ int bch2_acl_chmod(struct btree_trans *trans, subvol_inum inum,

struct bkey_s_c_xattr xattr = bkey_s_c_to_xattr(k);

acl = bch2_acl_from_disk(trans, xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len));
ret = PTR_ERR_OR_ZERO(acl);
if (ret)
goto err;
struct posix_acl *acl __free(kfree) =
errptr_try(bch2_acl_from_disk(trans, xattr_val(xattr.v),
le16_to_cpu(xattr.v->x_val_len)));

ret = allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode));
if (ret)
goto err;
try(allocate_dropping_locks_errcode(trans, __posix_acl_chmod(&acl, _gfp, mode)));

struct bkey_i_xattr *new = bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS);
ret = PTR_ERR_OR_ZERO(new);
if (ret)
goto err;
struct bkey_i_xattr *new = errptr_try(bch2_acl_to_xattr(trans, acl, ACL_TYPE_ACCESS));

new->k.p = iter.pos;
ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
*new_acl = acl;
acl = NULL;
err:
bch2_trans_iter_exit(&iter);
if (!IS_ERR_OR_NULL(acl))
kfree(acl);
return ret;
return 0;
}

#endif /* NO_BCACHEFS_FS */

@@ -545,19 +545,11 @@ int bch2_dirent_lookup_trans(struct btree_trans *trans,
struct qstr lookup_name;
try(bch2_maybe_casefold(trans, hash_info, name, &lookup_name));

struct bkey_s_c k = bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
hash_info, dir, &lookup_name, flags);
int ret = bkey_err(k);
if (ret)
goto err;
struct bkey_s_c k = bkey_try(bch2_hash_lookup(trans, iter, bch2_dirent_hash_desc,
hash_info, dir, &lookup_name, flags));

ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), inum);
if (ret > 0)
ret = -ENOENT;
err:
if (ret)
bch2_trans_iter_exit(iter);
return ret;
int ret = bch2_dirent_read_target(trans, dir, bkey_s_c_to_dirent(k), inum);
return ret > 0 ? -ENOENT : 0;
}

u64 bch2_dirent_lookup(struct bch_fs *c, subvol_inum dir,
@@ -583,8 +575,7 @@ int bch2_empty_dir_snapshot(struct btree_trans *trans, u64 dir, u32 subvol, u32
struct bkey_s_c_dirent d = bkey_s_c_to_dirent(k);
if (d.v->d_type == DT_SUBVOL && le32_to_cpu(d.v->d_parent_subvol) != subvol)
continue;
ret = bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty);
break;
return bch_err_throw(trans->c, ENOTEMPTY_dir_not_empty);
}

return ret;

@@ -62,10 +62,7 @@ static inline struct bkey_s_c_dirent dirent_get_by_pos(struct btree_trans *trans
struct bpos pos)
{
bch2_trans_iter_init(trans, iter, BTREE_ID_dirents, pos, 0);
struct bkey_s_c_dirent d = bch2_bkey_get_typed(iter, dirent);
if (bkey_err(d.s_c))
bch2_trans_iter_exit(iter);
return d;
return bch2_bkey_get_typed(iter, dirent);
}

int bch2_dirent_read_target(struct btree_trans *, subvol_inum,

@@ -368,7 +368,6 @@ int __bch2_inode_peek(struct btree_trans *trans,
err:
if (warn)
bch_err_msg(trans->c, ret, "looking up inum %llu:%llu:", inum.subvol, inum.inum);
bch2_trans_iter_exit(iter);
return ret;
}

@@ -675,7 +674,6 @@ bch2_bkey_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter
if (bch2_snapshot_is_ancestor(c, pos.snapshot, k.k->p.snapshot))
return k;

bch2_trans_iter_exit(iter);
return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
}

@@ -683,17 +681,16 @@ static struct bkey_s_c
bch2_inode_get_iter_snapshot_parent(struct btree_trans *trans, struct btree_iter *iter,
struct bpos pos, unsigned flags)
{
struct bkey_s_c k;
again:
k = bch2_bkey_get_iter_snapshot_parent(trans, iter, BTREE_ID_inodes, pos, flags);
if (!k.k ||
bkey_err(k) ||
bkey_is_inode(k.k))
return k;
while (1) {
struct bkey_s_c k =
bch2_bkey_get_iter_snapshot_parent(trans, iter, BTREE_ID_inodes, pos, flags);
if (!k.k ||
bkey_err(k) ||
bkey_is_inode(k.k))
return k;

bch2_trans_iter_exit(iter);
pos = k.k->p;
goto again;
pos = k.k->p;
}
}

int __bch2_inode_has_child_snapshots(struct btree_trans *trans, struct bpos pos)
@@ -969,58 +966,47 @@ int bch2_inode_create(struct btree_trans *trans,
bch2_trans_iter_init(trans, iter, BTREE_ID_inodes, POS(0, pos),
BTREE_ITER_all_snapshots|
BTREE_ITER_intent);
struct bkey_s_c k;
int ret = 0;
again:
while ((k = bch2_btree_iter_peek(iter)).k &&
!(ret = bkey_err(k)) &&
bkey_lt(k.k->p, POS(0, max))) {
if (pos < iter->pos.offset)
goto found_slot;
while (1) {
struct bkey_s_c k;
while ((k = bkey_try(bch2_btree_iter_peek(iter))).k &&
bkey_lt(k.k->p, POS(0, max))) {

if (bch2_snapshot_is_ancestor(trans->c, snapshot, k.k->p.snapshot) &&
k.k->type == KEY_TYPE_inode_generation) {
gen = le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation);
goto found_slot;
if (pos < iter->pos.offset)
break;

if (bch2_snapshot_is_ancestor(trans->c, snapshot, k.k->p.snapshot) &&
k.k->type == KEY_TYPE_inode_generation) {
pos = k.k->p.offset;
gen = le32_to_cpu(bkey_s_c_to_inode_generation(k).v->bi_generation);
break;
}

/*
* We don't need to iterate over keys in every snapshot once
* we've found just one:
*/
pos = iter->pos.offset + 1;
bch2_btree_iter_set_pos(iter, POS(0, pos));
}

/*
* We don't need to iterate over keys in every snapshot once
* we've found just one:
*/
pos = iter->pos.offset + 1;
if (likely(pos < max)) {
bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
k = bkey_try(bch2_btree_iter_peek_slot(iter));

inode_u->bi_inum = k.k->p.offset;
inode_u->bi_generation = max(gen, le64_to_cpu(cursor->v.gen));
cursor->v.idx = cpu_to_le64(k.k->p.offset + 1);
return 0;
}

if (start == min)
return bch_err_throw(trans->c, ENOSPC_inode_create);

/* Retry from start */
pos = start = min;
bch2_btree_iter_set_pos(iter, POS(0, pos));
le32_add_cpu(&cursor->v.gen, 1);
}

if (!ret && pos < max)
goto found_slot;

if (!ret && start == min)
ret = bch_err_throw(trans->c, ENOSPC_inode_create);

if (ret) {
bch2_trans_iter_exit(iter);
return ret;
}

/* Retry from start */
pos = start = min;
bch2_btree_iter_set_pos(iter, POS(0, pos));
le32_add_cpu(&cursor->v.gen, 1);
goto again;
found_slot:
bch2_btree_iter_set_pos(iter, SPOS(0, pos, snapshot));
k = bch2_btree_iter_peek_slot(iter);
ret = bkey_err(k);
if (ret) {
bch2_trans_iter_exit(iter);
return ret;
}

inode_u->bi_inum = k.k->p.offset;
inode_u->bi_generation = max(gen, le64_to_cpu(cursor->v.gen));
cursor->v.idx = cpu_to_le64(k.k->p.offset + 1);
return 0;
}

static int bch2_inode_delete_keys(struct btree_trans *trans,
@@ -1078,10 +1064,9 @@ static int bch2_inode_rm_trans(struct btree_trans *trans, subvol_inum inum, u32
{
try(bch2_subvolume_get_snapshot(trans, inum.subvol, snapshot));

CLASS(btree_iter_uninit, iter)(trans);
struct bkey_s_c k = bkey_try(bch2_bkey_get_iter(trans, &iter, BTREE_ID_inodes,
SPOS(0, inum.inum, *snapshot),
BTREE_ITER_intent|BTREE_ITER_cached));
CLASS(btree_iter, iter)(trans, BTREE_ID_inodes, SPOS(0, inum.inum, *snapshot),
BTREE_ITER_intent|BTREE_ITER_cached);
struct bkey_s_c k = bkey_try(bch2_btree_iter_peek_slot(&iter));

if (!bkey_is_inode(k.k)) {
bch2_fs_inconsistent(trans->c,
@@ -1204,10 +1189,7 @@ int bch2_inode_set_casefold(struct btree_trans *trans, subvol_inum inum,
* Make sure the dir is empty, as otherwise we'd need to
* rehash everything and update the dirent keys.
*/
ret = bch2_empty_dir_trans(trans, inum);
if (ret < 0)
return ret;

try(bch2_empty_dir_trans(trans, inum));
try(bch2_request_incompat_feature(c, bcachefs_metadata_version_casefolding));

bch2_check_set_feature(c, BCH_FEATURE_casefolding);
@@ -1241,28 +1223,23 @@ static noinline int __bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum
*/
static int delete_ancestor_snapshot_inodes(struct btree_trans *trans, struct bpos pos)
{
struct btree_iter iter;
struct bkey_s_c k;
int ret;
next_parent:
ret = lockrestart_do(trans,
bkey_err(k = bch2_inode_get_iter_snapshot_parent(trans, &iter, pos, 0)));
if (ret || !k.k)
return ret;
while (1) {
CLASS(btree_iter_uninit, iter)(trans);
struct bkey_s_c k;

bool unlinked = bkey_is_unlinked_inode(k);
pos = k.k->p;
bch2_trans_iter_exit(&iter);
try(lockrestart_do(trans,
bkey_err(k = bch2_inode_get_iter_snapshot_parent(trans, &iter, pos, 0))));

if (!unlinked)
return 0;
if (!k.k || !bkey_is_unlinked_inode(k))
return 0;

ret = lockrestart_do(trans, bch2_inode_or_descendents_is_open(trans, pos));
if (ret)
return ret < 0 ? ret : 0;
pos = k.k->p;
int ret = lockrestart_do(trans, bch2_inode_or_descendents_is_open(trans, pos));
if (ret)
return ret < 0 ? ret : 0;

try(__bch2_inode_rm_snapshot(trans, pos.offset, pos.snapshot));
goto next_parent;
try(__bch2_inode_rm_snapshot(trans, pos.offset, pos.snapshot));
}
}

int bch2_inode_rm_snapshot(struct btree_trans *trans, u64 inum, u32 snapshot)
@@ -1278,15 +1255,11 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos,
{
struct bch_fs *c = trans->c;
CLASS(printbuf, buf)();
int ret;

CLASS(btree_iter, inode_iter)(trans, BTREE_ID_inodes, pos, BTREE_ITER_cached);
struct bkey_s_c k = bch2_btree_iter_peek_slot(&inode_iter);
ret = bkey_err(k);
if (ret)
return ret;
struct bkey_s_c k = bkey_try(bch2_btree_iter_peek_slot(&inode_iter));

ret = bkey_is_inode(k.k) ? 0 : bch_err_throw(c, ENOENT_inode);
int ret = bkey_is_inode(k.k) ? 0 : bch_err_throw(c, ENOENT_inode);
if (fsck_err_on(from_deleted_inodes && ret,
trans, deleted_inode_missing,
"nonexistent inode %llu:%u in deleted_inodes btree",
@ -1343,10 +1316,9 @@ static int may_delete_deleted_inode(struct btree_trans *trans, struct bpos pos,
|
||||
try(__bch2_fsck_write_inode(trans, inode));
|
||||
}
|
||||
|
||||
if (!from_deleted_inodes) {
|
||||
if (!from_deleted_inodes)
|
||||
return bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc) ?:
|
||||
bch_err_throw(c, inode_has_child_snapshot);
|
||||
}
|
||||
|
||||
goto delete;
|
||||
|
||||
|
||||
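bkey_try() is the lookup-flavoured counterpart of try(): a struct bkey_s_c can carry an error (extracted with bkey_err()), and bkey_try() either unwraps the key or returns the error early. A toy version over a result-style struct (the bcachefs encoding of errors inside bkey_s_c differs; this only shows the unwrap-or-return shape):

	#include <errno.h>
	#include <stdio.h>

	/* Result-style lookup value: err != 0 means no key. */
	struct lookup { int err; int val; };

	/* Unwrap a lookup or return its error from the enclosing
	 * function -- mirroring bkey_try() over bkey_err(). */
	#define lookup_try(expr)                     \
		({                                   \
			struct lookup _l = (expr);   \
			if (_l.err)                  \
				return _l.err;       \
			_l;                          \
		})

	static struct lookup find(int key)
	{
		return key == 42 ? (struct lookup){ 0, 1 }
				 : (struct lookup){ -ENOENT, 0 };
	}

	static int use(int key)
	{
		struct lookup l = lookup_try(find(key));
		printf("found val=%d\n", l.val);
		return 0;
	}

	int main(void)
	{
		printf("use(42)=%d, use(7)=%d\n", use(42), use(7));
		return 0;
	}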
@ -75,7 +75,7 @@ static int __bch2_logged_op_start(struct btree_trans *trans, struct bkey_i *k)
{
	CLASS(btree_iter_uninit, iter)(trans);
	try(bch2_bkey_get_empty_slot(trans, &iter, BTREE_ID_logged_ops,
				     POS(LOGGED_OPS_INUM_logged_ops, U64_MAX)));
				     POS_MIN, POS(LOGGED_OPS_INUM_logged_ops, U64_MAX)));

	k->k.p = iter.pos;
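bch2_bkey_get_empty_slot() now takes both ends of the search range (here POS_MIN up to the logged-ops maximum) rather than only an upper bound, so callers state explicitly where slot allocation may begin. A hedged sketch of a range-bounded empty-slot search over a sorted list of used positions (purely illustrative, not the btree implementation):

	#include <errno.h>
	#include <stdio.h>

	/* Find the first unused slot in [min, max], given a sorted
	 * array of positions already in use. */
	static int get_empty_slot(const unsigned *used, unsigned n,
				  unsigned min, unsigned max, unsigned *slot)
	{
		unsigned pos = min;

		for (unsigned i = 0; i < n; i++) {
			if (used[i] < min)
				continue;
			if (used[i] > max || used[i] > pos)
				break;		/* gap found before used[i] */
			pos = used[i] + 1;	/* slot occupied, advance */
		}

		if (pos > max)
			return -ENOSPC;		/* range exhausted */
		*slot = pos;
		return 0;
	}

	int main(void)
	{
		unsigned used[] = { 0, 1, 2, 5 };
		unsigned slot;
		int ret = get_empty_slot(used, 4, 0, 10, &slot);
		printf("ret=%d slot=%u\n", ret, slot);	/* expect slot=3 */
		return 0;
	}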
@ -771,33 +771,25 @@ fsck_err:
|
||||
|
||||
static int bch2_propagate_has_case_insensitive(struct btree_trans *trans, subvol_inum inum)
|
||||
{
|
||||
struct btree_iter iter = {};
|
||||
int ret = 0;
|
||||
|
||||
while (true) {
|
||||
CLASS(btree_iter_uninit, iter)(trans);
|
||||
struct bch_inode_unpacked inode;
|
||||
ret = bch2_inode_peek(trans, &iter, &inode, inum,
|
||||
BTREE_ITER_intent|BTREE_ITER_with_updates);
|
||||
if (ret)
|
||||
break;
|
||||
try(bch2_inode_peek(trans, &iter, &inode, inum,
|
||||
BTREE_ITER_intent|BTREE_ITER_with_updates));
|
||||
|
||||
if (inode.bi_flags & BCH_INODE_has_case_insensitive)
|
||||
break;
|
||||
|
||||
inode.bi_flags |= BCH_INODE_has_case_insensitive;
|
||||
ret = bch2_inode_write(trans, &iter, &inode);
|
||||
if (ret)
|
||||
break;
|
||||
try(bch2_inode_write(trans, &iter, &inode));
|
||||
|
||||
bch2_trans_iter_exit(&iter);
|
||||
if (subvol_inum_eq(inum, BCACHEFS_ROOT_SUBVOL_INUM))
|
||||
break;
|
||||
|
||||
inum = parent_inum(inum, &inode);
|
||||
}
|
||||
|
||||
bch2_trans_iter_exit(&iter);
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int bch2_maybe_propagate_has_case_insensitive(struct btree_trans *trans, subvol_inum inum,
|
||||
|
||||
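Moving the CLASS() declaration inside the loop body is what lets the per-iteration bch2_trans_iter_exit() call disappear: the destructor now runs at the end of every iteration, not once at function exit. A small demonstration of that scoping rule with the cleanup attribute (names illustrative):

	#include <stdio.h>

	static void put(int *v) { printf("cleanup %d\n", *v); }

	int main(void)
	{
		/* Declared inside the loop body, the cleanup runs at the
		 * end of each iteration -- the reason the explicit
		 * iter-exit calls in the loop above could be dropped. */
		for (int i = 0; i < 3; i++) {
			int handle __attribute__((cleanup(put))) = i;
			printf("using %d\n", handle);
		}
		return 0;
	}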
@ -190,7 +190,6 @@ bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
			break;
		}
	}
	bch2_trans_iter_exit(iter);

	return bkey_s_c_err(ret ?: bch_err_throw(trans->c, ENOENT_str_hash_lookup));
}
@ -232,7 +231,6 @@ bch2_hash_hole(struct btree_trans *trans,
			   BTREE_ITER_slots|BTREE_ITER_intent, k, ret)
		if (!is_visible_key(desc, inum, k))
			return 0;
	bch2_trans_iter_exit(iter);

	return ret ?: bch_err_throw(trans->c, ENOSPC_str_hash_create);
}
@ -306,7 +304,6 @@ struct bkey_s_c bch2_hash_set_or_get_in_snapshot(struct btree_trans *trans,
		ret = bch_err_throw(c, ENOSPC_str_hash_create);
out:
	bch2_trans_iter_exit(&slot);
	bch2_trans_iter_exit(iter);
	return ret ? bkey_s_c_err(ret) : bkey_s_c_null;
found:
	found = true;
@ -335,15 +332,10 @@ int bch2_hash_set_in_snapshot(struct btree_trans *trans,
			      struct bkey_i *insert,
			      enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	CLASS(btree_iter_uninit, iter)(trans);
	struct bkey_s_c k = bkey_try(bch2_hash_set_or_get_in_snapshot(trans, &iter, desc, info, inum,
								      snapshot, insert, flags));
	if (k.k) {
		bch2_trans_iter_exit(&iter);
		return bch_err_throw(trans->c, EEXIST_str_hash_set);
	}

	return 0;
	return k.k ? bch_err_throw(trans->c, EEXIST_str_hash_set) : 0;
}

static __always_inline
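The if/exit/return block collapses into a single ternary return precisely because scope-based cleanup makes every return path equivalent: no exit call can be forgotten on the error branch. A compact illustration (names illustrative):

	#include <errno.h>
	#include <stdio.h>

	static void drop(int *v) { printf("exit iter %d\n", *v); }

	/* With cleanup attached to the iterator, the error path needs
	 * no manual exit, so if/exit/return becomes one ternary. */
	static int set_or_fail(int exists)
	{
		int iter __attribute__((cleanup(drop))) = 1;
		return exists ? -EEXIST : 0;	/* drop() runs either way */
	}

	int main(void)
	{
		printf("%d %d\n", set_or_fail(1), set_or_fail(0));
		return 0;
	}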
@ -388,12 +380,10 @@ int bch2_hash_delete(struct btree_trans *trans,
		     const struct bch_hash_info *info,
		     subvol_inum inum, const void *key)
{
	struct btree_iter iter;
	CLASS(btree_iter_uninit, iter)(trans);
	bkey_try(bch2_hash_lookup(trans, &iter, desc, info, inum, key, BTREE_ITER_intent));

	int ret = bch2_hash_delete_at(trans, desc, info, &iter, 0);
	bch2_trans_iter_exit(&iter);
	return ret;
	return bch2_hash_delete_at(trans, desc, info, &iter, 0);
}

int bch2_repair_inode_hash_info(struct btree_trans *, struct bch_inode_unpacked *);
@ -1028,19 +1028,11 @@ static int bch2_fs_opt_version_init(struct bch_fs *c)
	return 0;
}

static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
				    bch_sb_handles *sbs)
static int bch2_fs_init(struct bch_fs *c, struct bch_sb *sb,
			struct bch_opts *opts, bch_sb_handles *sbs)
{
	struct bch_fs *c;
	unsigned i, iter_size;
	CLASS(printbuf, name)();
	int ret = 0;

	c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
	if (!c) {
		c = ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);
		goto out;
	}

	c->stdio = (void *)(unsigned long) opts->stdio;
@ -1116,15 +1108,10 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,

	mutex_init(&c->sectors_available_lock);

	ret = percpu_init_rwsem(&c->mark_lock);
	if (ret)
		goto err;
	try(percpu_init_rwsem(&c->mark_lock));

	scoped_guard(mutex, &c->sb_lock)
		ret = bch2_sb_to_fs(c, sb);

	if (ret)
		goto err;
		try(bch2_sb_to_fs(c, sb));

	/* Compat: */
	if (le16_to_cpu(sb->version) <= bcachefs_metadata_version_inode_v2 &&
@ -1136,9 +1123,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
		SET_BCH_SB_JOURNAL_RECLAIM_DELAY(sb, 100);

	c->opts = bch2_opts_default;
	ret = bch2_opts_from_sb(&c->opts, sb);
	if (ret)
		goto err;
	try(bch2_opts_from_sb(&c->opts, sb));

	bch2_opts_apply(&c->opts, *opts);
@ -1146,8 +1131,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
	if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
	    c->opts.block_size > PAGE_SIZE) {
		bch_err(c, "cannot mount bs > ps filesystem without CONFIG_TRANSPARENT_HUGEPAGE");
		ret = -EINVAL;
		goto err;
		return -EINVAL;
	}
#endif

@ -1161,8 +1145,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,

	if (bch2_fs_init_fault("fs_alloc")) {
		bch_err(c, "fs_alloc fault injected");
		ret = -EFAULT;
		goto err;
		return -EFAULT;
	}

	if (c->sb.multi_device)
@ -1170,9 +1153,7 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
	else
		prt_bdevname(&name, sbs->data[0].bdev);

	ret = name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0;
	if (ret)
		goto err;
	try(name.allocation_failure ? -BCH_ERR_ENOMEM_fs_name_alloc : 0);

	strscpy(c->name, name.buf, sizeof(c->name));
@ -1193,47 +1174,28 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
	    !(c->usage = alloc_percpu(struct bch_fs_usage_base)) ||
	    !(c->online_reserved = alloc_percpu(u64)) ||
	    mempool_init_kvmalloc_pool(&c->btree_bounce_pool, 1,
				       c->opts.btree_node_size)) {
		ret = bch_err_throw(c, ENOMEM_fs_other_alloc);
		goto err;
	}
				       c->opts.btree_node_size))
		return bch_err_throw(c, ENOMEM_fs_other_alloc);

	ret =
	    bch2_fs_async_obj_init(c) ?:
	    bch2_blacklist_table_initialize(c) ?:
	    bch2_fs_btree_cache_init(c) ?:
	    bch2_fs_btree_iter_init(c) ?:
	    bch2_fs_btree_key_cache_init(&c->btree_key_cache) ?:
	    bch2_fs_buckets_waiting_for_journal_init(c) ?:
	    bch2_io_clock_init(&c->io_clock[READ]) ?:
	    bch2_io_clock_init(&c->io_clock[WRITE]) ?:
	    bch2_fs_compress_init(c) ?:
	    bch2_fs_counters_init(c) ?:
	    bch2_fs_ec_init(c) ?:
	    bch2_fs_encryption_init(c) ?:
	    bch2_fs_fsio_init(c) ?:
	    bch2_fs_fs_io_direct_init(c) ?:
	    bch2_fs_io_read_init(c) ?:
	    bch2_fs_rebalance_init(c) ?:
	    bch2_fs_sb_errors_init(c) ?:
	    bch2_fs_vfs_init(c);
	if (ret)
		goto err;
	try(bch2_fs_async_obj_init(c));
	try(bch2_blacklist_table_initialize(c));
	try(bch2_fs_btree_cache_init(c));
	try(bch2_fs_btree_iter_init(c));
	try(bch2_fs_btree_key_cache_init(&c->btree_key_cache));
	try(bch2_fs_buckets_waiting_for_journal_init(c));
	try(bch2_io_clock_init(&c->io_clock[READ]));
	try(bch2_io_clock_init(&c->io_clock[WRITE]));
	try(bch2_fs_compress_init(c));
	try(bch2_fs_counters_init(c));
	try(bch2_fs_ec_init(c));
	try(bch2_fs_encryption_init(c));
	try(bch2_fs_fsio_init(c));
	try(bch2_fs_fs_io_direct_init(c));
	try(bch2_fs_io_read_init(c));
	try(bch2_fs_rebalance_init(c));
	try(bch2_fs_sb_errors_init(c));
	try(bch2_fs_vfs_init(c));

	/*
	 * just make sure this is always allocated if we might need it - mount
	 * failing due to kthread_create() failing is _very_ annoying
	 */
	if (!(c->sb.features & BIT_ULL(BCH_FEATURE_no_alloc_info)) ||
	    go_rw_in_recovery(c)) {
		/*
		 * start workqueues/kworkers early - kthread creation checks for
		 * pending signals, which is _very_ annoying
		 */
		ret = bch2_fs_init_rw(c);
		if (ret)
			goto err;
	}

#if IS_ENABLED(CONFIG_UNICODE)
	if (!bch2_fs_casefold_enabled(c)) {
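The long ?: chain and the per-step try() calls have the same short-circuit semantics: stop at the first nonzero result. A side-by-side sketch, reusing the toy try() from earlier (helper names here are placeholders):

	#include <errno.h>
	#include <stdio.h>

	#define try(expr) do { int _r = (expr); if (_r) return _r; } while (0)

	static int init_a(void) { return 0; }
	static int init_b(void) { return -ENOMEM; }
	static int init_c(void) { return 0; }

	/* Old style: GNU ?: chain, first error wins. */
	static int init_chain(void)
	{
		return init_a() ?: init_b() ?: init_c();
	}

	/* New style: one try() per step -- same semantics, but each
	 * step sits on its own statement and diffs cleanly. */
	static int init_try(void)
	{
		try(init_a());
		try(init_b());
		try(init_c());
		return 0;
	}

	int main(void)
	{
		printf("%d %d\n", init_chain(), init_try());	/* both -12 */
		return 0;
	}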
@ -1244,24 +1206,20 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
			   unicode_major(BCH_FS_DEFAULT_UTF8_ENCODING),
			   unicode_minor(BCH_FS_DEFAULT_UTF8_ENCODING),
			   unicode_rev(BCH_FS_DEFAULT_UTF8_ENCODING));
			ret = -EINVAL;
			goto err;
			return -EINVAL;
		}
	}
#else
	if (c->sb.features & BIT_ULL(BCH_FEATURE_casefolding)) {
		printk(KERN_ERR "Cannot mount a filesystem with casefolding on a kernel without CONFIG_UNICODE\n");
		ret = -EINVAL;
		goto err;
		return -EINVAL;
	}
#endif

	for (unsigned i = 0; i < c->sb.nr_devices; i++) {
		if (!bch2_member_exists(c->disk_sb.sb, i))
			continue;
		ret = bch2_dev_alloc(c, i);
		if (ret)
			goto err;
		try(bch2_dev_alloc(c, i));
	}

	bch2_journal_entry_res_resize(&c->journal,
@ -1274,40 +1232,43 @@ static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
	scoped_guard(rwsem_write, &c->state_lock)
		darray_for_each(*sbs, sb) {
			CLASS(printbuf, err)();
			ret = bch2_dev_attach_bdev(c, sb, &err);
			int ret = bch2_dev_attach_bdev(c, sb, &err);
			if (ret) {
				bch_err(bch2_dev_locked(c, sb->sb->dev_idx), "%s", err.buf);
				goto err;
				return ret;
			}
		}

	ret = bch2_fs_opt_version_init(c);
	if (ret)
		goto err;
	try(bch2_fs_opt_version_init(c));

	/*
	 * start workqueues/kworkers early - kthread creation checks for pending
	 * signals, which is _very_ annoying
	 * just make sure this is always allocated if we might need it - mount
	 * failing due to kthread_create() failing is _very_ annoying
	 */
	if (go_rw_in_recovery(c)) {
		ret = bch2_fs_init_rw(c);
		if (ret)
			goto err;
	}
	if (go_rw_in_recovery(c))
		try(bch2_fs_init_rw(c));

	scoped_guard(mutex, &bch2_fs_list_lock)
		ret = bch2_fs_online(c);

	if (ret)
		goto err;
		try(bch2_fs_online(c));

	c->recovery_task = current;
out:
	return 0;
}

static struct bch_fs *bch2_fs_alloc(struct bch_sb *sb, struct bch_opts *opts,
				    bch_sb_handles *sbs)
{
	struct bch_fs *c = kvmalloc(sizeof(struct bch_fs), GFP_KERNEL|__GFP_ZERO);
	if (!c)
		return ERR_PTR(-BCH_ERR_ENOMEM_fs_alloc);

	int ret = bch2_fs_init(c, sb, opts, sbs);
	if (ret) {
		bch2_fs_free(c);
		return ERR_PTR(ret);
	}

	return c;
err:
	bch2_fs_free(c);
	c = ERR_PTR(ret);
	goto out;
}

static bool bch2_fs_may_start(struct bch_fs *c)
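This is the structural payoff of the commit: bch2_fs_alloc() becomes a thin allocate/init/free-on-failure wrapper, and every failure inside bch2_fs_init() is a plain return with no unwind labels. That presumably relies on bch2_fs_free() being safe to call on a partially initialized, zero-filled struct. A minimal user-space sketch of the split (names illustrative; the kernel version returns ERR_PTR() rather than NULL):

	#include <stdio.h>
	#include <stdlib.h>

	struct fs { int ready; };

	/* Fallible setup on an already-zeroed object: every failure
	 * just returns, mirroring the new bch2_fs_init(). */
	static int fs_init(struct fs *c)
	{
		c->ready = 1;
		return 0;
	}

	/* Thin allocating wrapper: allocate, init, free on failure. */
	static struct fs *fs_alloc(void)
	{
		struct fs *c = calloc(1, sizeof(*c));
		if (!c)
			return NULL;

		if (fs_init(c)) {
			free(c);	/* teardown must tolerate partial init */
			return NULL;
		}
		return c;
	}

	int main(void)
	{
		struct fs *c = fs_alloc();
		printf("alloc %s\n", c ? "ok" : "failed");
		free(c);
		return 0;
	}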
@ -301,7 +301,6 @@ static int bch2_journal_replay_key(struct btree_trans *trans,
		return 0;
	}

	bch2_trans_iter_exit(&iter);
	bch2_trans_node_iter_init(trans, &iter, k->btree_id, bk->k.p,
				  BTREE_MAX_DEPTH, 0, iter_flags);
@ -68,7 +68,7 @@ __bch2_snapshot_tree_create(struct btree_trans *trans)
{
	CLASS(btree_iter_uninit, iter)(trans);
	int ret = bch2_bkey_get_empty_slot(trans, &iter,
			BTREE_ID_snapshot_trees, POS(0, U32_MAX));
			BTREE_ID_snapshot_trees, POS_MIN, POS(0, U32_MAX));
	if (ret == -BCH_ERR_ENOSPC_btree_slot)
		ret = bch_err_throw(trans->c, ENOSPC_snapshot_tree);
	if (ret)

@ -542,7 +542,7 @@ int bch2_subvolume_create(struct btree_trans *trans, u64 inode,

	CLASS(btree_iter_uninit, dst_iter)(trans);
	int ret = bch2_bkey_get_empty_slot(trans, &dst_iter,
			BTREE_ID_subvolumes, POS(0, U32_MAX));
			BTREE_ID_subvolumes, POS_MIN, POS(0, U32_MAX));
	if (ret == -BCH_ERR_ENOSPC_btree_slot)
		ret = bch_err_throw(c, ENOSPC_subvolume_create);
	if (ret)
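Both callers remap the generic -BCH_ERR_ENOSPC_btree_slot to a subsystem-specific error, so the user-visible failure names what actually ran out (snapshot trees vs. subvolumes). The remap shape, abstracted into a standalone sketch (error constants here are invented for illustration):

	#include <stdio.h>

	#define ENOSPC_SLOT   1001	/* generic: no free slot in range */
	#define ENOSPC_SUBVOL 1002	/* caller-specific meaning */

	static int subvolume_create(int fail)
	{
		int ret = fail ? -ENOSPC_SLOT : 0;

		/* Translate the generic allocator error into one that
		 * names the exhausted resource, as both hunks above do. */
		if (ret == -ENOSPC_SLOT)
			ret = -ENOSPC_SUBVOL;
		return ret;
	}

	int main(void)
	{
		printf("%d %d\n", subvolume_create(0), subvolume_create(1));
		return 0;
	}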