Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-02 00:00:03 +03:00)
Update bcachefs sources to ed6b7f81a7 six locks: Disable percpu read lock mode in userspace
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 7a66cf70c5
commit 17d1c4f4fe
@@ -1 +1 @@
-7c0fe6f104a68065c15b069176247bf5d237b2b3
+ed6b7f81a7b51ac05d02635907f92aff4a3f8445
@@ -135,4 +135,12 @@ static inline unsigned long find_next_zero_bit(const unsigned long *addr, unsign
 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
 
+static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
+{
+	if (small_const_nbits(nbits))
+		return !(*src & BITMAP_LAST_WORD_MASK(nbits));
+
+	return find_first_bit(src, nbits) == nbits;
+}
+
 #endif /* _PERF_BITOPS_H */
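The bitmap_empty() added above mirrors the kernel helper: when the bitmap provably fits in one word at compile time, test that word against a mask of the valid bits; otherwise scan with find_first_bit() and compare against nbits. A standalone sketch of the same semantics (the mask and small_const_nbits() definitions are the usual kernel ones, reproduced here as assumptions, with a naive find_first_bit() for illustration):

#include <stdbool.h>
#include <stdio.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* mask covering the valid bits of the last word of an nbits-sized bitmap */
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))

/* true if the whole bitmap provably fits in a single word */
#define small_const_nbits(nbits) \
	(__builtin_constant_p(nbits) && (nbits) <= BITS_PER_LONG && (nbits) > 0)

/* naive find_first_bit: returns nbits if no bit is set */
static unsigned long find_first_bit(const unsigned long *addr, unsigned long nbits)
{
	for (unsigned long i = 0; i < nbits; i++)
		if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return nbits;
}

static inline bool bitmap_empty(const unsigned long *src, unsigned nbits)
{
	if (small_const_nbits(nbits))
		return !(*src & BITMAP_LAST_WORD_MASK(nbits));

	return find_first_bit(src, nbits) == nbits;
}

int main(void)
{
	unsigned long map[2] = { 0, 0 };

	printf("%d\n", bitmap_empty(map, 100));	/* 1: no bits set */
	map[1] |= 1UL << 3;			/* set bit 67 on 64-bit */
	printf("%d\n", bitmap_empty(map, 100));	/* 0 */
	return 0;
}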
@@ -371,7 +371,8 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 	if (!ob)
 		iter.path->preserve = false;
 err:
-	set_btree_iter_dontneed(&iter);
+	if (iter.trans && iter.path)
+		set_btree_iter_dontneed(&iter);
 	bch2_trans_iter_exit(trans, &iter);
 	printbuf_exit(&buf);
 	return ob;
@@ -934,9 +935,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 	unsigned i;
 	int ret;
 
-	rcu_read_lock();
 	devs = target_rw_devs(c, wp->data_type, target);
-	rcu_read_unlock();
 
 	/* Don't allocate from devices we already have pointers to: */
 	for (i = 0; i < devs_have->nr; i++)
@@ -805,7 +805,7 @@ static int check_one_backpointer(struct btree_trans *trans,
 
 	if (fsck_err_on(!k.k, c,
 			"backpointer for missing extent\n  %s",
-			(bch2_backpointer_k_to_text(&buf, c, bp.s_c), buf.buf)))
+			(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf)))
 		return bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
 out:
 fsck_err:
@@ -2918,6 +2918,10 @@ static void bch2_trans_alloc_paths(struct btree_trans *trans, struct bch_fs *c)
 #endif
 	if (!p)
 		p = mempool_alloc(&trans->c->btree_paths_pool, GFP_NOFS);
+	/*
+	 * paths need to be zeroed, bch2_check_for_deadlock looks at paths in
+	 * other threads
+	 */
 
 	trans->paths	= p; p += paths_bytes;
 	trans->updates	= p; p += updates_bytes;
@@ -110,11 +110,14 @@ __trans_next_path_safe(struct btree_trans *trans, unsigned *idx)
  * This version is intended to be safe for use on a btree_trans that is owned by
  * another thread, for bch2_btree_trans_to_text();
  */
-#define trans_for_each_path_safe(_trans, _path, _idx)			\
-	for (_idx = 0;							\
+#define trans_for_each_path_safe_from(_trans, _path, _idx, _start)	\
+	for (_idx = _start;						\
 	     (_path = __trans_next_path_safe((_trans), &_idx));	\
 	     _idx++)
 
+#define trans_for_each_path_safe(_trans, _path, _idx)			\
+	trans_for_each_path_safe_from(_trans, _path, _idx, 0)
+
 static inline struct btree_path *next_btree_path(struct btree_trans *trans, struct btree_path *path)
 {
 	unsigned idx = path ? path->sorted_idx + 1 : 0;
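The point of the trans_for_each_path_safe_from() variant above is that the cursor is a caller-owned index revalidated on every step, rather than a pointer chased through a structure that another thread may be growing or reordering; the deadlock-cycle detector in the next hunk is exactly such a cross-thread walker. A minimal standalone sketch of the pattern (toy types and illustrative names, not the real bcachefs definitions):

#include <stdio.h>

struct path { int id; };

struct trans {
	struct path paths[8];
	unsigned nr;
};

/* return the path at *idx, or NULL once *idx runs past the end */
static struct path *next_path_safe(struct trans *trans, unsigned *idx)
{
	return *idx < trans->nr ? &trans->paths[*idx] : NULL;
}

/*
 * Iteration state is the caller's index, re-checked against the trans on
 * every step, so a concurrent resize can cut the walk short but cannot
 * leave us holding a dangling "current" pointer.
 */
#define for_each_path_safe_from(_trans, _path, _idx, _start)		\
	for (_idx = _start;						\
	     (_path = next_path_safe((_trans), &_idx));			\
	     _idx++)

#define for_each_path_safe(_trans, _path, _idx)				\
	for_each_path_safe_from(_trans, _path, _idx, 0)

int main(void)
{
	struct trans t = { .paths = { {1}, {2}, {3} }, .nr = 3 };
	struct path *p;
	unsigned idx;

	for_each_path_safe(&t, p, idx)
		printf("path %d\n", p->id);
	return 0;
}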
@@ -254,6 +254,7 @@ int bch2_check_for_deadlock(struct btree_trans *trans, struct printbuf *cycle)
 	struct trans_waiting_for_lock *top;
 	struct btree_bkey_cached_common *b;
 	struct btree_path *path;
+	unsigned path_idx;
 	int ret;
 
 	if (trans->lock_must_abort) {
@@ -272,12 +273,12 @@ next:
 
 	top = &g.g[g.nr - 1];
 
-	trans_for_each_path_from(top->trans, path, top->path_idx) {
+	trans_for_each_path_safe_from(top->trans, path, path_idx, top->path_idx) {
 		if (!path->nodes_locked)
 			continue;
 
-		if (top->path_idx != path->idx) {
-			top->path_idx = path->idx;
+		if (path_idx != top->path_idx) {
+			top->path_idx = path_idx;
 			top->level		= 0;
 			top->lock_start_time	= 0;
 		}
@@ -963,11 +963,16 @@ int bch2_trans_commit_error(struct btree_trans *trans, unsigned flags,
 					       JOURNAL_RES_GET_CHECK));
 		break;
 	case -BCH_ERR_btree_insert_need_journal_reclaim:
+		bch2_trans_unlock(trans);
+
 		trace_and_count(c, trans_blocked_journal_reclaim, trans, trace_ip);
 
-		ret = drop_locks_do(trans,
-			(wait_event_freezable(c->journal.reclaim_wait,
-					      (ret = journal_reclaim_wait_done(c))), ret));
+		wait_event_freezable(c->journal.reclaim_wait,
+				     (ret = journal_reclaim_wait_done(c)));
+		if (ret < 0)
+			break;
+
+		ret = bch2_trans_relock(trans);
 		break;
 	case -BCH_ERR_btree_insert_need_flush_buffer: {
 		struct btree_write_buffer *wb = &c->btree_write_buffer;
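The hunk above writes the unlock/wait/relock sequence around journal reclaim out explicitly, where the removed side packed it into drop_locks_do(). Assuming the helper has its usual shape (a sketch under that assumption, with stub lock functions; not copied from this tree), the two forms do essentially the same thing:

#include <stdio.h>

struct btree_trans { int locked; };

static void trans_unlock(struct btree_trans *t) { t->locked = 0; }
static int  trans_relock(struct btree_trans *t) { t->locked = 1; return 0; }

/*
 * Presumed shape of the bcachefs helper (an assumption; check
 * btree_locking.h at this revision): run an expression with the
 * transaction unlocked, then relock, returning the first error.
 */
#define drop_locks_do(_trans, _do)		\
({						\
	trans_unlock(_trans);			\
	(_do) ?: trans_relock(_trans);		\
})

static int wait_for_reclaim(void) { return 0; /* stand-in for the wait */ }

int main(void)
{
	struct btree_trans trans = { .locked = 1 };
	int ret = drop_locks_do(&trans, wait_for_reclaim());

	printf("ret=%d locked=%d\n", ret, trans.locked);
	return 0;
}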
@@ -1306,29 +1311,52 @@ static int need_whiteout_for_snapshot(struct btree_trans *trans,
 	return ret;
 }
 
-static int pos_overwritten_in_snapshot(struct btree_trans *trans, enum btree_id btree,
-				       struct bpos pos, u32 snapshot)
+static int get_snapshot_overwrites(struct btree_trans *trans,
+				   enum btree_id btree,
+				   struct bpos pos,
+				   snapshot_id_list *overwrites)
 {
 	struct bch_fs *c = trans->c;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	int ret;
+	snapshot_id_list overwrites2;
+	u32 *i;
+	int ret = 0;
 
-	for_each_btree_key_norestart(trans, iter,
-			btree, SPOS(pos.inode, pos.offset, snapshot),
-			BTREE_ITER_ALL_SNAPSHOTS|
-			BTREE_ITER_NOPRESERVE, k, ret) {
+	darray_init(overwrites);
+	darray_init(&overwrites2);
+
+	for_each_btree_key_norestart(trans, iter, btree,
+			SPOS(pos.inode, pos.offset, 0),
+			BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
 		if (bpos_ge(k.k->p, pos))
 			break;
 
 		if (bch2_snapshot_is_ancestor(c, k.k->p.snapshot, pos.snapshot)) {
-			ret = 1;
-			break;
+			ret = snapshot_list_add(c, &overwrites2, k.k->p.snapshot);
+			if (ret)
+				break;
 		}
 	}
 	bch2_trans_iter_exit(trans, &iter);
 
+	if (ret)
+		goto err;
+
+	darray_for_each(overwrites2, i)
+		if (!snapshot_list_has_ancestor(c, &overwrites2, *i)) {
+			ret = snapshot_list_add(c, overwrites, *i);
+			if (ret)
+				goto err;
+		}
+
+	*overwrites = overwrites2;
+out:
+	darray_exit(&overwrites2);
 	return ret;
+err:
+	darray_exit(overwrites);
+	goto out;
 }
 
 int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
@@ -1337,61 +1365,76 @@ int __bch2_insert_snapshot_whiteouts(struct btree_trans *trans,
 				   struct bpos new_pos)
 {
 	struct bch_fs *c = trans->c;
-	struct btree_iter old_iter, new_iter;
-	struct bkey_s_c k;
-	snapshot_id_list s;
-	struct bkey_i *update;
+	snapshot_id_list old_overwrites, new_overwrites, updates;
+	bool began_transaction = false;
+	u32 *i;
 	int ret;
 
 	if (!bch2_snapshot_has_children(c, old_pos.snapshot))
 		return 0;
 
-	darray_init(&s);
+	darray_init(&old_overwrites);
+	darray_init(&new_overwrites);
+	darray_init(&updates);
 
-	bch2_trans_iter_init(trans, &old_iter, btree, old_pos,
-			     BTREE_ITER_NOT_EXTENTS|
-			     BTREE_ITER_ALL_SNAPSHOTS);
-	while ((k = bch2_btree_iter_prev(&old_iter)).k &&
-	       !(ret = bkey_err(k)) &&
-	       bkey_eq(old_pos, k.k->p)) {
-
-		if (!bch2_snapshot_is_ancestor(c, k.k->p.snapshot, old_pos.snapshot) ||
-		    snapshot_list_has_ancestor(c, &s, k.k->p.snapshot))
-			continue;
-
-		ret = pos_overwritten_in_snapshot(trans, btree,
-						  new_pos, k.k->p.snapshot);
-		if (ret < 0)
-			break;
-
-		if (!ret) {
-			struct bpos whiteout_pos =
-				SPOS(new_pos.inode, new_pos.offset, k.k->p.snapshot);;
-
-			bch2_trans_iter_init(trans, &new_iter, btree, whiteout_pos,
-					     BTREE_ITER_NOT_EXTENTS|
-					     BTREE_ITER_INTENT);
-			update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
-			ret = PTR_ERR_OR_ZERO(update);
-			if (ret)
-				break;
-
-			bkey_init(&update->k);
-			update->k.p = whiteout_pos;
-			update->k.type = KEY_TYPE_whiteout;
-
-			ret = bch2_btree_iter_traverse(&new_iter) ?:
-			      bch2_trans_update(trans, &new_iter, update,
-						BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
-			bch2_trans_iter_exit(trans, &new_iter);
-			if (ret)
-				break;
-		}
-
-		ret = snapshot_list_add(c, &s, k.k->p.snapshot);
-		if (ret)
-			break;
-	}
-	bch2_trans_iter_exit(trans, &old_iter);
-	darray_exit(&s);
+	ret = get_snapshot_overwrites(trans, btree, old_pos, &old_overwrites) ?:
+	      get_snapshot_overwrites(trans, btree, new_pos, &new_overwrites);
+	if (ret)
+		goto err;
+
+	darray_for_each(old_overwrites, i)
+		if (!snapshot_list_has_ancestor(c, &new_overwrites, *i)) {
+			ret = darray_push(&updates, *i);
+			if (ret)
+				goto err;
+		}
+
+	if (updates.nr > 4) {
+		bch2_trans_begin(trans);
+		began_transaction = true;
+	}
+
+	darray_for_each(updates, i) {
+		struct btree_iter iter;
+		struct bkey_i *update;
+
+		bch2_trans_iter_init(trans, &iter, btree,
+				     SPOS(new_pos.inode, new_pos.offset, *i),
+				     BTREE_ITER_NOT_EXTENTS|
+				     BTREE_ITER_INTENT);
+		update = bch2_trans_kmalloc(trans, sizeof(struct bkey_i));
+		ret = PTR_ERR_OR_ZERO(update);
+		if (ret)
+			break;
+
+		bkey_init(&update->k);
+		update->k.p = iter.pos;
+		update->k.type = KEY_TYPE_whiteout;
+
+		ret = bch2_btree_iter_traverse(&iter) ?:
+		      bch2_trans_update(trans, &iter, update,
+					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE) ?:
+		      (began_transaction && trans->nr_updates > 4
+		       ? bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL) : 0);
+
+		bch2_trans_iter_exit(trans, &iter);
+
+		if (ret)
+			goto err;
+	}
+
+	if (began_transaction && trans->nr_updates) {
+		ret = bch2_trans_commit(trans, NULL, NULL, BTREE_INSERT_NOFAIL);
+		if (ret)
+			goto err;
+	}
+
+	if (began_transaction)
+		ret = -BCH_ERR_transaction_restart_nested;
+err:
+	darray_exit(&updates);
+	darray_exit(&new_overwrites);
+	darray_exit(&old_overwrites);
 
 	return ret;
 }
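Both get_snapshot_overwrites() and the old_overwrites/new_overwrites comparison above reduce a raw list of snapshot IDs to a minimal set: an ID that already has an ancestor in the list is redundant. A toy standalone sketch of that filtering step (the parent-chain table and helpers are illustrative stand-ins for bch2_snapshot_is_ancestor() and snapshot_list_has_ancestor(); their exact semantics here are an assumption):

#include <stdbool.h>
#include <stdio.h>

/* toy snapshot tree: parent[id] == 0 means "no parent" */
static const unsigned parent[] = { 0, 0, 1, 1, 2 };

static bool is_ancestor(unsigned id, unsigned ancestor)
{
	while (id) {
		if (id == ancestor)
			return true;
		id = parent[id];
	}
	return false;
}

/* does the list contain an ancestor of id (other than id itself)? */
static bool list_has_ancestor(const unsigned *s, unsigned nr, unsigned id)
{
	for (unsigned i = 0; i < nr; i++)
		if (s[i] != id && is_ancestor(id, s[i]))
			return true;
	return false;
}

int main(void)
{
	unsigned in[]  = { 2, 3, 4 };	/* 4 is a descendant of 2 */
	unsigned out[3], nr = 0;

	for (unsigned i = 0; i < 3; i++)
		if (!list_has_ancestor(in, 3, in[i]))
			out[nr++] = in[i];

	for (unsigned i = 0; i < nr; i++)
		printf("%u ", out[i]);	/* prints: 2 3 */
	printf("\n");
	return 0;
}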
@@ -87,6 +87,40 @@ err:
 	return ret;
 }
 
+void bch2_disk_groups_to_text(struct printbuf *out, struct bch_fs *c)
+{
+	struct bch_disk_groups_cpu *g;
+	struct bch_dev *ca;
+	int i;
+	unsigned iter;
+
+	out->atomic++;
+	rcu_read_lock();
+
+	g = rcu_dereference(c->disk_groups);
+	if (!g)
+		goto out;
+
+	for (i = 0; i < g->nr; i++) {
+		if (i)
+			prt_printf(out, " ");
+
+		if (g->entries[i].deleted) {
+			prt_printf(out, "[deleted]");
+			continue;
+		}
+
+		prt_printf(out, "[parent %d devs", g->entries[i].parent);
+		for_each_member_device_rcu(ca, c, iter, &g->entries[i].devs)
+			prt_printf(out, " %s", ca->name);
+		prt_printf(out, "]");
+	}
+
+out:
+	rcu_read_unlock();
+	out->atomic--;
+}
+
 static void bch2_sb_disk_groups_to_text(struct printbuf *out,
 					struct bch_sb *sb,
 					struct bch_sb_field *f)
@@ -174,26 +208,36 @@ int bch2_sb_disk_groups_to_cpu(struct bch_fs *c)
 const struct bch_devs_mask *bch2_target_to_mask(struct bch_fs *c, unsigned target)
 {
 	struct target t = target_decode(target);
+	struct bch_devs_mask *devs;
+
+	rcu_read_lock();
 
 	switch (t.type) {
 	case TARGET_NULL:
-		return NULL;
+		devs = NULL;
+		break;
 	case TARGET_DEV: {
 		struct bch_dev *ca = t.dev < c->sb.nr_devices
 			? rcu_dereference(c->devs[t.dev])
 			: NULL;
-		return ca ? &ca->self : NULL;
+		devs = ca ? &ca->self : NULL;
+		break;
 	}
 	case TARGET_GROUP: {
 		struct bch_disk_groups_cpu *g = rcu_dereference(c->disk_groups);
 
-		return g && t.group < g->nr && !g->entries[t.group].deleted
+		devs = g && t.group < g->nr && !g->entries[t.group].deleted
 			? &g->entries[t.group].devs
 			: NULL;
+		break;
 	}
 	default:
 		BUG();
 	}
 
+	rcu_read_unlock();
+
+	return devs;
 }
 
 bool bch2_dev_in_target(struct bch_fs *c, unsigned dev, unsigned target)
@@ -68,6 +68,14 @@ static inline struct bch_devs_mask target_rw_devs(struct bch_fs *c,
 	return devs;
 }
 
+static inline bool bch2_target_accepts_data(struct bch_fs *c,
+					    enum bch_data_type data_type,
+					    u16 target)
+{
+	struct bch_devs_mask rw_devs = target_rw_devs(c, data_type, target);
+	return !bitmap_empty(rw_devs.d, BCH_SB_MEMBERS_MAX);
+}
+
 bool bch2_dev_in_target(struct bch_fs *, unsigned, unsigned);
 
 int bch2_disk_path_find(struct bch_sb_handle *, const char *);
@@ -88,4 +96,6 @@ int bch2_dev_group_set(struct bch_fs *, struct bch_dev *, const char *);
 const char *bch2_sb_validate_disk_groups(struct bch_sb *,
 					 struct bch_sb_field *);
 
+void bch2_disk_groups_to_text(struct printbuf *, struct bch_fs *);
+
 #endif /* _BCACHEFS_DISK_GROUPS_H */
@@ -1146,6 +1146,7 @@ err:
 	mutex_lock(&c->ec_stripe_new_lock);
 	list_del(&s->list);
 	mutex_unlock(&c->ec_stripe_new_lock);
+	wake_up(&c->ec_stripe_new_wait);
 
 	ec_stripe_buf_exit(&s->existing_stripe);
 	ec_stripe_buf_exit(&s->new_stripe);
@@ -1481,22 +1481,14 @@ again:
 			continue;
 
 		if (!(inode->v.i_state & I_DONTCACHE) &&
-		    !(inode->v.i_state & I_FREEING)) {
+		    !(inode->v.i_state & I_FREEING) &&
+		    igrab(&inode->v)) {
 			this_pass_clean = false;
 
-			d_mark_dontcache(&inode->v);
-			d_prune_aliases(&inode->v);
-
-			/*
-			 * If i_count was zero, we have to take and release a
-			 * ref in order for I_DONTCACHE to be noticed and the
-			 * inode to be dropped;
-			 */
-
-			if (!atomic_read(&inode->v.i_count) &&
-			    igrab(&inode->v) &&
-			    darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN))
+			if (darray_push_gfp(&grabbed, inode, GFP_ATOMIC|__GFP_NOWARN)) {
 				iput(&inode->v);
+				break;
+			}
 		} else if (clean_pass && this_pass_clean) {
 			wait_queue_head_t *wq = bit_waitqueue(&inode->v.i_state, __I_NEW);
 			DEFINE_WAIT_BIT(wait, &inode->v.i_state, __I_NEW);
@@ -1511,8 +1503,12 @@ again:
 	}
 	mutex_unlock(&c->vfs_inodes_lock);
 
-	darray_for_each(grabbed, i)
-		iput(&(*i)->v);
+	darray_for_each(grabbed, i) {
+		inode = *i;
+		d_mark_dontcache(&inode->v);
+		d_prune_aliases(&inode->v);
+		iput(&inode->v);
+	}
 	grabbed.nr = 0;
 
 	if (!clean_pass || !this_pass_clean) {
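The reworked eviction loop above only takes references (igrab()) while c->vfs_inodes_lock is held, deferring the work that can block, d_mark_dontcache(), d_prune_aliases(), and the final iput(), to after the unlock via the grabbed darray. A generic sketch of that grab-under-lock, process-unlocked shape (illustrative names; a plain pthread mutex stands in for the kernel lock):

#include <pthread.h>
#include <stdio.h>

struct obj { int refcount; int id; };

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj objs[4] = { {1, 0}, {1, 1}, {1, 2}, {1, 3} };

static void slow_teardown(struct obj *o)	/* may block: do it unlocked */
{
	printf("tearing down %d\n", o->id);
}

int main(void)
{
	struct obj *grabbed[4];
	int nr = 0;

	/* phase 1: under the lock, only take references */
	pthread_mutex_lock(&list_lock);
	for (int i = 0; i < 4; i++) {
		objs[i].refcount++;		/* the igrab() analogue */
		grabbed[nr++] = &objs[i];
	}
	pthread_mutex_unlock(&list_lock);

	/* phase 2: blocking work happens with the lock dropped */
	for (int i = 0; i < nr; i++) {
		slow_teardown(grabbed[i]);
		grabbed[i]->refcount--;		/* the iput() analogue */
	}
	return 0;
}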
@@ -2057,10 +2057,11 @@ static struct promote_op *__promote_alloc(struct btree_trans *trans,
 			.write_flags	= BCH_WRITE_ALLOC_NOWAIT|BCH_WRITE_CACHED,
 		},
 		btree_id, k);
+	/*
+	 * possible errors: -BCH_ERR_nocow_lock_blocked,
+	 * -BCH_ERR_ENOSPC_disk_reservation:
+	 */
 	if (ret) {
-		WARN_ONCE(ret != -BCH_ERR_nocow_lock_blocked,
-			  "%s: saw unknown error %s\n", __func__, bch2_err_str(ret));
-
 		ret = rhashtable_remove_fast(&c->promote_table, &op->hash,
 					     bch_promote_params);
 		BUG_ON(ret);
@@ -57,7 +57,8 @@ static bool rebalance_pred(struct bch_fs *c, void *arg,
 		i = 0;
 		bkey_for_each_ptr(ptrs, ptr) {
 			if (!ptr->cached &&
-			    !bch2_dev_in_target(c, ptr->dev, io_opts->background_target))
+			    !bch2_dev_in_target(c, ptr->dev, io_opts->background_target) &&
+			    bch2_target_accepts_data(c, BCH_DATA_user, io_opts->background_target))
 				data_opts->rewrite_ptrs |= 1U << i;
 			i++;
 		}
@@ -121,7 +121,7 @@ static inline int snapshot_list_add(struct bch_fs *c, snapshot_id_list *s, u32 i
 {
 	int ret;
 
-	BUG_ON(snapshot_list_has_id(s, id));
+	EBUG_ON(snapshot_list_has_id(s, id));
 	ret = darray_push(s, id);
 	if (ret)
 		bch_err(c, "error reallocating snapshot_id_list (size %zu)", s->size);
@@ -223,6 +223,7 @@ static void bch2_write_refs_to_text(struct printbuf *out, struct bch_fs *c)
 #endif
 
 read_attribute(internal_uuid);
+read_attribute(disk_groups);
 
 read_attribute(has_data);
 read_attribute(alloc_debug);
@@ -471,6 +472,9 @@ SHOW(bch2_fs)
 	if (attr == &sysfs_nocow_lock_table)
 		bch2_nocow_locks_to_text(out, &c->nocow_locks);
 
+	if (attr == &sysfs_disk_groups)
+		bch2_disk_groups_to_text(out, c);
+
 	return 0;
 }
@@ -681,6 +685,8 @@ struct attribute *bch2_fs_internal_files[] = {
 	&sysfs_moving_ctxts,
 
 	&sysfs_internal_uuid,
+
+	&sysfs_disk_groups,
 	NULL
 };
@@ -593,10 +593,8 @@ static int rand_insert(struct bch_fs *c, u64 nr)
 
 		ret = commit_do(&trans, NULL, NULL, 0,
			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k.k_i, 0));
-		if (ret) {
-			bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+		if (ret)
 			break;
-		}
 	}
 
 	bch2_trans_exit(&trans);
@@ -629,10 +627,8 @@ static int rand_insert_multi(struct bch_fs *c, u64 nr)
			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[5].k_i, 0) ?:
			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[6].k_i, 0) ?:
			__bch2_btree_insert(&trans, BTREE_ID_xattrs, &k[7].k_i, 0));
-		if (ret) {
-			bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+		if (ret)
 			break;
-		}
 	}
 
 	bch2_trans_exit(&trans);
@@ -656,10 +652,8 @@ static int rand_lookup(struct bch_fs *c, u64 nr)
 
 		lockrestart_do(&trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
 		ret = bkey_err(k);
-		if (ret) {
-			bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+		if (ret)
 			break;
-		}
 	}
 
 	bch2_trans_iter_exit(&trans, &iter);
@@ -709,10 +703,8 @@ static int rand_mixed(struct bch_fs *c, u64 nr)
 		rand = test_rand();
 		ret = commit_do(&trans, NULL, NULL, 0,
			rand_mixed_trans(&trans, &iter, &cookie, i, rand));
-		if (ret) {
-			bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
+		if (ret)
 			break;
-		}
 	}
 
 	bch2_trans_iter_exit(&trans, &iter);
@@ -728,7 +720,7 @@ static int __do_delete(struct btree_trans *trans, struct bpos pos)
 
 	bch2_trans_iter_init(trans, &iter, BTREE_ID_xattrs, pos,
			     BTREE_ITER_INTENT);
-	lockrestart_do(trans, bkey_err(k = bch2_btree_iter_peek(&iter)));
+	k = bch2_btree_iter_peek(&iter);
 	ret = bkey_err(k);
 	if (ret)
 		goto err;
@@ -755,10 +747,8 @@ static int rand_delete(struct bch_fs *c, u64 nr)
 
 		ret = commit_do(&trans, NULL, NULL, 0,
			__do_delete(&trans, pos));
-		if (ret) {
-			bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
+		if (ret)
 			break;
-		}
 	}
 
 	bch2_trans_exit(&trans);
@@ -767,90 +757,59 @@ static int rand_delete(struct bch_fs *c, u64 nr)
 
 static int seq_insert(struct bch_fs *c, u64 nr)
 {
-	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	struct bkey_i_cookie insert;
-	int ret = 0;
 
 	bkey_cookie_init(&insert.k_i);
 
-	bch2_trans_init(&trans, c, 0, 0);
-
-	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
+	return bch2_trans_run(c,
+		for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k,
-					NULL, NULL, 0,
-		({
+					NULL, NULL, 0, ({
			if (iter.pos.offset >= nr)
				break;
			insert.k.p = iter.pos;
			bch2_trans_update(&trans, &iter, &insert.k_i, 0);
-		}));
-	if (ret)
-		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-
-	bch2_trans_exit(&trans);
-	return ret;
+		})));
 }
 
 static int seq_lookup(struct bch_fs *c, u64 nr)
 {
-	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	int ret = 0;
-
-	bch2_trans_init(&trans, c, 0, 0);
 
-	ret = for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
+	return bch2_trans_run(c,
+		for_each_btree_key2_upto(&trans, iter, BTREE_ID_xattrs,
				  SPOS(0, 0, U32_MAX), POS(0, U64_MAX),
				  0, k,
-		0);
-	if (ret)
-		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-
-	bch2_trans_exit(&trans);
-	return ret;
+		0));
 }
 
 static int seq_overwrite(struct bch_fs *c, u64 nr)
 {
-	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
-	int ret = 0;
-
-	bch2_trans_init(&trans, c, 0, 0);
 
-	ret = for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
+	return bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter, BTREE_ID_xattrs,
					SPOS(0, 0, U32_MAX),
					BTREE_ITER_INTENT, k,
-					NULL, NULL, 0,
-		({
+					NULL, NULL, 0, ({
			struct bkey_i_cookie u;
 
			bkey_reassemble(&u.k_i, k);
			bch2_trans_update(&trans, &iter, &u.k_i, 0);
-		}));
-	if (ret)
-		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-
-	bch2_trans_exit(&trans);
-	return ret;
+		})));
 }
 
 static int seq_delete(struct bch_fs *c, u64 nr)
 {
-	int ret;
-
-	ret = bch2_btree_delete_range(c, BTREE_ID_xattrs,
+	return bch2_btree_delete_range(c, BTREE_ID_xattrs,
				      SPOS(0, 0, U32_MAX),
				      POS(0, U64_MAX),
				      0, NULL);
-	if (ret)
-		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
-	return ret;
 }
 
 typedef int (*perf_test_fn)(struct bch_fs *, u64);
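The tests above now go through bch2_trans_run(), which wraps an expression in btree_trans setup/teardown, so the explicit init/exit boilerplate and per-call bch_err() logging drop out. A compilable sketch of the presumed macro shape (stub types and functions standing in for the real ones; the actual definition lives in the btree headers at this revision and may differ):

#include <stdio.h>

struct bch_fs { int dummy; };
struct btree_trans { struct bch_fs *c; };

static void bch2_trans_init(struct btree_trans *t, struct bch_fs *c, int a, int b)
{ t->c = c; (void)a; (void)b; }
static void bch2_trans_exit(struct btree_trans *t) { (void)t; }

/*
 * Presumed shape of bch2_trans_run() (an assumption): declare a
 * transaction named "trans", run _do, clean up, and yield _do's result.
 * _do refers to "trans" by name, which is why the converted tests keep
 * using &trans inside it.
 */
#define bch2_trans_run(_c, _do)			\
({						\
	struct btree_trans trans;		\
	int _ret;				\
						\
	bch2_trans_init(&trans, (_c), 0, 0);	\
	_ret = (_do);				\
	bch2_trans_exit(&trans);		\
	_ret;					\
})

static int do_something(struct btree_trans *trans) { (void)trans; return 0; }

int main(void)
{
	struct bch_fs c = { 0 };

	printf("%d\n", bch2_trans_run(&c, do_something(&trans)));
	return 0;
}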
@@ -878,6 +878,11 @@ void __six_lock_init(struct six_lock *lock, const char *name,
 	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 
+	/*
+	 * Don't assume that we have real percpu variables available in
+	 * userspace:
+	 */
+#ifdef __KERNEL__
 	if (flags & SIX_LOCK_INIT_PCPU) {
 		/*
 		 * We don't return an error here on memory allocation failure
@@ -888,5 +893,6 @@ void __six_lock_init(struct six_lock *lock, const char *name,
 		 */
 		lock->readers = alloc_percpu(unsigned);
 	}
+#endif
 }
 EXPORT_SYMBOL_GPL(__six_lock_init);
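The headline fix is the #ifdef __KERNEL__ guard above: the userspace build of bcachefs-tools has no real percpu variables, so lock->readers is left NULL and six locks fall back to the plain, non-percpu read-lock path even when SIX_LOCK_INIT_PCPU is requested. A compile-time sketch of that guard pattern (illustrative names; the fallback counter is only for demonstration):

#include <stdlib.h>

struct six_lock_sketch {
	unsigned *readers;	/* percpu in-kernel; NULL selects the fallback path */
	unsigned read_count;	/* used when readers == NULL */
};

static void lock_init(struct six_lock_sketch *lock, int want_pcpu)
{
	lock->readers = NULL;
	lock->read_count = 0;
#ifdef __KERNEL__
	/* only the kernel build gets real percpu counters */
	if (want_pcpu)
		lock->readers = alloc_percpu(unsigned);
#else
	(void)want_pcpu;	/* userspace: always take the fallback */
#endif
}

int main(void)
{
	struct six_lock_sketch lock;

	lock_init(&lock, 1);
	return lock.readers != NULL;	/* 0 (fallback) when built in userspace */
}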