Mirror of https://github.com/koverstreet/bcachefs-tools.git
Update bcachefs sources to 26494335d114 bcachefs: improve move_gap()
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
commit 62ab2e5216
parent 55a2b65bd0
@@ -1 +1 @@
-a623b3063d5d6ab6652314028531060d5a0d192e
+26494335d114f7813a7fc499bbacb4a74d613b6f
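For context on the title: the journal keys live in a gap buffer, and move_gap() slides the unused hole to a new index by memmoving whichever live elements sit between the old and new gap positions. Below is a minimal standalone sketch of that operation; the names and code are illustrative only, not the __move_gap() implementation from the util.h hunk at the end of this diff.

#include <stddef.h>
#include <string.h>

/*
 * Sketch: capacity @size, @nr live elements, so the hole is (size - nr)
 * slots long and starts at @gap.  Live data occupies [0, gap) and
 * [gap + hole, size).
 */
static void example_move_gap(void *array, size_t element_size,
			     size_t nr, size_t size,
			     size_t old_gap, size_t new_gap)
{
	size_t hole = size - nr;
	char *base = array;

	if (new_gap < old_gap)
		/* hole moves left: live elements [new_gap, old_gap) shift right */
		memmove(base + (new_gap + hole) * element_size,
			base + new_gap * element_size,
			(old_gap - new_gap) * element_size);
	else if (new_gap > old_gap)
		/* hole moves right: live elements just right of the hole shift left */
		memmove(base + old_gap * element_size,
			base + (old_gap + hole) * element_size,
			(new_gap - old_gap) * element_size);
}

Keeping the hole parked at the insertion point is what makes a run of nearly sorted insertions O(n) overall rather than O(n^2), the property the struct journal_keys comment below calls out.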
@@ -72,14 +72,10 @@ static bool entry_matches_transaction_filter(struct jset_entry *entry,
 {
 	if (entry->type == BCH_JSET_ENTRY_btree_root ||
 	    entry->type == BCH_JSET_ENTRY_btree_keys ||
-	    entry->type == BCH_JSET_ENTRY_overwrite) {
-		struct bkey_i *k;
-
+	    entry->type == BCH_JSET_ENTRY_overwrite)
 		jset_entry_for_each_key(entry, k)
 			if (bkey_matches_filter(filter, entry, k))
 				return true;
-	}
 
 	return false;
 }
 
@@ -100,8 +96,6 @@ static bool should_print_transaction(struct jset_entry *entry, struct jset_entry
 
 static bool should_print_entry(struct jset_entry *entry, d_btree_id filter)
 {
-	struct bkey_i *k;
-
 	if (!filter.nr)
 		return true;
 
@@ -668,6 +668,8 @@ struct journal_seq_blacklist_table {
 };
 
 struct journal_keys {
+	/* must match layout in darray_types.h */
+	size_t nr, size;
 	struct journal_key {
 		u64 journal_seq;
 		u32 journal_offset;
@@ -676,15 +678,13 @@ struct journal_keys {
 		bool allocated;
 		bool overwritten;
 		struct bkey_i *k;
-	} *d;
+	} *data;
 	/*
 	 * Gap buffer: instead of all the empty space in the array being at the
 	 * end of the buffer - from @nr to @size - the empty space is at @gap.
 	 * This means that sequential insertions are O(n) instead of O(n^2).
	 */
 	size_t gap;
-	size_t nr;
-	size_t size;
 	atomic_t ref;
 	bool initial_ref_held;
 };
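The new "must match layout in darray_types.h" comment is the point of the reshuffle: with nr/size moved to the top and d renamed to data, journal_keys has the same shape as a generic darray, so the later hunks can swap open-coded loops and reallocation for darray_for_each(), darray_top() and darray_push(). A rough sketch of the layout idea, with hypothetical names rather than the real darray.h macros:

#include <stddef.h>
#include <stdint.h>

/* A darray-shaped container: element count, capacity, then the buffer. */
struct u64_darray {
	size_t nr, size;
	uint64_t *data;
};

/* Iterate live elements by pointer, the way darray_for_each() is used below. */
#define u64_darray_for_each(_d, _i) \
	for (uint64_t *_i = (_d).data; _i < (_d).data + (_d).nr; _i++)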
@@ -2156,7 +2156,9 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
 		 * isn't monotonically increasing before FILTER_SNAPSHOTS, and
 		 * that's what we check against in extents mode:
 		 */
-		if (k.k->p.inode > end.inode)
+		if (unlikely(!(iter->flags & BTREE_ITER_IS_EXTENTS)
+			     ? bkey_gt(k.k->p, end)
+			     : k.k->p.inode > end.inode))
 			goto end;
 
 		if (iter->update_path &&
@@ -42,7 +42,7 @@ static inline size_t idx_to_pos(struct journal_keys *keys, size_t idx)
 
 static inline struct journal_key *idx_to_key(struct journal_keys *keys, size_t idx)
 {
-	return keys->d + idx_to_pos(keys, idx);
+	return keys->data + idx_to_pos(keys, idx);
 }
 
 static size_t __bch2_journal_key_search(struct journal_keys *keys,
@@ -182,10 +182,10 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 	BUG_ON(test_bit(BCH_FS_rw, &c->flags));
 
 	if (idx < keys->size &&
-	    journal_key_cmp(&n, &keys->d[idx]) == 0) {
-		if (keys->d[idx].allocated)
-			kfree(keys->d[idx].k);
-		keys->d[idx] = n;
+	    journal_key_cmp(&n, &keys->data[idx]) == 0) {
+		if (keys->data[idx].allocated)
+			kfree(keys->data[idx].k);
+		keys->data[idx] = n;
 		return 0;
 	}
 
@@ -198,17 +198,17 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 			.size = max_t(size_t, keys->size, 8) * 2,
 		};
 
-		new_keys.d = kvmalloc_array(new_keys.size, sizeof(new_keys.d[0]), GFP_KERNEL);
-		if (!new_keys.d) {
+		new_keys.data = kvmalloc_array(new_keys.size, sizeof(new_keys.data[0]), GFP_KERNEL);
+		if (!new_keys.data) {
 			bch_err(c, "%s: error allocating new key array (size %zu)",
 				__func__, new_keys.size);
 			return -BCH_ERR_ENOMEM_journal_key_insert;
 		}
 
 		/* Since @keys was full, there was no gap: */
-		memcpy(new_keys.d, keys->d, sizeof(keys->d[0]) * keys->nr);
-		kvfree(keys->d);
-		keys->d = new_keys.d;
+		memcpy(new_keys.data, keys->data, sizeof(keys->data[0]) * keys->nr);
+		kvfree(keys->data);
+		keys->data = new_keys.data;
 		keys->nr = new_keys.nr;
 		keys->size = new_keys.size;
 
@@ -218,11 +218,10 @@ int bch2_journal_key_insert_take(struct bch_fs *c, enum btree_id id,
 
 	journal_iters_move_gap(c, keys->gap, idx);
 
-	move_gap(keys->d, keys->nr, keys->size, keys->gap, idx);
-	keys->gap = idx;
+	move_gap(keys, idx);
 
 	keys->nr++;
-	keys->d[keys->gap++] = n;
+	keys->data[keys->gap++] = n;
 
 	journal_iters_fix(c);
 
@@ -269,10 +268,10 @@ void bch2_journal_key_overwritten(struct bch_fs *c, enum btree_id btree,
 	size_t idx = bch2_journal_key_search(keys, btree, level, pos);
 
 	if (idx < keys->size &&
-	    keys->d[idx].btree_id == btree &&
-	    keys->d[idx].level == level &&
-	    bpos_eq(keys->d[idx].k->k.p, pos))
-		keys->d[idx].overwritten = true;
+	    keys->data[idx].btree_id == btree &&
+	    keys->data[idx].level == level &&
+	    bpos_eq(keys->data[idx].k->k.p, pos))
+		keys->data[idx].overwritten = true;
 }
 
 static void bch2_journal_iter_advance(struct journal_iter *iter)
@@ -286,16 +285,16 @@ static void bch2_journal_iter_advance(struct journal_iter *iter)
 
 static struct bkey_s_c bch2_journal_iter_peek(struct journal_iter *iter)
 {
-	struct journal_key *k = iter->keys->d + iter->idx;
+	struct journal_key *k = iter->keys->data + iter->idx;
 
-	while (k < iter->keys->d + iter->keys->size &&
+	while (k < iter->keys->data + iter->keys->size &&
 	       k->btree_id == iter->btree_id &&
 	       k->level == iter->level) {
 		if (!k->overwritten)
 			return bkey_i_to_s_c(k->k);
 
 		bch2_journal_iter_advance(iter);
-		k = iter->keys->d + iter->idx;
+		k = iter->keys->data + iter->idx;
 	}
 
 	return bkey_s_c_null;
@@ -467,22 +466,20 @@ static int journal_sort_key_cmp(const void *_l, const void *_r)
 void bch2_journal_keys_put(struct bch_fs *c)
 {
 	struct journal_keys *keys = &c->journal_keys;
-	struct journal_key *i;
 
 	BUG_ON(atomic_read(&keys->ref) <= 0);
 
 	if (!atomic_dec_and_test(&keys->ref))
 		return;
 
-	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
-	keys->gap = keys->nr;
+	move_gap(keys, keys->nr);
 
-	for (i = keys->d; i < keys->d + keys->nr; i++)
+	darray_for_each(*keys, i)
 		if (i->allocated)
 			kfree(i->k);
 
-	kvfree(keys->d);
-	keys->d = NULL;
+	kvfree(keys->data);
+	keys->data = NULL;
 	keys->nr = keys->gap = keys->size = 0;
 
 	bch2_journal_entries_free(c);
@@ -490,62 +487,27 @@ void bch2_journal_keys_put(struct bch_fs *c)
 
 static void __journal_keys_sort(struct journal_keys *keys)
 {
-	struct journal_key *src, *dst;
+	sort(keys->data, keys->nr, sizeof(keys->data[0]), journal_sort_key_cmp, NULL);
 
-	sort(keys->d, keys->nr, sizeof(keys->d[0]), journal_sort_key_cmp, NULL);
+	struct journal_key *dst = keys->data;
 
-	src = dst = keys->d;
-	while (src < keys->d + keys->nr) {
-		while (src + 1 < keys->d + keys->nr &&
-		       !journal_key_cmp(src, src + 1))
-			src++;
+	darray_for_each(*keys, src) {
+		if (src + 1 < &darray_top(*keys) &&
+		    !journal_key_cmp(src, src + 1))
+			continue;
 
-		*dst++ = *src++;
+		*dst++ = *src;
 	}
 
-	keys->nr = dst - keys->d;
+	keys->nr = dst - keys->data;
 }
 
 int bch2_journal_keys_sort(struct bch_fs *c)
 {
 	struct genradix_iter iter;
 	struct journal_replay *i, **_i;
-	struct jset_entry *entry;
-	struct bkey_i *k;
 	struct journal_keys *keys = &c->journal_keys;
-	size_t nr_keys = 0, nr_read = 0;
-
-	genradix_for_each(&c->journal_entries, iter, _i) {
-		i = *_i;
-
-		if (!i || i->ignore)
-			continue;
-
-		for_each_jset_key(k, entry, &i->j)
-			nr_keys++;
-	}
-
-	if (!nr_keys)
-		return 0;
-
-	keys->size = roundup_pow_of_two(nr_keys);
-
-	keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
-	if (!keys->d) {
-		bch_err(c, "Failed to allocate buffer for sorted journal keys (%zu keys); trying slowpath",
-			nr_keys);
-
-		do {
-			keys->size >>= 1;
-			keys->d = kvmalloc_array(keys->size, sizeof(keys->d[0]), GFP_KERNEL);
-		} while (!keys->d && keys->size > nr_keys / 8);
-
-		if (!keys->d) {
-			bch_err(c, "Failed to allocate %zu size buffer for sorted journal keys; exiting",
-				keys->size);
-			return -BCH_ERR_ENOMEM_journal_keys_sort;
-		}
-	}
+	size_t nr_read = 0;
 
 	genradix_for_each(&c->journal_entries, iter, _i) {
 		i = *_i;
@@ -556,17 +518,7 @@ int bch2_journal_keys_sort(struct bch_fs *c)
 		cond_resched();
 
 		for_each_jset_key(k, entry, &i->j) {
-			if (keys->nr == keys->size) {
-				__journal_keys_sort(keys);
-
-				if (keys->nr > keys->size * 7 / 8) {
-					bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu/%zu",
-						keys->nr, keys->size, nr_read, nr_keys);
-					return -BCH_ERR_ENOMEM_journal_keys_sort;
-				}
-			}
-
-			keys->d[keys->nr++] = (struct journal_key) {
+			struct journal_key n = (struct journal_key) {
 				.btree_id = entry->btree_id,
 				.level = entry->level,
 				.k = k,
@@ -574,6 +526,18 @@ int bch2_journal_keys_sort(struct bch_fs *c)
 				.journal_offset = k->_data - i->j._data,
 			};
 
+			if (darray_push(keys, n)) {
+				__journal_keys_sort(keys);
+
+				if (keys->nr * 8 > keys->size * 7) {
+					bch_err(c, "Too many journal keys for slowpath; have %zu compacted, buf size %zu, processed %zu keys at seq %llu",
+						keys->nr, keys->size, nr_read, le64_to_cpu(i->j.seq));
+					return -BCH_ERR_ENOMEM_journal_keys_sort;
+				}
+
+				BUG_ON(darray_push(keys, n));
+			}
+
 			nr_read++;
 		}
 	}
@@ -581,6 +545,6 @@ int bch2_journal_keys_sort(struct bch_fs *c)
 	__journal_keys_sort(keys);
 	keys->gap = keys->nr;
 
-	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_keys, keys->nr);
+	bch_verbose(c, "Journal keys: %zu read, %zu after sorting and compacting", nr_read, keys->nr);
 	return 0;
 }
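The rewritten sort path above follows a common pattern: sort so that duplicates of the same key become adjacent (older versions first), compact in place keeping only the last entry of each run, and fall back to sort-and-compact whenever a push finds the buffer full. A standalone sketch of the sort-and-compact step, with made-up record and function names:

#include <stdint.h>
#include <stdlib.h>

struct rec {
	uint32_t key;	/* stands in for (btree_id, level, pos) */
	uint64_t seq;	/* newer entries carry a higher seq */
};

static int rec_cmp(const void *_l, const void *_r)
{
	const struct rec *l = _l, *r = _r;

	if (l->key != r->key)
		return l->key < r->key ? -1 : 1;
	return l->seq < r->seq ? -1 : l->seq > r->seq;
}

/* Sort, then keep only the newest record for each key; returns the new count. */
static size_t sort_and_compact(struct rec *v, size_t nr)
{
	struct rec *dst = v;

	qsort(v, nr, sizeof(v[0]), rec_cmp);

	for (struct rec *src = v; src < v + nr; src++) {
		if (src + 1 < v + nr && src[0].key == src[1].key)
			continue;	/* a newer duplicate follows; drop this one */
		*dst++ = *src;
	}

	return dst - v;
}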
@@ -848,7 +848,7 @@ __bch2_fs_log_msg(struct bch_fs *c, unsigned commit_flags, const char *fmt,
 	if (ret)
 		goto err;
 
-	if (!test_bit(JOURNAL_RUNNING, &c->journal.flags)) {
+	if (!test_bit(JOURNAL_STARTED, &c->journal.flags)) {
 		ret = darray_make_room(&c->journal.early_journal_entries, jset_u64s(u64s));
 		if (ret)
 			goto err;
@@ -574,8 +574,6 @@ void bch2_journal_keys_to_write_buffer_end(struct bch_fs *c, struct journal_keys
 static int bch2_journal_keys_to_write_buffer(struct bch_fs *c, struct journal_buf *buf)
 {
 	struct journal_keys_to_wb dst;
-	struct jset_entry *entry;
-	struct bkey_i *k;
 	int ret = 0;
 
 	bch2_journal_keys_to_write_buffer_start(c, &dst, le64_to_cpu(buf->data->seq));
@@ -1183,13 +1183,12 @@ void bch2_fs_journal_stop(struct journal *j)
 	bch2_journal_meta(j);
 
 	journal_quiesce(j);
+	cancel_delayed_work_sync(&j->write_work);
 
 	BUG_ON(!bch2_journal_error(j) &&
 	       test_bit(JOURNAL_REPLAY_DONE, &j->flags) &&
 	       j->last_empty_seq != journal_cur_seq(j));
-
-	clear_bit(JOURNAL_RUNNING, &j->flags);
-	cancel_delayed_work_sync(&j->write_work);
 }
 
 int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
@@ -1263,7 +1262,7 @@ int bch2_fs_journal_start(struct journal *j, u64 cur_seq)
 
 	spin_lock(&j->lock);
 
-	set_bit(JOURNAL_RUNNING, &j->flags);
+	set_bit(JOURNAL_STARTED, &j->flags);
 	j->last_flush_write = jiffies;
 
 	j->reservations.idx = j->reservations.unwritten_idx = journal_cur_seq(j);
@@ -372,7 +372,7 @@ static inline int bch2_journal_res_get(struct journal *j, struct journal_res *re
 	int ret;
 
 	EBUG_ON(res->ref);
-	EBUG_ON(!test_bit(JOURNAL_RUNNING, &j->flags));
+	EBUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
 
 	res->u64s = u64s;
 
@@ -418,7 +418,7 @@ struct bch_dev;
 
 static inline void bch2_journal_set_replay_done(struct journal *j)
 {
-	BUG_ON(!test_bit(JOURNAL_RUNNING, &j->flags));
+	BUG_ON(!test_bit(JOURNAL_STARTED, &j->flags));
 	set_bit(JOURNAL_REPLAY_DONE, &j->flags);
 }
 
@@ -40,7 +40,6 @@ static void bch2_journal_replay_to_text(struct printbuf *out, struct bch_fs *c,
 
 	bch2_journal_ptrs_to_text(out, c, j);
 
-	struct jset_entry *entry;
 	for_each_jset_entry_type(entry, &j->j, BCH_JSET_ENTRY_datetime) {
 		struct jset_entry_datetime *datetime =
 			container_of(entry, struct jset_entry_datetime, entry);
@@ -394,7 +393,6 @@ static int journal_entry_btree_keys_validate(struct bch_fs *c,
 static void journal_entry_btree_keys_to_text(struct printbuf *out, struct bch_fs *c,
 					     struct jset_entry *entry)
 {
-	struct bkey_i *k;
 	bool first = true;
 
 	jset_entry_for_each_key(entry, k) {
@@ -1814,7 +1812,6 @@ static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w)
 			if (!wb.wb)
 				bch2_journal_keys_to_write_buffer_start(c, &wb, seq);
 
-			struct bkey_i *k;
 			jset_entry_for_each_key(i, k) {
 				ret = bch2_journal_key_to_wb(c, &wb, i->btree_id, k);
 				if (ret) {
@@ -39,12 +39,12 @@ static inline struct jset_entry *__jset_entry_type_next(struct jset *jset,
 }
 
 #define for_each_jset_entry_type(entry, jset, type) \
-	for (entry = (jset)->start; \
+	for (struct jset_entry *entry = (jset)->start; \
 	     (entry = __jset_entry_type_next(jset, entry, type)); \
 	     entry = vstruct_next(entry))
 
 #define jset_entry_for_each_key(_e, _k) \
-	for (_k = (_e)->start; \
+	for (struct bkey_i *_k = (_e)->start; \
 	     _k < vstruct_last(_e); \
 	     _k = bkey_next(_k))
 
@@ -831,7 +831,7 @@ bool bch2_journal_flush_pins(struct journal *j, u64 seq_to_flush)
 	/* time_stats this */
 	bool did_work = false;
 
-	if (!test_bit(JOURNAL_RUNNING, &j->flags))
+	if (!test_bit(JOURNAL_STARTED, &j->flags))
 		return false;
 
 	closure_wait_event(&j->async_wait,
@@ -131,7 +131,7 @@ enum journal_space_from {
 
 enum journal_flags {
 	JOURNAL_REPLAY_DONE,
-	JOURNAL_RUNNING,
+	JOURNAL_STARTED,
 	JOURNAL_MAY_SKIP_FLUSH,
 	JOURNAL_NEED_FLUSH_WRITE,
 };
@@ -57,8 +57,8 @@ static void drop_alloc_keys(struct journal_keys *keys)
 	size_t src, dst;
 
 	for (src = 0, dst = 0; src < keys->nr; src++)
-		if (!btree_id_is_alloc(keys->d[src].btree_id))
-			keys->d[dst++] = keys->d[src];
+		if (!btree_id_is_alloc(keys->data[src].btree_id))
+			keys->data[dst++] = keys->data[src];
 
 	keys->nr = dst;
 }
@@ -70,9 +70,7 @@ static void drop_alloc_keys(struct journal_keys *keys)
  */
 static void zero_out_btree_mem_ptr(struct journal_keys *keys)
 {
-	struct journal_key *i;
-
-	for (i = keys->d; i < keys->d + keys->nr; i++)
+	darray_for_each(*keys, i)
 		if (i->k->k.type == KEY_TYPE_btree_ptr_v2)
 			bkey_i_to_btree_ptr_v2(i->k)->v.mem_ptr = 0;
 }
@@ -166,11 +164,9 @@ static int bch2_journal_replay(struct bch_fs *c)
 	 * efficient - better locality of btree access - but some might fail if
 	 * that would cause a journal deadlock.
 	 */
-	for (size_t i = 0; i < keys->nr; i++) {
+	darray_for_each(*keys, k) {
 		cond_resched();
 
-		struct journal_key *k = keys->d + i;
-
 		/* Skip fastpath if we're low on space in the journal */
 		ret = c->journal.watermark ? -1 :
 			commit_do(trans, NULL, NULL,
@@ -524,8 +520,7 @@ static int bch2_set_may_go_rw(struct bch_fs *c)
 	 * setting journal_key->overwritten: it will be accessed by multiple
 	 * threads
 	 */
-	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
-	keys->gap = keys->nr;
+	move_gap(keys, keys->nr);
 
 	set_bit(BCH_FS_may_go_rw, &c->flags);
 
@@ -286,13 +286,8 @@ static void __bch2_fs_read_only(struct bch_fs *c)
 	if (test_bit(JOURNAL_REPLAY_DONE, &c->journal.flags) &&
 	    !test_bit(BCH_FS_emergency_ro, &c->flags))
 		set_bit(BCH_FS_clean_shutdown, &c->flags);
 
 	bch2_fs_journal_stop(&c->journal);
 
-	bch_info(c, "%sshutdown complete, journal seq %llu",
-		 test_bit(BCH_FS_clean_shutdown, &c->flags) ? "" : "un",
-		 c->journal.seq_ondisk);
-
 	/*
 	 * After stopping journal:
 	 */
@@ -466,7 +461,6 @@ static int __bch2_fs_read_write(struct bch_fs *c, bool early)
 	 * at least one non-flush write in the journal or recovery will fail:
 	 */
 	set_bit(JOURNAL_NEED_FLUSH_WRITE, &c->journal.flags);
-	set_bit(JOURNAL_RUNNING, &c->journal.flags);
 
 	for_each_rw_member(c, ca)
 		bch2_dev_allocator_add(c, ca);
@@ -289,7 +289,7 @@ int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigne
 	do {
 		nr_entries = stack_trace_save_tsk(task, stack->data, stack->size, skipnr + 1);
 	} while (nr_entries == stack->size &&
-		 !(ret = darray_make_room(stack, stack->size * 2)));
+		 !(ret = darray_make_room_gfp(stack, stack->size * 2, gfp)));
 
 	stack->nr = nr_entries;
 	up_read(&task->signal->exec_update_lock);
@@ -651,8 +651,12 @@ static inline void __move_gap(void *array, size_t element_size,
 }
 
 /* Move the gap in a gap buffer: */
-#define move_gap(_array, _nr, _size, _old_gap, _new_gap) \
-	__move_gap(_array, sizeof(_array[0]), _nr, _size, _old_gap, _new_gap)
+#define move_gap(_d, _new_gap) \
+do { \
+	__move_gap((_d)->data, sizeof((_d)->data[0]), \
+		   (_d)->nr, (_d)->size, (_d)->gap, _new_gap); \
+	(_d)->gap = _new_gap; \
+} while (0)
 
 #define bubble_sort(_base, _nr, _cmp) \
 do { \
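The two-argument move_gap() above folds the __move_gap() call and the ->gap update into one step, which is exactly the two-line sequence the earlier call sites collapse. A hypothetical call site, sketched with an illustrative element type (it assumes the util.h macro above is in scope and that the buffer has room for one more element):

struct example_buf {
	int *data;
	size_t nr, size, gap;
};

static void example_insert(struct example_buf *b, size_t idx, int v)
{
	/* was: move_gap(b->data, b->nr, b->size, b->gap, idx); b->gap = idx; */
	move_gap(b, idx);

	b->nr++;
	b->data[b->gap++] = v;	/* the new element fills the first slot of the hole */
}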