Mirror of https://github.com/koverstreet/bcachefs-tools.git, synced 2025-02-02 00:00:03 +03:00
Update bcachefs sources to 0cd3e1d27a bcachefs: Fix for bch2_bkey_pack_pos() not initializing len/version fields
parent 8f72e6940c
commit 2e2d5a3f7e
@@ -1 +1 @@
-ae6f512de8cdd129ce873e14eab84b8e0746daed
+0cd3e1d27a6252b6c0cc32f237c2b2414540e2e8
@@ -29,10 +29,15 @@
        __ret_warn_on; \
})

#define __WARN() \
do { \
        fprintf(stderr, "WARNING at " __FILE__ ":%d\n", __LINE__); \
} while (0)

#define WARN_ON(cond) ({ \
        int __ret_warn_on = unlikely(!!(cond)); \
        if (__ret_warn_on) \
                fprintf(stderr, "WARNING at " __FILE__ ":%d\n", __LINE__);\
                __WARN(); \
        __ret_warn_on; \
})

@@ -42,8 +47,7 @@
        int __ret_warn_on = unlikely(!!(cond)); \
        if (__ret_warn_on && !__warned) { \
                __warned = true; \
                fprintf(stderr, "WARNING at " __FILE__ ":%d: " fmt "\n",\
                        __LINE__, ##__VA_ARGS__); \
                __WARN(); \
        } \
        __ret_warn_on; \
})

@@ -53,7 +57,7 @@
        int __ret_warn_on = unlikely(!!(cond)); \
        if (__ret_warn_on && !__warned) { \
                __warned = true; \
                fprintf(stderr, "WARNING at " __FILE__ ":%d\n", __LINE__);\
                __WARN(); \
        } \
        __ret_warn_on; \
})
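
For context, here is a minimal standalone sketch of how the wrappers above compose, assuming a plain userspace build; unlikely() is stubbed with __builtin_expect for the example, and this is a toy, not the project's actual bug.h:

/* Sketch: WARN_ON() as a GNU statement expression that prints a warning and
 * evaluates to the condition, so callers can branch on it. */
#include <stdio.h>

#define unlikely(x) __builtin_expect(!!(x), 0)

#define __WARN() \
do { \
        fprintf(stderr, "WARNING at " __FILE__ ":%d\n", __LINE__); \
} while (0)

#define WARN_ON(cond) ({ \
        int __ret_warn_on = unlikely(!!(cond)); \
        if (__ret_warn_on) \
                __WARN(); \
        __ret_warn_on; \
})

int main(void)
{
        int fd = -1;

        if (WARN_ON(fd < 0))    /* prints the warning, then lets us recover */
                return 1;
        return 0;
}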
@@ -443,8 +443,15 @@ enum bkey_pack_pos_ret bch2_bkey_pack_pos_lossy(struct bkey_packed *out,
        struct bpos orig = in;
#endif
        bool exact = true;
        unsigned i;

        out->_data[0] = 0;
        /*
         * bch2_bkey_pack_key() will write to all of f->key_u64s, minus the 3
         * byte header, but pack_pos() won't if the len/version fields are big
         * enough - we need to make sure to zero them out:
         */
        for (i = 0; i < f->key_u64s; i++)
                out->_data[i] = 0;

        if (unlikely(in.snapshot <
                     le64_to_cpu(f->field_offset[BKEY_FIELD_SNAPSHOT]))) {
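
This hunk is the fix named in the commit message: previously only the first word of the packed key was cleared, so when the len/version fields fit inside fewer words than f->key_u64s, the trailing words of out->_data could keep stale data. A toy illustration of that failure mode and of the zero-everything fix follows; the struct and function names here are hypothetical, not bcachefs API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define KEY_U64S 4      /* stand-in for f->key_u64s */

struct toy_packed {
        uint64_t _data[KEY_U64S];
};

/* Packs only the fields it needs; without the memset, words 2 and 3 would
 * keep whatever the caller's buffer happened to contain. */
static void toy_pack_pos(struct toy_packed *out, uint64_t inode, uint64_t offset)
{
        memset(out->_data, 0, sizeof(out->_data));      /* the fix: zero every word */
        out->_data[0] = inode;
        out->_data[1] = offset;
}

int main(void)
{
        struct toy_packed k;

        memset(&k, 0xff, sizeof(k));    /* simulate stale stack contents */
        toy_pack_pos(&k, 1, 4096);
        printf("%llx %llx\n",           /* prints "0 0" only because of the memset */
               (unsigned long long) k._data[2],
               (unsigned long long) k._data[3]);
        return 0;
}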
@@ -1193,7 +1193,7 @@ static struct bkey_packed *bset_search_write_set(const struct btree *b,

static inline void prefetch_four_cachelines(void *p)
{
#ifdef CONFIG_X86_64
#if (CONFIG_X86_64 && !defined(__clang__))
        asm(".intel_syntax noprefix;"
            "prefetcht0 [%0 - 127 + 64 * 0];"
            "prefetcht0 [%0 - 127 + 64 * 1];"
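
The new guard compiles the Intel-syntax asm block out when building with clang, whose integrated assembler has historically been unhappy about switching syntax inside inline asm. A portable sketch of the same idea using __builtin_prefetch (available in both GCC and clang) is shown below; the function name is made up, the first two offsets mirror the asm lines shown, and the remaining two follow the pattern the function name suggests:

static inline void prefetch_four_cachelines_portable(void *p)
{
        /* touch four consecutive cache lines around p, as the asm version does */
        __builtin_prefetch((char *) p - 127 + 64 * 0);
        __builtin_prefetch((char *) p - 127 + 64 * 1);
        __builtin_prefetch((char *) p - 127 + 64 * 2);
        __builtin_prefetch((char *) p - 127 + 64 * 3);
}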
@@ -424,18 +424,38 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
        const union bch_extent_entry *entry;
        struct extent_ptr_decoded p = { 0 };
        bool do_update = false;
        char buf[200];
        int ret = 0;

        bkey_for_each_ptr_decode(k->k, ptrs, p, entry) {
                struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
                struct bucket *g = PTR_BUCKET(ca, &p.ptr, true);
                struct bucket *g2 = PTR_BUCKET(ca, &p.ptr, false);
                enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry->ptr);

                if (fsck_err_on(g->mark.data_type &&
                                g->mark.data_type != data_type, c,
                                "bucket %u:%zu different types of data in same bucket: %s, %s\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_types[g->mark.data_type],
                                bch2_data_types[data_type],
                                (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
                        if (data_type == BCH_DATA_btree) {
                                g2->_mark.data_type = g->_mark.data_type = data_type;
                                set_bit(BCH_FS_NEED_ALLOC_WRITE, &c->flags);
                        } else {
                                do_update = true;
                        }
                }

                if (fsck_err_on(!g->gen_valid, c,
                                "bucket %u:%zu data type %s ptr gen %u missing in alloc btree",
                                "bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_types[ptr_data_type(k->k, &p.ptr)],
                                p.ptr.gen)) {
                                p.ptr.gen,
                                (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
                        if (!p.ptr.cached) {
                                g2->_mark.gen = g->_mark.gen = p.ptr.gen;
                                g2->gen_valid = g->gen_valid = true;

@@ -446,10 +466,12 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
                }

                if (fsck_err_on(gen_cmp(p.ptr.gen, g->mark.gen) > 0, c,
                                "bucket %u:%zu data type %s ptr gen in the future: %u > %u",
                                "bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_types[ptr_data_type(k->k, &p.ptr)],
                                p.ptr.gen, g->mark.gen)) {
                                p.ptr.gen, g->mark.gen,
                                (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf))) {
                        if (!p.ptr.cached) {
                                g2->_mark.gen = g->_mark.gen = p.ptr.gen;
                                g2->gen_valid = g->gen_valid = true;

@@ -465,23 +487,29 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,

                if (fsck_err_on(!p.ptr.cached &&
                                gen_cmp(p.ptr.gen, g->mark.gen) < 0, c,
                                "bucket %u:%zu data type %s stale dirty ptr: %u < %u",
                                "bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
                                "while marking %s",
                                p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
                                bch2_data_types[ptr_data_type(k->k, &p.ptr)],
                                p.ptr.gen, g->mark.gen))
                                p.ptr.gen, g->mark.gen,
                                (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
                        do_update = true;

                if (p.has_ec) {
                        struct stripe *m = genradix_ptr(&c->stripes[true], p.ec.idx);

                        if (fsck_err_on(!m || !m->alive, c,
                                        "pointer to nonexistent stripe %llu",
                                        (u64) p.ec.idx))
                                        "pointer to nonexistent stripe %llu\n"
                                        "while marking %s",
                                        (u64) p.ec.idx,
                                        (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
                                do_update = true;

                        if (fsck_err_on(!bch2_ptr_matches_stripe_m(m, p), c,
                                        "pointer does not match stripe %llu",
                                        (u64) p.ec.idx))
                                        "pointer does not match stripe %llu\n"
                                        "while marking %s",
                                        (u64) p.ec.idx,
                                        (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf)))
                                do_update = true;
                }
        }

@@ -522,11 +550,14 @@ static int bch2_check_fix_ptrs(struct bch_fs *c, enum btree_id btree_id,
                bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
                        struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
                        struct bucket *g = PTR_BUCKET(ca, ptr, true);
                        enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);

                        (ptr->cached &&
                         (!g->gen_valid || gen_cmp(ptr->gen, g->mark.gen) > 0)) ||
                        (!ptr->cached &&
                         gen_cmp(ptr->gen, g->mark.gen) < 0);
                         gen_cmp(ptr->gen, g->mark.gen) < 0) ||
                        (g->mark.data_type &&
                         g->mark.data_type != data_type);
                }));
again:
                ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
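
Throughout these hunks the fsck messages gain a trailing "while marking %s" line, filled in with the expression (bch2_bkey_val_to_text(&PBUF(buf), c, *k), buf). That is the C comma operator: the left-hand call formats the key into buf, and the value of the whole expression is buf itself, so it can sit directly in the argument list as the %s operand. A standalone sketch of the idiom, with made-up helper names:

#include <stdio.h>

static void key_to_text(char *buf, size_t len, unsigned inode, unsigned offset)
{
        /* stand-in for bch2_bkey_val_to_text(): render the key into buf */
        snprintf(buf, len, "extent %u:%u", inode, offset);
}

int main(void)
{
        char buf[200];

        /* comma operator: format into buf, then pass buf as the %s argument */
        fprintf(stderr, "stale dirty ptr\nwhile marking %s\n",
                (key_to_text(buf, sizeof(buf), 4096, 8), buf));
        return 0;
}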
@@ -1891,6 +1891,7 @@ static inline void bch2_btree_iter_init(struct btree_trans *trans,
        iter->trans = trans;
        iter->uptodate = BTREE_ITER_NEED_TRAVERSE;
        iter->btree_id = btree_id;
        iter->real_pos = POS_MIN;
        iter->level = 0;
        iter->min_depth = 0;
        iter->locks_want = 0;
@@ -806,13 +806,13 @@ static int extent_update_to_keys(struct btree_trans *trans,
{
        int ret;

        if (bkey_deleted(&n.k->k))
                return 0;

        ret = bch2_extent_can_insert(trans, n.iter, n.k);
        if (ret)
                return ret;

        if (bkey_deleted(&n.k->k))
                return 0;

        n.iter = bch2_trans_get_iter(trans, n.iter->btree_id, n.k->k.p,
                                     BTREE_ITER_INTENT|
                                     BTREE_ITER_NOT_EXTENTS);
@@ -1266,14 +1266,15 @@ int bch2_mark_update(struct btree_trans *trans,

static noinline __cold
void fs_usage_apply_warn(struct btree_trans *trans,
                         unsigned disk_res_sectors)
                         unsigned disk_res_sectors,
                         s64 should_not_have_added)
{
        struct bch_fs *c = trans->c;
        struct btree_insert_entry *i;
        char buf[200];

        bch_err(c, "disk usage increased more than %u sectors reserved",
                disk_res_sectors);
        bch_err(c, "disk usage increased %lli more than %u sectors reserved",
                should_not_have_added, disk_res_sectors);

        trans_for_each_update(trans, i) {
                pr_err("while inserting");

@@ -1305,6 +1306,7 @@ void fs_usage_apply_warn(struct btree_trans *trans,
                        }
                }
        }
        __WARN();
}

void bch2_trans_fs_usage_apply(struct btree_trans *trans,

@@ -1363,7 +1365,7 @@ void bch2_trans_fs_usage_apply(struct btree_trans *trans,
        preempt_enable();

        if (unlikely(warn) && !xchg(&warned_disk_usage, 1))
                fs_usage_apply_warn(trans, disk_res_sectors);
                fs_usage_apply_warn(trans, disk_res_sectors, should_not_have_added);
}

/* trans_mark: */
@@ -1642,8 +1644,8 @@ static int bch2_trans_mark_stripe(struct btree_trans *trans,
                                  struct bkey_s_c old, struct bkey_s_c new,
                                  unsigned flags)
{
        struct bkey_s_c_stripe old_s = { NULL };
        struct bkey_s_c_stripe new_s = { NULL };
        struct bkey_s_c_stripe old_s = { .k = NULL };
        struct bkey_s_c_stripe new_s = { .k = NULL };
        struct bch_replicas_padded r;
        unsigned i;
        int ret = 0;
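
The { NULL } initializers above become { .k = NULL }: both zero-initialize the struct, but the designated form states explicitly that it is the key pointer being cleared and sidesteps missing-field-initializer style warnings. A toy sketch, assuming a two-member struct shaped roughly like bkey_s_c_stripe (a key pointer plus a value pointer); the toy_* names are hypothetical:

#include <stdio.h>

struct toy_key   { unsigned u64s; };
struct toy_value { unsigned nr_blocks; };

struct toy_s_c_stripe {
        const struct toy_key   *k;
        const struct toy_value *v;
};

int main(void)
{
        struct toy_s_c_stripe old_s = { NULL };      /* legal, but which member? */
        struct toy_s_c_stripe new_s = { .k = NULL }; /* explicit; .v is still zeroed */

        printf("%p %p\n", (void *) old_s.v, (void *) new_s.v);
        return 0;
}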
@@ -1797,7 +1799,9 @@ static int bch2_trans_mark_reflink_p(struct btree_trans *trans,
        unsigned front_frag, back_frag;
        s64 ret = 0;

        sectors = abs(sectors);
        if (sectors < 0)
                sectors = -sectors;

        BUG_ON(offset + sectors > p.k->size);

        front_frag = offset;
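
The abs() call above is replaced with an open-coded negation because abs() takes an int: a 64-bit sector count outside int range would be silently truncated before being negated. A small sketch of the difference, using a value large enough to fall outside 32 bits:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int64_t sectors = -(INT64_C(1) << 40);  /* far outside int range */

        int64_t truncated = abs(sectors);       /* int abs(): high bits are lost
                                                   (many compilers warn here) */

        int64_t correct = sectors;              /* the open-coded form keeps 64 bits */
        if (correct < 0)
                correct = -correct;

        printf("abs(): %lld  open-coded: %lld\n",
               (long long) truncated, (long long) correct);
        return 0;
}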
@@ -529,6 +529,30 @@ static inline struct bch_devs_list bch2_bkey_cached_devs(struct bkey_s_c k)
        return ret;
}

static inline unsigned bch2_bkey_ptr_data_type(struct bkey_s_c k, const struct bch_extent_ptr *ptr)
{
        switch (k.k->type) {
        case KEY_TYPE_btree_ptr:
        case KEY_TYPE_btree_ptr_v2:
                return BCH_DATA_btree;
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                return BCH_DATA_user;
        case KEY_TYPE_stripe: {
                struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);

                BUG_ON(ptr < s.v->ptrs ||
                       ptr >= s.v->ptrs + s.v->nr_blocks);

                return ptr >= s.v->ptrs + s.v->nr_blocks - s.v->nr_redundant
                        ? BCH_DATA_parity
                        : BCH_DATA_user;
        }
        default:
                BUG();
        }
}

unsigned bch2_bkey_nr_ptrs(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_allocated(struct bkey_s_c);
unsigned bch2_bkey_nr_ptrs_fully_allocated(struct bkey_s_c);
@@ -1161,7 +1161,8 @@ static int add_nlink(struct nlink_table *t, u64 inum, u32 snapshot)
                return -ENOMEM;
        }

        memcpy(d, t->d, t->size * sizeof(t->d[0]));
        if (t->d)
                memcpy(d, t->d, t->size * sizeof(t->d[0]));
        kvfree(t->d);

        t->d = d;
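
The memcpy() above is now skipped while t->d is still NULL: passing a null pointer to memcpy() is undefined behaviour even when the size is zero, and the first time the table grows there is nothing to copy anyway. A sketch of the grow-and-copy pattern with the guard, using a toy table type:

#include <stdlib.h>
#include <string.h>

struct toy_table {
        size_t  size;
        long    *d;
};

static int toy_table_grow(struct toy_table *t, size_t new_size)
{
        long *d = calloc(new_size, sizeof(t->d[0]));

        if (!d)
                return -1;

        if (t->d)       /* memcpy() from NULL is UB, even with a zero length */
                memcpy(d, t->d, t->size * sizeof(t->d[0]));
        free(t->d);     /* free(NULL) is fine, like kvfree() */

        t->d = d;
        t->size = new_size;
        return 0;
}

int main(void)
{
        struct toy_table t = { 0 };

        return toy_table_grow(&t, 16) ? 1 : 0;
}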
@@ -478,7 +478,7 @@ struct btree_iter *bch2_inode_create(struct btree_trans *trans,
        struct btree_iter *iter = NULL;
        struct bkey_s_c k;
        u64 min, max, start, pos, *hint;
        int ret;
        int ret = 0;

        u64 cpu = raw_smp_processor_id();
        unsigned bits = (c->opts.inodes_32bit
@@ -120,7 +120,7 @@ void bch2_latency_acct(struct bch_dev *ca, u64 submit_time, int rw)
         * the time:
         */
        if (abs((int) (old - io_latency)) < (old >> 1) &&
            now & ~(~0 << 5))
            now & ~(~0U << 5))
                break;

        new = ewma_add(old, io_latency, 5);
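
The mask changes from ~(~0 << 5) to ~(~0U << 5): ~0 is a signed -1, and left-shifting a negative value is undefined behaviour in C, whereas ~0U keeps the shift on an unsigned operand and yields the same low-five-bit mask. A tiny sketch of the expression:

#include <stdio.h>

int main(void)
{
        unsigned mask = ~(~0U << 5);    /* 0x1f: the low five bits */

        printf("mask = %#x\n", mask);
        return 0;
}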
@@ -805,8 +805,10 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
        long b;

        if (new_fs) {
                percpu_down_read(&c->mark_lock);
                b = bch2_bucket_alloc_new_fs(ca);
                if (b < 0) {
                        percpu_up_read(&c->mark_lock);
                        ret = -ENOSPC;
                        goto err;
                }

@@ -821,11 +823,10 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
        }

        b = sector_to_bucket(ca, ob->ptr.offset);

                percpu_down_read(&c->mark_lock);
                spin_lock(&c->journal.lock);
        }

        spin_lock(&c->journal.lock);

        /*
         * XXX
         * For resize at runtime, we should be writing the new

@@ -851,15 +852,15 @@ static int __bch2_set_nr_journal_buckets(struct bch_dev *ca, unsigned nr,
        if (pos <= ja->cur_idx)
                ja->cur_idx = (ja->cur_idx + 1) % ja->nr;

        spin_unlock(&c->journal.lock);

        if (new_fs) {
                bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
                                          ca->mi.bucket_size,
                                          gc_phase(GC_PHASE_SB),
                                          0);
        } else {
                spin_unlock(&c->journal.lock);
                percpu_up_read(&c->mark_lock);

        } else {
                ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_NOFAIL,
                        bch2_trans_mark_metadata_bucket(&trans, ca,
                                b, BCH_DATA_journal,
@@ -2,6 +2,7 @@
#include "bcachefs.h"
#include "bkey_buf.h"
#include "btree_update.h"
#include "buckets.h"
#include "extents.h"
#include "inode.h"
#include "io.h"

@@ -224,6 +225,8 @@ s64 bch2_remap_range(struct bch_fs *c,
                             BTREE_ITER_INTENT);

        while (ret == 0 || ret == -EINTR) {
                struct disk_reservation disk_res = { 0 };

                bch2_trans_begin(&trans);

                if (fatal_signal_pending(current)) {

@@ -287,8 +290,9 @@ s64 bch2_remap_range(struct bch_fs *c,
                                  dst_end.offset - dst_iter->pos.offset));

                ret = bch2_extent_update(&trans, dst_iter, new_dst.k,
                                         NULL, journal_seq,
                                         &disk_res, journal_seq,
                                         new_i_size, i_sectors_delta);
                bch2_disk_reservation_put(c, &disk_res);
                if (ret)
                        continue;
@@ -33,10 +33,11 @@ bch2_str_hash_opt_to_type(struct bch_fs *c, enum bch_str_hash_opts opt)

struct bch_hash_info {
        u8 type;
        union {
                __le64 crc_key;
                SIPHASH_KEY siphash_key;
        };
        /*
         * For crc32 or crc64 string hashes the first key value of
         * the siphash_key (k0) is used as the key.
         */
        SIPHASH_KEY siphash_key;
};

static inline struct bch_hash_info

@@ -46,7 +47,7 @@ bch2_hash_info_init(struct bch_fs *c, const struct bch_inode_unpacked *bi)
        struct bch_hash_info info = {
                .type = (bi->bi_flags >> INODE_STR_HASH_OFFSET) &
                        ~(~0U << INODE_STR_HASH_BITS),
                .crc_key = bi->bi_hash_seed,
                .siphash_key = { .k0 = bi->bi_hash_seed }
        };

        if (unlikely(info.type == BCH_STR_HASH_SIPHASH_OLD)) {

@@ -76,10 +77,12 @@ static inline void bch2_str_hash_init(struct bch_str_hash_ctx *ctx,
{
        switch (info->type) {
        case BCH_STR_HASH_CRC32C:
                ctx->crc32c = crc32c(~0, &info->crc_key, sizeof(info->crc_key));
                ctx->crc32c = crc32c(~0, &info->siphash_key.k0,
                                     sizeof(info->siphash_key.k0));
                break;
        case BCH_STR_HASH_CRC64:
                ctx->crc64 = crc64_be(~0, &info->crc_key, sizeof(info->crc_key));
                ctx->crc64 = crc64_be(~0, &info->siphash_key.k0,
                                      sizeof(info->siphash_key.k0));
                break;
        case BCH_STR_HASH_SIPHASH_OLD:
        case BCH_STR_HASH_SIPHASH:
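
These last hunks drop the anonymous union that aliased crc_key over the first 64 bits of siphash_key: the crc32c/crc64 paths now hash siphash_key.k0 directly, which is the same 8 bytes without the aliasing. A toy sketch of the before/after layout, assuming SIPHASH_KEY is a pair of 64-bit words (the diff itself only relies on .k0):

#include <stdint.h>
#include <stdio.h>

typedef struct {
        uint64_t k0, k1;
} TOY_SIPHASH_KEY;

struct hash_info_old {
        uint8_t type;
        union {
                uint64_t        crc_key;        /* aliased the first word ... */
                TOY_SIPHASH_KEY siphash_key;    /* ... of this key */
        };
};

struct hash_info_new {
        uint8_t         type;
        TOY_SIPHASH_KEY siphash_key;            /* crc hashes read .k0 directly */
};

int main(void)
{
        struct hash_info_new info = {
                .siphash_key = { .k0 = 0x123456789abcdef0ULL },
        };

        /* the bytes the crc32c/crc64 paths now feed into the hash: */
        printf("crc key: %llx (%zu bytes)\n",
               (unsigned long long) info.siphash_key.k0,
               sizeof(info.siphash_key.k0));
        return 0;
}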