Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-23 00:00:02 +03:00)
Update bcachefs sources to b0788c47d9 bcachefs: Fix check_version_upgrade()
This commit is contained in:
parent bcee0320dc
commit f3976e3733
@@ -1 +1 @@
-717b356d1dfdf178ac46e217c81bb710b7e77032
+b0788c47d97935856809bd1357423978dbfcdf9f
@@ -26,6 +26,9 @@ struct page;
 #define kmap_atomic(page)		page_address(page)
 #define kunmap_atomic(addr)		do {} while (0)
 
+#define kmap_local_page(page)		page_address(page)
+#define kunmap_local(addr)		do {} while (0)
+
 #define PageHighMem(page)		false
 
 static const char zero_page[PAGE_SIZE];
@@ -105,7 +105,7 @@ struct write_point {
 		struct dev_stripe_state	stripe;
 
 		u64			sectors_allocated;
-	} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+	} __aligned(SMP_CACHE_BYTES);
 
 	struct {
 		struct work_struct	index_update_work;
@@ -116,7 +116,7 @@ struct write_point {
 		enum write_point_state	state;
 		u64			last_state_change;
 		u64			time[WRITE_POINT_STATE_NR];
-	} __attribute__((__aligned__(SMP_CACHE_BYTES)));
+	} __aligned(SMP_CACHE_BYTES);
 };
 
 struct write_point_specifier {
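The two hunks above replace the spelled-out GCC attribute with the kernel's __aligned() shorthand. A minimal standalone sketch of the equivalence; the macro and the SMP_CACHE_BYTES value are defined locally here purely for illustration (in the kernel they come from <linux/compiler_attributes.h> and the architecture's cache headers):

/* Standalone sketch: __aligned(x) is shorthand for the attribute on the '-' lines above. */
#ifndef __aligned
#define __aligned(x)	__attribute__((__aligned__(x)))
#endif

#define SMP_CACHE_BYTES	64	/* illustrative value; the real one is arch-defined */

struct counter_pad {
	unsigned long count;
} __aligned(SMP_CACHE_BYTES);	/* identical layout to the __attribute__ spelling */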
@@ -536,7 +536,7 @@ int bch2_check_topology(struct bch_fs *c)
 	bch2_trans_init(&trans, c, 0, 0);
 
-	for (i = 0; i < btree_id_nr_alive(c)&& !ret; i++) {
+	for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
 		struct btree_root *r = bch2_btree_id_root(c, i);
 
 		if (!r->alive)
@@ -143,8 +143,8 @@ enum btree_write_flags {
 	__BTREE_WRITE_ONLY_IF_NEED = BTREE_WRITE_TYPE_BITS,
 	__BTREE_WRITE_ALREADY_STARTED,
 };
 
-#define BTREE_WRITE_ONLY_IF_NEED	(1U << __BTREE_WRITE_ONLY_IF_NEED )
-#define BTREE_WRITE_ALREADY_STARTED	(1U << __BTREE_WRITE_ALREADY_STARTED)
+#define BTREE_WRITE_ONLY_IF_NEED	BIT(__BTREE_WRITE_ONLY_IF_NEED)
+#define BTREE_WRITE_ALREADY_STARTED	BIT(__BTREE_WRITE_ALREADY_STARTED)
 
 void __bch2_btree_node_write(struct bch_fs *, struct btree *, unsigned);
 void bch2_btree_node_write(struct bch_fs *, struct btree *,
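The BIT() conversion above is a readability change: the kernel defines BIT(nr) as an unsigned-long shift and BIT_ULL(nr) as the 64-bit variant, so flag definitions avoid repeating the open-coded `1U << n`. A rough standalone sketch, with the macros defined locally to mirror the kernel definitions (names below are invented for illustration):

/* Local definitions mirroring the kernel's BIT()/BIT_ULL() helpers */
#define BIT(nr)		(1UL << (nr))
#define BIT_ULL(nr)	(1ULL << (nr))

enum {
	__MY_FLAG_A,
	__MY_FLAG_B,
};

#define MY_FLAG_A	BIT(__MY_FLAG_A)	/* replaces (1U << __MY_FLAG_A) */
#define MY_FLAG_B	BIT(__MY_FLAG_B)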
@@ -1008,7 +1008,7 @@ retry_all:
 	/*
 	 * We used to assert that all paths had been traversed here
 	 * (path->uptodate < BTREE_ITER_NEED_TRAVERSE); however, since
-	 * path->Should_be_locked is not set yet, we we might have unlocked and
+	 * path->should_be_locked is not set yet, we might have unlocked and
 	 * then failed to relock a path - that's fine.
 	 */
err:
@@ -2738,9 +2738,9 @@ void bch2_trans_node_iter_init(struct btree_trans *trans,
 			       unsigned depth,
 			       unsigned flags)
 {
-	flags |= BTREE_ITER_NOT_EXTENTS;
-	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
-	flags |= BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_NOT_EXTENTS;
+	flags |= __BTREE_ITER_ALL_SNAPSHOTS;
+	flags |= BTREE_ITER_ALL_SNAPSHOTS;
 
 	bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
 				    __bch2_btree_iter_flags(trans, btree_id, flags),
@@ -268,10 +268,10 @@ static inline struct bkey_i *__bch2_bkey_get_mut_noupdate(struct btree_trans *tr
 {
 	struct bkey_s_c k = __bch2_bkey_get_iter(trans, iter,
 			btree_id, pos, flags|BTREE_ITER_INTENT, type);
-	struct bkey_i *ret = unlikely(IS_ERR(k.k))
+	struct bkey_i *ret = IS_ERR(k.k)
 		? ERR_CAST(k.k)
 		: __bch2_bkey_make_mut_noupdate(trans, k, 0, min_bytes);
-	if (unlikely(IS_ERR(ret)))
+	if (IS_ERR(ret))
 		bch2_trans_iter_exit(trans, iter);
 	return ret;
 }
@@ -1924,6 +1924,7 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
 int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
 {
 	int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
+
 	if (ret)
 		bch_err_fn(c, ret);
 	return ret;
@@ -17,7 +17,7 @@ int __init bch2_chardev_init(void);
 static inline long bch2_fs_ioctl(struct bch_fs *c,
 				 unsigned cmd, void __user * arg)
 {
-	return -ENOSYS;
+	return -ENOTTY;
 }
 
 static inline void bch2_fs_chardev_exit(struct bch_fs *c) {}
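Returning -ENOTTY from the no-op ioctl stub above matches the usual convention: -ENOTTY means "inappropriate ioctl for device" (the command is not supported), while -ENOSYS is reserved for system calls that do not exist at all. A hedged sketch of the typical dispatch pattern, with invented command names and kernel context assumed:

/* Hypothetical ioctl dispatcher: unknown commands return -ENOTTY */
#include <linux/errno.h>

#define EXAMPLE_IOC_GET_VERSION	1	/* illustrative command numbers */
#define EXAMPLE_IOC_RESET	2

static long example_ioctl(unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case EXAMPLE_IOC_GET_VERSION:
		return 1;
	case EXAMPLE_IOC_RESET:
		return 0;
	default:
		return -ENOTTY;	/* not -ENOSYS: the syscall exists, the command doesn't */
	}
}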
@@ -265,9 +265,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
 
 #ifdef CONFIG_HIGHMEM
 	__bio_for_each_segment(bv, bio, *iter, *iter) {
-		void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+		void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
+
 		bch2_checksum_update(&state, p, bv.bv_len);
-		kunmap_atomic(p);
+		kunmap_local(p);
 	}
 #else
 	__bio_for_each_bvec(bv, bio, *iter, *iter)
@@ -287,10 +288,10 @@ static struct bch_csum __bch2_checksum_bio(struct bch_fs *c, unsigned type,
 
 #ifdef CONFIG_HIGHMEM
 	__bio_for_each_segment(bv, bio, *iter, *iter) {
-		void *p = kmap_atomic(bv.bv_page) + bv.bv_offset;
+		void *p = kmap_local_page(bv.bv_page) + bv.bv_offset;
 
 		crypto_shash_update(desc, p, bv.bv_len);
-		kunmap_atomic(p);
+		kunmap_local(p);
 	}
 #else
 	__bio_for_each_bvec(bv, bio, *iter, *iter)
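Both checksum hunks swap the deprecated kmap_atomic()/kunmap_atomic() pair for kmap_local_page()/kunmap_local(). A minimal sketch of the pattern, assuming kernel context; the helper name is invented for illustration:

/* Sketch: kmap_local_page() mappings are CPU-local and must be unmapped in
 * reverse order of mapping; unlike kmap_atomic() they do not disable
 * preemption or pagefaults. */
#include <linux/highmem.h>
#include <linux/string.h>

static void copy_from_page(void *dst, struct page *page,
			   unsigned int offset, unsigned int len)
{
	void *src = kmap_local_page(page);

	memcpy(dst, src + offset, len);
	kunmap_local(src);
}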
@@ -427,8 +428,9 @@ int bch2_rechecksum_bio(struct bch_fs *c, struct bio *bio,
 					extent_nonce(version, crc_old), bio);
 
 	if (bch2_crc_cmp(merged, crc_old.csum) && !c->opts.no_data_io) {
-		bch_err(c, "checksum error in bch2_rechecksum_bio() (memory corruption or bug?)\n"
+		bch_err(c, "checksum error in %s() (memory corruption or bug?)\n"
 			"expected %0llx:%0llx got %0llx:%0llx (old type %s new type %s)",
+			__func__,
 			crc_old.csum.hi,
 			crc_old.csum.lo,
 			merged.hi,
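Using "%s" with __func__ in the bch_err() call above keeps the log message correct if the function is ever renamed, since __func__ always expands to the enclosing function's name. A small sketch, kernel context assumed and with an invented validation function:

#include <linux/errno.h>
#include <linux/printk.h>

/* Invented example: the message follows the function name automatically */
static int validate_len(unsigned int len)
{
	if (!len) {
		pr_err("error in %s(): zero-length buffer\n", __func__);
		return -EINVAL;
	}
	return 0;
}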
@@ -643,7 +643,8 @@ static int __bch2_fs_compress_init(struct bch_fs *c, u64 features)
 static u64 compression_opt_to_feature(unsigned v)
 {
 	unsigned type = bch2_compression_decode(v).type;
-	return 1ULL << bch2_compression_opt_to_feature[type];
+
+	return BIT_ULL(bch2_compression_opt_to_feature[type]);
 }
 
 int bch2_fs_compress_init(struct bch_fs *c)
@@ -517,7 +517,7 @@ static void bch2_extent_crc_pack(union bch_extent_crc *dst,
 	switch (type) {
 	case BCH_EXTENT_ENTRY_crc32:
 		set_common_fields(dst->crc32, src);
-		dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
+		dst->crc32.csum = (u32 __force) *((__le32 *) &src.csum.lo);
 		break;
 	case BCH_EXTENT_ENTRY_crc64:
 		set_common_fields(dst->crc64, src);
@@ -915,11 +915,11 @@ bool bch2_extents_match(struct bkey_s_c k1, struct bkey_s_c k2)
 
 		bkey_for_each_ptr_decode(k1.k, ptrs1, p1, entry1)
 			bkey_for_each_ptr_decode(k2.k, ptrs2, p2, entry2)
-			if (p1.ptr.dev == p2.ptr.dev &&
-			    p1.ptr.gen == p2.ptr.gen &&
-			    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
-			    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
-				return true;
+			if (p1.ptr.dev == p2.ptr.dev &&
+			    p1.ptr.gen == p2.ptr.gen &&
+			    (s64) p1.ptr.offset + p1.crc.offset - bkey_start_offset(k1.k) ==
+			    (s64) p2.ptr.offset + p2.crc.offset - bkey_start_offset(k2.k))
+				return true;
 
 		return false;
 	} else {
@@ -543,8 +543,7 @@ do_io:
 
 	if (f_sectors > w->tmp_sectors) {
 		kfree(w->tmp);
-		w->tmp = kzalloc(sizeof(struct bch_folio_sector) *
-				 f_sectors, __GFP_NOFAIL);
+		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), __GFP_NOFAIL);
 		w->tmp_sectors = f_sectors;
 	}
 
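The kcalloc() conversion above is the preferred way to zero-allocate an array: kcalloc(n, size, flags) checks the n * size multiplication for overflow, whereas the replaced kzalloc(size * n, ...) form performs an unchecked multiply. A short sketch, kernel context assumed and with a placeholder element type:

#include <linux/slab.h>

struct sector_state {		/* placeholder element type for illustration */
	unsigned char state;
};

static struct sector_state *alloc_sector_states(unsigned int nr)
{
	/* overflow-checked equivalent of kzalloc(nr * sizeof(...), GFP_KERNEL) */
	return kcalloc(nr, sizeof(struct sector_state), GFP_KERNEL);
}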
@@ -15,7 +15,6 @@ int bch2_write_begin(struct file *, struct address_space *, loff_t,
 int bch2_write_end(struct file *, struct address_space *, loff_t,
 		   unsigned, unsigned, struct page *, void *);
 
-ssize_t bch2_read_iter(struct kiocb *, struct iov_iter *);
 ssize_t bch2_write_iter(struct kiocb *, struct iov_iter *);
 
 void bch2_fs_fs_io_buffered_exit(struct bch_fs *);
@@ -351,7 +351,8 @@ static noinline void bch2_dio_write_flush(struct dio_write *dio)
 	if (ret) {
 		dio->op.error = ret;
 	} else {
-		bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq, &dio->op.cl);
+		bch2_journal_flush_seq_async(&c->journal, inode.bi_journal_seq,
+					     &dio->op.cl);
 		bch2_inode_flush_nocow_writes_async(c, dio->inode, &dio->op.cl);
 	}
 }
@@ -4,6 +4,7 @@
 
 #ifndef NO_BCACHEFS_FS
 ssize_t bch2_direct_write(struct kiocb *, struct iov_iter *);
+ssize_t bch2_read_iter(struct kiocb *, struct iov_iter *);
 
 void bch2_fs_fs_io_direct_exit(struct bch_fs *);
 int bch2_fs_fs_io_direct_init(struct bch_fs *);
@@ -219,8 +219,10 @@ retry:
 			struct folio *folio = folios[folio_idx];
 			u64 folio_start = folio_sector(folio);
 			u64 folio_end = folio_end_sector(folio);
-			unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) - folio_start;
-			unsigned folio_len = min(k.k->p.offset, folio_end) - folio_offset - folio_start;
+			unsigned folio_offset = max(bkey_start_offset(k.k), folio_start) -
+				folio_start;
+			unsigned folio_len = min(k.k->p.offset, folio_end) -
+				folio_offset - folio_start;
 
 			BUG_ON(k.k->p.offset < folio_start);
 			BUG_ON(bkey_start_offset(k.k) > folio_end);
@@ -338,7 +340,8 @@ void bch2_mark_pagecache_reserved(struct bch_inode_info *inode,
 		spin_lock(&s->lock);
 		for (j = folio_offset; j < folio_offset + folio_len; j++) {
 			i_sectors_delta -= s->s[j].state == SECTOR_dirty;
-			bch2_folio_sector_set(folio, s, j, folio_sector_reserve(s->s[j].state));
+			bch2_folio_sector_set(folio, s, j,
+					      folio_sector_reserve(s->s[j].state));
 		}
 		spin_unlock(&s->lock);
 	}
@@ -83,6 +83,7 @@ static inline void bch2_folio_sector_set(struct folio *folio,
 static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
 {
 	u64 f_offset = pos - folio_pos(folio);
+
 	BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));
 	return f_offset >> SECTOR_SHIFT;
 }
@@ -3,7 +3,6 @@
 
 #include "bcachefs.h"
 #include "alloc_foreground.h"
-//#include "bkey_buf.h"
 #include "btree_update.h"
 #include "buckets.h"
 #include "clock.h"
@@ -13,7 +12,6 @@
 #include "fs.h"
 #include "fs-io.h"
 #include "fs-io-buffered.h"
-//#include "fs-io-direct.h"
 #include "fs-io-pagecache.h"
 #include "fsck.h"
 #include "inode.h"
@@ -264,7 +262,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
 
 	folio = __filemap_get_folio(mapping, index,
 				    FGP_LOCK|FGP_CREAT, GFP_KERNEL);
-	if (unlikely(IS_ERR_OR_NULL(folio))) {
+	if (IS_ERR_OR_NULL(folio)) {
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -15,6 +15,7 @@
 #include "fs-io.h"
 #include "fs-ioctl.h"
 #include "fs-io-buffered.h"
 #include "fs-io-direct.h"
 #include "fs-io-pagecache.h"
 #include "fsck.h"
 #include "inode.h"
@@ -2435,6 +2435,7 @@ static void __bch2_read_endio(struct work_struct *work)
 
 	if (rbio->bounce) {
 		struct bvec_iter src_iter = src->bi_iter;
+
 		bio_copy_data_iter(dst, &dst_iter, src, &src_iter);
 	}
 }
@@ -52,7 +52,7 @@ enum __bch_write_flags {
 };
 
 enum bch_write_flags {
-#define x(f)	BCH_WRITE_##f = 1U << __BCH_WRITE_##f,
+#define x(f)	BCH_WRITE_##f = BIT(__BCH_WRITE_##f),
 	BCH_WRITE_FLAGS()
 #undef x
 };
@@ -63,6 +63,7 @@ journal_seq_to_buf(struct journal *j, u64 seq)
 static void journal_pin_list_init(struct journal_entry_pin_list *p, int count)
 {
 	unsigned i;
+
 	for (i = 0; i < ARRAY_SIZE(p->list); i++)
 		INIT_LIST_HEAD(&p->list[i]);
 	INIT_LIST_HEAD(&p->flushed);
@@ -514,8 +515,7 @@ int bch2_journal_res_get_slowpath(struct journal *j, struct journal_res *res,
 	int ret;
 
 	closure_wait_event(&j->async_wait,
-			   (ret = __journal_res_get(j, res, flags)) !=
-			   -BCH_ERR_journal_res_get_blocked||
+			   (ret = __journal_res_get(j, res, flags)) != -BCH_ERR_journal_res_get_blocked ||
 			   (flags & JOURNAL_RES_GET_NONBLOCK));
 	return ret;
 }
@@ -1054,6 +1054,7 @@ found:
 	bch_err(c, "cur_idx %u/%u", ja->cur_idx, ja->nr);
 	for (i = 0; i < 3; i++) {
 		unsigned idx = (ja->cur_idx + ja->nr - 1 + i) % ja->nr;
+
 		bch_err(c, "bucket_seq[%u] = %llu", idx, ja->bucket_seq[idx]);
 	}
 	ja->sectors_free = 0;
@@ -1304,18 +1305,14 @@ int bch2_journal_read(struct bch_fs *c,
 		bch2_replicas_entry_sort(&replicas.e);
 
-		/*
-		 * If we're mounting in degraded mode - if we didn't read all
-		 * the devices - this is wrong:
-		 */
-
 		printbuf_reset(&buf);
 		bch2_replicas_entry_to_text(&buf, &replicas.e);
 
 		if (!degraded &&
-		    fsck_err_on(!bch2_replicas_marked(c, &replicas.e), c,
-				"superblock not marked as containing replicas %s",
-				buf.buf)) {
+		    !bch2_replicas_marked(c, &replicas.e) &&
+		    (le64_to_cpu(i->j.seq) == *last_seq ||
+		     fsck_err(c, "superblock not marked as containing replicas for journal entry %llu\n %s",
+			      le64_to_cpu(i->j.seq), buf.buf))) {
 			ret = bch2_mark_replicas(c, &replicas.e);
 			if (ret)
 				goto err;
@@ -1482,6 +1479,7 @@ static void journal_write_done(struct closure *cl)
 	struct journal *j = container_of(cl, struct journal, io);
 	struct bch_fs *c = container_of(j, struct bch_fs, journal);
 	struct journal_buf *w = journal_last_unwritten_buf(j);
 	struct bch_replicas_padded replicas;
 	union journal_res_state old, new;
 	u64 v, seq;
 	int err = 0;
@@ -1493,7 +1491,13 @@ static void journal_write_done(struct closure *cl)
 	if (!w->devs_written.nr) {
 		bch_err(c, "unable to write journal to sufficient devices");
 		err = -EIO;
 	} else {
 		bch2_devlist_to_replicas(&replicas.e, BCH_DATA_journal,
 					 w->devs_written);
 		if (bch2_mark_replicas(c, &replicas.e))
 			err = -EIO;
 	}
 
 	if (err)
 		bch2_fatal_error(c);
 
@@ -1630,7 +1634,6 @@ static void do_journal_write(struct closure *cl)
 	}
 
 	continue_at(cl, journal_write_done, c->io_complete_wq);
 	return;
 }
 
 static void bch2_journal_entries_postprocess(struct bch_fs *c, struct jset *jset)
@@ -346,7 +346,7 @@ static inline bool __journal_pin_drop(struct journal *j,
 	list_del_init(&pin->list);
 
 	/*
-	 * Unpinning a journal entry make make journal_next_bucket() succeed, if
+	 * Unpinning a journal entry may make journal_next_bucket() succeed, if
 	 * writing a new last_seq will now make another bucket available:
 	 */
 	return atomic_dec_and_test(&pin_list->count) &&
@@ -132,7 +132,7 @@ static int bch2_journal_replay(struct bch_fs *c)
 	move_gap(keys->d, keys->nr, keys->size, keys->gap, keys->nr);
 	keys->gap = keys->nr;
 
-	keys_sorted = kvmalloc_array(sizeof(*keys_sorted), keys->nr, GFP_KERNEL);
+	keys_sorted = kvmalloc_array(keys->nr, sizeof(*keys_sorted), GFP_KERNEL);
 	if (!keys_sorted)
 		return -BCH_ERR_ENOMEM_journal_replay;
 
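kvmalloc_array() takes the element count first and the element size second; the hunk above fixes the swapped arguments. Because the two values are only multiplied together, the allocation size was already correct either way, so this is a convention and readability fix rather than a behavior change. A short sketch, kernel context assumed and with an invented helper name:

#include <linux/slab.h>
#include <linux/types.h>

static u64 *alloc_seq_buffer(size_t nr)
{
	/* count first, element size second; free with kvfree() */
	return kvmalloc_array(nr, sizeof(u64), GFP_KERNEL);
}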
@@ -507,7 +507,7 @@ static struct recovery_pass_fn recovery_pass_fns[] = {
 
 static void check_version_upgrade(struct bch_fs *c)
 {
-	unsigned latest_compatible = bch2_version_compatible(c->sb.version);
+	unsigned latest_compatible = bch2_latest_compatible_version(c->sb.version);
 	unsigned latest_version	= bcachefs_metadata_version_current;
 	unsigned old_version = c->sb.version_upgrade_complete ?: c->sb.version;
 	unsigned new_version = 0;
@@ -759,7 +759,7 @@ use_clean:
 	}
 
 	c->journal_replay_seq_start	= last_seq;
-	c->journal_replay_seq_end	= blacklist_seq - 1;;
+	c->journal_replay_seq_end	= blacklist_seq - 1;
 
 	if (c->opts.reconstruct_alloc) {
 		c->sb.compat &= ~(1ULL << BCH_COMPAT_alloc_info);
@@ -198,8 +198,14 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
 	if (dynamic_fault("bcachefs:add:super_realloc"))
 		return -BCH_ERR_ENOMEM_sb_realloc_injected;
 
+	new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
+	if (!new_sb)
+		return -BCH_ERR_ENOMEM_sb_buf_realloc;
+
+	sb->sb = new_sb;
+
 	if (sb->have_bio) {
-		unsigned nr_bvecs = DIV_ROUND_UP(new_buffer_size, PAGE_SIZE);
+		unsigned nr_bvecs = buf_pages(sb->sb, new_buffer_size);
 
 		bio = bio_kmalloc(nr_bvecs, GFP_KERNEL);
 		if (!bio)
@@ -211,11 +217,6 @@ int bch2_sb_realloc(struct bch_sb_handle *sb, unsigned u64s)
 		sb->bio = bio;
 	}
 
-	new_sb = krealloc(sb->sb, new_buffer_size, GFP_NOFS|__GFP_ZERO);
-	if (!new_sb)
-		return -BCH_ERR_ENOMEM_sb_buf_realloc;
-
-	sb->sb = new_sb;
 	sb->buffer_size = new_buffer_size;
 
 	return 0;
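The two superblock hunks above move the krealloc() ahead of the bio reallocation while keeping the usual safe-realloc pattern: krealloc() returns NULL on failure and leaves the original buffer intact, so the stored pointer is only updated once the call has succeeded. A minimal sketch of that pattern, kernel context assumed and with an invented helper name:

#include <linux/errno.h>
#include <linux/slab.h>

static int grow_buffer(void **bufp, size_t new_size)
{
	/* on failure the old allocation in *bufp is still valid */
	void *n = krealloc(*bufp, new_size, GFP_KERNEL | __GFP_ZERO);

	if (!n)
		return -ENOMEM;

	*bufp = n;
	return 0;
}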
@@ -547,7 +548,9 @@ static int __copy_super(struct bch_sb_handle *dst_handle, struct bch_sb *src)
 		d = (src_f ? le32_to_cpu(src_f->u64s) : 0) -
 		    (dst_f ? le32_to_cpu(dst_f->u64s) : 0);
 		if (d > 0) {
-			int ret = bch2_sb_realloc(dst_handle, le32_to_cpu(dst_handle->sb->u64s) + d);
+			int ret = bch2_sb_realloc(dst_handle,
+					le32_to_cpu(dst_handle->sb->u64s) + d);
+
 			if (ret)
 				return ret;
 
@@ -58,6 +58,7 @@ struct bch_sb_field_ops {
 static inline __le64 bch2_sb_magic(struct bch_fs *c)
 {
 	__le64 ret;
+
 	memcpy(&ret, &c->sb.uuid, sizeof(ret));
 	return ret;
 }
@@ -574,13 +574,6 @@ void __bch2_fs_stop(struct bch_fs *c)
 		cancel_work_sync(&ca->io_error_work);
 
 	cancel_work_sync(&c->read_only_work);
-
-	for (i = 0; i < c->sb.nr_devices; i++) {
-		struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
-
-		if (ca)
-			bch2_free_super(&ca->disk_sb);
-	}
 }
 
 void bch2_fs_free(struct bch_fs *c)
@@ -594,9 +587,14 @@ void bch2_fs_free(struct bch_fs *c)
 	closure_sync(&c->cl);
 	closure_debug_destroy(&c->cl);
 
-	for (i = 0; i < c->sb.nr_devices; i++)
-		if (c->devs[i])
-			bch2_dev_free(rcu_dereference_protected(c->devs[i], 1));
+	for (i = 0; i < c->sb.nr_devices; i++) {
+		struct bch_dev *ca = rcu_dereference_protected(c->devs[i], true);
+
+		if (ca) {
+			bch2_free_super(&ca->disk_sb);
+			bch2_dev_free(ca);
+		}
+	}
 
 	bch_verbose(c, "shutdown complete");
@@ -216,6 +216,7 @@ u64 bch2_read_flag_list(char *opt, const char * const list[])
 
 	while ((p = strsep(&s, ","))) {
 		int flag = match_string(list, -1, p);
+
 		if (flag < 0) {
 			ret = -1;
 			break;
@@ -797,9 +798,10 @@ void memcpy_to_bio(struct bio *dst, struct bvec_iter dst_iter, const void *src)
 	struct bvec_iter iter;
 
 	__bio_for_each_segment(bv, dst, iter, dst_iter) {
-		void *dstp = kmap_atomic(bv.bv_page);
+		void *dstp = kmap_local_page(bv.bv_page);
+
 		memcpy(dstp + bv.bv_offset, src, bv.bv_len);
-		kunmap_atomic(dstp);
+		kunmap_local(dstp);
 
 		src += bv.bv_len;
 	}
@@ -811,9 +813,10 @@ void memcpy_from_bio(void *dst, struct bio *src, struct bvec_iter src_iter)
 	struct bvec_iter iter;
 
 	__bio_for_each_segment(bv, src, iter, src_iter) {
-		void *srcp = kmap_atomic(bv.bv_page);
+		void *srcp = kmap_local_page(bv.bv_page);
+
 		memcpy(dst, srcp + bv.bv_offset, bv.bv_len);
-		kunmap_atomic(srcp);
+		kunmap_local(srcp);
 
 		dst += bv.bv_len;
 	}
@@ -468,8 +468,10 @@ struct bch_pd_controller {
 	s64			last_change;
 	s64			last_target;
 
-	/* If true, the rate will not increase if bch2_ratelimit_delay()
-	 * is not being called often enough. */
+	/*
+	 * If true, the rate will not increase if bch2_ratelimit_delay()
+	 * is not being called often enough.
+	 */
 	bool			backpressure;
 };
 
@@ -607,6 +609,7 @@ static inline void __memcpy_u64s(void *dst, const void *src,
 {
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("rep ; movsq"
 		     : "=&c" (d0), "=&D" (d1), "=&S" (d2)
 		     : "0" (u64s), "1" (dst), "2" (src)
@@ -683,6 +686,7 @@ static inline void __memmove_u64s_up(void *_dst, const void *_src,
 
 #ifdef CONFIG_X86_64
 	long d0, d1, d2;
+
 	asm volatile("std ;\n"
 		     "rep ; movsq\n"
 		     "cld ;\n"
@@ -59,6 +59,7 @@ int bch2_varint_decode(const u8 *in, const u8 *end, u64 *out)
 
 	if (likely(bytes < 9)) {
 		__le64 v_le = 0;
+
 		memcpy(&v_le, in, bytes);
 		v = le64_to_cpu(v_le);
 		v >>= bytes;