Mirror of https://github.com/koverstreet/bcachefs-tools.git (synced 2025-02-22 00:00:03 +03:00)
Update bcachefs sources to 84f132d569 bcachefs: fsck: Break walk_inode() up into multiple functions
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Parent: 84cb7bffe5
Commit: cfa816bf3f
@@ -1 +1 @@
-bca25b802d99014f6ef6d7cb7fa3d493fb3841b5
+84f132d5696138bb038d2dc8f1162d2fab5ac832
@@ -113,40 +113,17 @@ static inline void *bio_data(struct bio *bio)
 
 #define __bio_kunmap_atomic(addr)	kunmap_atomic(addr)
 
-static inline struct bio_vec bio_iter_all_peek(const struct bio *bio,
-					       struct bvec_iter_all *iter)
+static inline struct bio_vec *bio_next_segment(const struct bio *bio,
+					       struct bvec_iter_all *iter)
 {
-	if (WARN_ON(iter->idx >= bio->bi_vcnt))
-		return (struct bio_vec) { NULL };
+	if (iter->idx >= bio->bi_vcnt)
+		return NULL;
 
-	return bvec_iter_all_peek(bio->bi_io_vec, iter);
+	return &bio->bi_io_vec[iter->idx];
 }
 
-static inline void bio_iter_all_advance(const struct bio *bio,
-					struct bvec_iter_all *iter,
-					unsigned bytes)
-{
-	bvec_iter_all_advance(bio->bi_io_vec, iter, bytes);
-
-	WARN_ON(iter->idx > bio->bi_vcnt ||
-		(iter->idx == bio->bi_vcnt && iter->done));
-}
-
-#define bio_for_each_segment_all_continue(bvl, bio, iter)		\
-	for (;								\
-	     iter.idx < bio->bi_vcnt &&					\
-		((bvl = bio_iter_all_peek(bio, &iter)), true);		\
-	     bio_iter_all_advance((bio), &iter, bvl.bv_len))
-
-/*
- * drivers should _never_ use the all version - the bio may have been split
- * before it got to the driver and the driver won't own all of it
- */
-#define bio_for_each_segment_all(bvl, bio, iter)			\
-	for (bvec_iter_all_init(&iter);					\
-	     iter.idx < (bio)->bi_vcnt &&				\
-		((bvl = bio_iter_all_peek((bio), &iter)), true);	\
-	     bio_iter_all_advance((bio), &iter, bvl.bv_len))
+#define bio_for_each_segment_all(bvl, bio, iter)			\
+	for ((iter).idx = 0; (bvl = bio_next_segment((bio), &(iter))); (iter).idx++)
 
 static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
 				    unsigned bytes)
@@ -43,6 +43,10 @@ struct bvec_iter {
 						   current bvec */
 };
 
+struct bvec_iter_all {
+	int		idx;
+};
+
 /*
  * various member access, note that bio_data should of course not be used
  * on highmem page vectors
@@ -94,52 +98,4 @@ static inline void bvec_iter_advance(const struct bio_vec *bv,
 		((bvl = bvec_iter_bvec((bio_vec), (iter))), 1);		\
 	     bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
 
-/*
- * bvec_iter_all: for advancing over individual pages in a bio, as it was when
- * it was first created:
- */
-struct bvec_iter_all {
-	int		idx;
-	unsigned	done;
-};
-
-static inline void bvec_iter_all_init(struct bvec_iter_all *iter_all)
-{
-	iter_all->done = 0;
-	iter_all->idx = 0;
-}
-
-static inline struct bio_vec __bvec_iter_all_peek(const struct bio_vec *bvec,
-						  const struct bvec_iter_all *iter)
-{
-	struct bio_vec bv = bvec[iter->idx];
-
-	BUG_ON(iter->done >= bv.bv_len);
-
-	bv.bv_offset	+= iter->done;
-	bv.bv_len	-= iter->done;
-	return bv;
-}
-
-static inline struct bio_vec bvec_iter_all_peek(const struct bio_vec *bvec,
-						const struct bvec_iter_all *iter)
-{
-	struct bio_vec bv = __bvec_iter_all_peek(bvec, iter);
-
-	bv.bv_len = min_t(unsigned, PAGE_SIZE - bv.bv_offset, bv.bv_len);
-	return bv;
-}
-
-static inline void bvec_iter_all_advance(const struct bio_vec *bvec,
-					 struct bvec_iter_all *iter,
-					 unsigned bytes)
-{
-	iter->done += bytes;
-
-	while (iter->done && iter->done >= bvec[iter->idx].bv_len) {
-		iter->done -= bvec[iter->idx].bv_len;
-		iter->idx++;
-	}
-}
-
 #endif /* __LINUX_BVEC_ITER_H */
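For context, the surviving iterator is the simple index-based one: bio_next_segment() returns a pointer into bi_io_vec, or NULL once iter.idx reaches bi_vcnt. A minimal sketch of a caller using the retained macro (the loop body is hypothetical, only for illustration):

    struct bio_vec *bv;
    struct bvec_iter_all iter;

    /* visit each segment of the bio exactly once */
    bio_for_each_segment_all(bv, bio, iter) {
        /* bv->bv_page, bv->bv_offset and bv->bv_len describe one segment */
        memset(page_address(bv->bv_page) + bv->bv_offset, 0, bv->bv_len);
    }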
@@ -28,7 +28,7 @@ typedef struct {
 	posix_acl_xattr_entry	a_entries[0];
 } posix_acl_xattr_header;
 
-extern const struct xattr_handler posix_acl_access_xattr_handler;
-extern const struct xattr_handler posix_acl_default_xattr_handler;
+extern const struct xattr_handler nop_posix_acl_access;
+extern const struct xattr_handler nop_posix_acl_default;
 
 #endif /* _POSIX_ACL_XATTR_H */
@@ -18,27 +18,24 @@
 
+#include <string.h>
 #include <asm/types.h>
+#include <stdbool.h>
+
+#define UUID_SIZE 16
 
 typedef struct {
-	__u8 b[16];
-} uuid_le;
-
-typedef struct {
-	__u8 b[16];
-} uuid_be;
-
-#define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
-((uuid_le)								\
-{{ (a) & 0xff, ((a) >> 8) & 0xff, ((a) >> 16) & 0xff, ((a) >> 24) & 0xff, \
-   (b) & 0xff, ((b) >> 8) & 0xff,					\
-   (c) & 0xff, ((c) >> 8) & 0xff,					\
-   (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
+	__u8 b[UUID_SIZE];
+} __uuid_t;
 
-#define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
-((uuid_be)								\
+#define UUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7)		\
+((__uuid_t)								\
 {{ ((a) >> 24) & 0xff, ((a) >> 16) & 0xff, ((a) >> 8) & 0xff, (a) & 0xff, \
    ((b) >> 8) & 0xff, (b) & 0xff,					\
    ((c) >> 8) & 0xff, (c) & 0xff,					\
    (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
 
+static inline bool uuid_equal(const __uuid_t *u1, const __uuid_t *u2)
+{
+	return memcmp(u1, u2, sizeof(__uuid_t)) == 0;
+}
+
 #endif
@@ -32,7 +32,7 @@ void bch2_opts_usage(unsigned);
 
 struct format_opts {
 	char		*label;
-	uuid_le		uuid;
+	__uuid_t	uuid;
 	unsigned	version;
 	unsigned	superblock_size;
 	bool		encrypted;
@@ -88,9 +88,9 @@ struct bch_sb *__bch2_super_read(int, u64);
 int bcachectl_open(void);
 
 struct bchfs_handle {
-	uuid_le	uuid;
-	int	ioctl_fd;
-	int	sysfs_fd;
+	__uuid_t	uuid;
+	int		ioctl_fd;
+	int		sysfs_fd;
 };
 
 void bcache_fs_close(struct bchfs_handle);
@@ -239,7 +239,7 @@ struct dev_name {
 	unsigned	idx;
 	char		*dev;
 	char		*label;
-	uuid_le		uuid;
+	uuid_t		uuid;
 };
 typedef DARRAY(struct dev_name) dev_names;
 
@@ -577,7 +577,7 @@ int bch2_alloc_read(struct bch_fs *c)
 	bch2_trans_exit(&trans);
 
 	if (ret)
-		bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
+		bch_err_fn(c, ret);
 
 	return ret;
 }
@@ -684,8 +684,7 @@ int bch2_bucket_gens_init(struct bch_fs *c)
 	bch2_trans_exit(&trans);
 
 	if (ret)
-		bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
-
+		bch_err_fn(c, ret);
 	return ret;
 }
@@ -730,7 +729,7 @@ int bch2_bucket_gens_read(struct bch_fs *c)
 	bch2_trans_exit(&trans);
 
 	if (ret)
-		bch_err(c, "error reading alloc info: %s", bch2_err_str(ret));
+		bch_err_fn(c, ret);
 
 	return ret;
 }
@@ -1031,12 +1030,13 @@ again:
 	return k;
 }
 
-static int bch2_check_alloc_key(struct btree_trans *trans,
-				struct bkey_s_c alloc_k,
-				struct btree_iter *alloc_iter,
-				struct btree_iter *discard_iter,
-				struct btree_iter *freespace_iter,
-				struct btree_iter *bucket_gens_iter)
+static noinline_for_stack
+int bch2_check_alloc_key(struct btree_trans *trans,
+			 struct bkey_s_c alloc_k,
+			 struct btree_iter *alloc_iter,
+			 struct btree_iter *discard_iter,
+			 struct btree_iter *freespace_iter,
+			 struct btree_iter *bucket_gens_iter)
 {
 	struct bch_fs *c = trans->c;
 	struct bch_dev *ca;
@@ -1160,10 +1160,11 @@ fsck_err:
 	return ret;
 }
 
-static int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
-					   struct bpos start,
-					   struct bpos *end,
-					   struct btree_iter *freespace_iter)
+static noinline_for_stack
+int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
+				    struct bpos start,
+				    struct bpos *end,
+				    struct btree_iter *freespace_iter)
 {
 	struct bch_fs *c = trans->c;
 	struct bch_dev *ca;
@@ -1215,10 +1216,11 @@ fsck_err:
 	return ret;
 }
 
-static int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
-					     struct bpos start,
-					     struct bpos *end,
-					     struct btree_iter *bucket_gens_iter)
+static noinline_for_stack
+int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
+				      struct bpos start,
+				      struct bpos *end,
+				      struct btree_iter *bucket_gens_iter)
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_s_c k;
@@ -1280,7 +1282,7 @@ fsck_err:
 	return ret;
 }
 
-static int __bch2_check_discard_freespace_key(struct btree_trans *trans,
+static noinline_for_stack int __bch2_check_discard_freespace_key(struct btree_trans *trans,
 					      struct btree_iter *iter)
 {
 	struct bch_fs *c = trans->c;
@@ -1360,9 +1362,10 @@ static int bch2_check_discard_freespace_key(struct btree_trans *trans,
  * valid for buckets that exist; this just checks for keys for nonexistent
  * buckets.
  */
-static int bch2_check_bucket_gens_key(struct btree_trans *trans,
-				      struct btree_iter *iter,
-				      struct bkey_s_c k)
+static noinline_for_stack
+int bch2_check_bucket_gens_key(struct btree_trans *trans,
+			       struct btree_iter *iter,
+			       struct bkey_s_c k)
 {
 	struct bch_fs *c = trans->c;
 	struct bkey_i_bucket_gens g;
@@ -1521,7 +1524,9 @@ bkey_err:
 			bch2_check_bucket_gens_key(&trans, &iter, k));
 err:
 	bch2_trans_exit(&trans);
-	return ret < 0 ? ret : 0;
+	if (ret)
+		bch_err_fn(c, ret);
+	return ret;
 }
 
 static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
@@ -1599,20 +1604,18 @@ fsck_err:
 
 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
 {
-	struct btree_trans trans;
 	struct btree_iter iter;
 	struct bkey_s_c k;
 	int ret = 0;
 
-	bch2_trans_init(&trans, c, 0, 0);
-
-	for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
-			POS_MIN, BTREE_ITER_PREFETCH, k,
-			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
-		bch2_check_alloc_to_lru_ref(&trans, &iter));
-
-	bch2_trans_exit(&trans);
-	return ret < 0 ? ret : 0;
+	ret = bch2_trans_run(c,
+		for_each_btree_key_commit(&trans, iter, BTREE_ID_alloc,
+				POS_MIN, BTREE_ITER_PREFETCH, k,
+				NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
+			bch2_check_alloc_to_lru_ref(&trans, &iter)));
+	if (ret)
+		bch_err_fn(c, ret);
+	return ret;
 }
 
 static int bch2_discard_one_bucket(struct btree_trans *trans,
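The bch2_trans_run() pattern adopted here wraps transaction setup and teardown around a single expression. Its definition is not part of this diff; a plausible shape, consistent with how every converted call site uses it, would be (treat this as an assumption, the real macro lives elsewhere in the tree):

    /* Assumed shape of the helper used above: */
    #define bch2_trans_run(_c, _do)                 \
    ({                                              \
            struct btree_trans trans;               \
            int _ret;                               \
                                                    \
            bch2_trans_init(&trans, (_c), 0, 0);    \
            _ret = (_do);                           \
            bch2_trans_exit(&trans);                \
            _ret;                                   \
    })

This removes the repeated init/exit boilerplate and makes it impossible to forget bch2_trans_exit() on an error path.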
@@ -2024,6 +2027,7 @@ int bch2_fs_freespace_init(struct bch_fs *c)
 		ret = bch2_dev_freespace_init(c, ca, &last_updated);
 		if (ret) {
 			percpu_ref_put(&ca->ref);
+			bch_err_fn(c, ret);
 			return ret;
 		}
 	}
@@ -2032,11 +2036,10 @@ int bch2_fs_freespace_init(struct bch_fs *c)
 		mutex_lock(&c->sb_lock);
 		bch2_write_super(c);
 		mutex_unlock(&c->sb_lock);
 
 		bch_verbose(c, "done initializing freespace");
 	}
 
-	return ret;
+	return 0;
 }
 
 /* Bucket IO clocks: */
@@ -220,7 +220,7 @@ static inline u64 should_invalidate_buckets(struct bch_dev *ca,
 	u64 free = max_t(s64, 0,
 		u.d[BCH_DATA_free].buckets
 		+ u.d[BCH_DATA_need_discard].buckets
-		- bch2_dev_buckets_reserved(ca, RESERVE_stripe));
+		- bch2_dev_buckets_reserved(ca, BCH_WATERMARK_stripe));
 
 	return clamp_t(s64, want_free - free, 0, u.d[BCH_DATA_cached].buckets);
 }
@@ -44,9 +44,9 @@ static void bch2_trans_mutex_lock_norelock(struct btree_trans *trans,
 	}
 }
 
-const char * const bch2_alloc_reserves[] = {
+const char * const bch2_watermarks[] = {
 #define x(t) #t,
-	BCH_ALLOC_RESERVES()
+	BCH_WATERMARKS()
 #undef x
 	NULL
 };
@@ -188,13 +188,13 @@ long bch2_bucket_alloc_new_fs(struct bch_dev *ca)
 	return -1;
 }
 
-static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
+static inline unsigned open_buckets_reserved(enum bch_watermark watermark)
 {
-	switch (reserve) {
-	case RESERVE_btree:
-	case RESERVE_btree_movinggc:
+	switch (watermark) {
+	case BCH_WATERMARK_btree:
+	case BCH_WATERMARK_btree_copygc:
 		return 0;
-	case RESERVE_movinggc:
+	case BCH_WATERMARK_copygc:
 		return OPEN_BUCKETS_COUNT / 4;
 	default:
 		return OPEN_BUCKETS_COUNT / 2;
@@ -203,7 +203,7 @@ static inline unsigned open_buckets_reserved(enum alloc_reserve reserve)
 
 static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *ca,
 					      u64 bucket,
-					      enum alloc_reserve reserve,
+					      enum bch_watermark watermark,
 					      const struct bch_alloc_v4 *a,
 					      struct bucket_alloc_state *s,
 					      struct closure *cl)
@@ -233,7 +233,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 
 	spin_lock(&c->freelist_lock);
 
-	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(reserve))) {
+	if (unlikely(c->open_buckets_nr_free <= open_buckets_reserved(watermark))) {
 		if (cl)
 			closure_wait(&c->open_buckets_wait, cl);
 
@@ -284,7 +284,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
 }
 
 static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bch_dev *ca,
-					    enum alloc_reserve reserve, u64 free_entry,
+					    enum bch_watermark watermark, u64 free_entry,
 					    struct bucket_alloc_state *s,
 					    struct bkey_s_c freespace_k,
 					    struct closure *cl)
@@ -374,7 +374,7 @@ static struct open_bucket *try_alloc_bucket(struct btree_trans *trans, struct bc
 		}
 	}
 
-	ob = __try_alloc_bucket(c, ca, b, reserve, a, s, cl);
+	ob = __try_alloc_bucket(c, ca, b, watermark, a, s, cl);
 	if (!ob)
 		iter.path->preserve = false;
 err:
@@ -394,7 +394,7 @@ err:
 static noinline struct open_bucket *
 bch2_bucket_alloc_early(struct btree_trans *trans,
 			struct bch_dev *ca,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct bucket_alloc_state *s,
 			struct closure *cl)
 {
@@ -424,7 +424,7 @@ again:
 
 		s->buckets_seen++;
 
-		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, reserve, a, s, cl);
+		ob = __try_alloc_bucket(trans->c, ca, k.k->p.offset, watermark, a, s, cl);
 		if (ob)
 			break;
 	}
@@ -445,7 +445,7 @@ again:
 
 static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
 						      struct bch_dev *ca,
-						      enum alloc_reserve reserve,
+						      enum bch_watermark watermark,
 						      struct bucket_alloc_state *s,
 						      struct closure *cl)
 {
@@ -474,7 +474,7 @@ again:
 
 		s->buckets_seen++;
 
-		ob = try_alloc_bucket(trans, ca, reserve,
+		ob = try_alloc_bucket(trans, ca, watermark,
 				      alloc_cursor, s, k, cl);
 		if (ob) {
 			iter.path->preserve = false;
@@ -507,7 +507,7 @@ again:
  */
 static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 						   struct bch_dev *ca,
-						   enum alloc_reserve reserve,
+						   enum bch_watermark watermark,
 						   struct closure *cl,
 						   struct bch_dev_usage *usage)
 {
@@ -519,7 +519,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
 	bool waiting = false;
again:
 	bch2_dev_usage_read_fast(ca, usage);
-	avail = dev_buckets_free(ca, *usage, reserve);
+	avail = dev_buckets_free(ca, *usage, watermark);
 
 	if (usage->d[BCH_DATA_need_discard].buckets > avail)
 		bch2_do_discards(c);
@@ -548,8 +548,8 @@ again:
 		closure_wake_up(&c->freelist_wait);
alloc:
 	ob = likely(freespace)
-		? bch2_bucket_alloc_freelist(trans, ca, reserve, &s, cl)
-		: bch2_bucket_alloc_early(trans, ca, reserve, &s, cl);
+		? bch2_bucket_alloc_freelist(trans, ca, watermark, &s, cl)
+		: bch2_bucket_alloc_early(trans, ca, watermark, &s, cl);
 
 	if (s.skipped_need_journal_commit * 2 > avail)
 		bch2_journal_flush_async(&c->journal, NULL);
@@ -564,7 +564,7 @@ err:
 
 	if (!IS_ERR(ob))
 		trace_and_count(c, bucket_alloc, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				ob->bucket,
 				usage->d[BCH_DATA_free].buckets,
 				avail,
@@ -575,7 +575,7 @@ err:
 				"");
 	else if (!bch2_err_matches(PTR_ERR(ob), BCH_ERR_transaction_restart))
 		trace_and_count(c, bucket_alloc_fail, ca,
-				bch2_alloc_reserves[reserve],
+				bch2_watermarks[watermark],
 				0,
 				usage->d[BCH_DATA_free].buckets,
 				avail,
@@ -589,14 +589,14 @@ err:
 }
 
 struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,
-				      enum alloc_reserve reserve,
+				      enum bch_watermark watermark,
 				      struct closure *cl)
 {
 	struct bch_dev_usage usage;
 	struct open_bucket *ob;
 
 	bch2_trans_do(c, NULL, NULL, 0,
-		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, reserve,
+		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(&trans, ca, watermark,
 							cl, &usage)));
 	return ob;
 }
@@ -629,7 +629,7 @@ static inline void bch2_dev_stripe_increment_inlined(struct bch_dev *ca,
 					       struct bch_dev_usage *usage)
 {
 	u64 *v = stripe->next_alloc + ca->dev_idx;
-	u64 free_space = dev_buckets_available(ca, RESERVE_none);
+	u64 free_space = dev_buckets_available(ca, BCH_WATERMARK_normal);
 	u64 free_space_inv = free_space
 		? div64_u64(1ULL << 48, free_space)
 		: 1ULL << 48;
@@ -692,7 +692,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			bool *have_cache,
 			unsigned flags,
 			enum bch_data_type data_type,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			struct closure *cl)
 {
 	struct bch_fs *c = trans->c;
@@ -725,7 +725,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
 			continue;
 		}
 
-		ob = bch2_bucket_alloc_trans(trans, ca, reserve, cl, &usage);
+		ob = bch2_bucket_alloc_trans(trans, ca, watermark, cl, &usage);
 		if (!IS_ERR(ob))
 			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
 		percpu_ref_put(&ca->ref);
@@ -766,7 +766,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 			 unsigned nr_replicas,
 			 unsigned *nr_effective,
 			 bool *have_cache,
-			 enum alloc_reserve reserve,
+			 enum bch_watermark watermark,
 			 unsigned flags,
 			 struct closure *cl)
 {
@@ -784,7 +784,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
 	if (ec_open_bucket(c, ptrs))
 		return 0;
 
-	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, reserve, cl);
+	h = bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1, watermark, cl);
 	if (IS_ERR(h))
 		return PTR_ERR(h);
 	if (!h)
@@ -879,7 +879,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 				    unsigned nr_replicas,
 				    unsigned *nr_effective,
 				    bool *have_cache, bool ec,
-				    enum alloc_reserve reserve,
+				    enum bch_watermark watermark,
 				    unsigned flags)
 {
 	int i, ret = 0;
@@ -901,7 +901,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
 			u64 avail;
 
 			bch2_dev_usage_read_fast(ca, &usage);
-			avail = dev_buckets_free(ca, usage, reserve);
+			avail = dev_buckets_free(ca, usage, watermark);
 			if (!avail)
 				continue;
 
@@ -931,7 +931,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *_cl)
 {
@@ -962,7 +962,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 
 	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
 				       nr_replicas, nr_effective,
-				       have_cache, erasure_code, reserve, flags);
+				       have_cache, erasure_code, watermark, flags);
 	if (ret)
 		return ret;
 
@@ -971,7 +971,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
 					 target,
 					 nr_replicas, nr_effective,
 					 have_cache,
-					 reserve, flags, _cl);
+					 watermark, flags, _cl);
 	} else {
retry_blocking:
 		/*
@@ -980,7 +980,7 @@ retry_blocking:
 		 */
 		ret = bch2_bucket_alloc_set_trans(trans, ptrs, &wp->stripe, &devs,
 					nr_replicas, nr_effective, have_cache,
-					flags, wp->data_type, reserve, cl);
+					flags, wp->data_type, watermark, cl);
 		if (ret &&
 		    !bch2_err_matches(ret, BCH_ERR_transaction_restart) &&
 		    !bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
@@ -1003,7 +1003,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 			unsigned nr_replicas,
 			unsigned *nr_effective,
 			bool *have_cache,
-			enum alloc_reserve reserve,
+			enum bch_watermark watermark,
 			unsigned flags,
 			struct closure *cl)
 {
@@ -1013,7 +1013,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 		ret = __open_bucket_add_buckets(trans, ptrs, wp,
 				devs_have, target, erasure_code,
 				nr_replicas, nr_effective, have_cache,
-				reserve, flags, cl);
+				watermark, flags, cl);
 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
 		    bch2_err_matches(ret, BCH_ERR_operation_blocked) ||
 		    bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
@@ -1026,7 +1026,7 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
 	ret = __open_bucket_add_buckets(trans, ptrs, wp,
 			devs_have, target, false,
 			nr_replicas, nr_effective, have_cache,
-			reserve, flags, cl);
+			watermark, flags, cl);
 	return ret < 0 ? ret : 0;
 }
 
@@ -1263,7 +1263,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
 			     struct bch_devs_list *devs_have,
 			     unsigned nr_replicas,
 			     unsigned nr_replicas_required,
-			     enum alloc_reserve reserve,
+			     enum bch_watermark watermark,
 			     unsigned flags,
 			     struct closure *cl,
 			     struct write_point **wp_ret)
@@ -1296,7 +1296,7 @@ retry:
 	ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 				      target, erasure_code,
 				      nr_replicas, &nr_effective,
-				      &have_cache, reserve,
+				      &have_cache, watermark,
 				      flags, NULL);
 	if (!ret ||
 	    bch2_err_matches(ret, BCH_ERR_transaction_restart))
@@ -1315,14 +1315,14 @@ retry:
 			ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 						      0, erasure_code,
 						      nr_replicas, &nr_effective,
-						      &have_cache, reserve,
+						      &have_cache, watermark,
 						      flags, cl);
 		} else {
allocate_blocking:
 			ret = open_bucket_add_buckets(trans, &ptrs, wp, devs_have,
 						      target, erasure_code,
 						      nr_replicas, &nr_effective,
-						      &have_cache, reserve,
+						      &have_cache, watermark,
 						      flags, cl);
 		}
alloc_done:
@@ -14,7 +14,7 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_List;
 
-extern const char * const bch2_alloc_reserves[];
+extern const char * const bch2_watermarks[];
 
 void bch2_reset_alloc_cursors(struct bch_fs *);
 
@@ -31,7 +31,7 @@ void bch2_dev_stripe_increment(struct bch_dev *, struct dev_stripe_state *);
 long bch2_bucket_alloc_new_fs(struct bch_dev *);
 
 struct open_bucket *bch2_bucket_alloc(struct bch_fs *, struct bch_dev *,
-				      enum alloc_reserve, struct closure *);
+				      enum bch_watermark, struct closure *);
 
 static inline void ob_push(struct bch_fs *c, struct open_buckets *obs,
 			   struct open_bucket *ob)
@@ -152,7 +152,7 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
 int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
 		      struct dev_stripe_state *, struct bch_devs_mask *,
 		      unsigned, unsigned *, bool *, unsigned,
-		      enum bch_data_type, enum alloc_reserve,
+		      enum bch_data_type, enum bch_watermark,
 		      struct closure *);
 
 int bch2_alloc_sectors_start_trans(struct btree_trans *,
@@ -160,7 +160,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
 			       struct write_point_specifier,
 			       struct bch_devs_list *,
 			       unsigned, unsigned,
-			       enum alloc_reserve,
+			       enum bch_watermark,
 			       unsigned,
 			       struct closure *,
 			       struct write_point **);
@@ -16,20 +16,18 @@ struct bucket_alloc_state {
 	u64	skipped_nouse;
 };
 
 struct ec_bucket_buf;
 
-#define BCH_ALLOC_RESERVES()		\
-	x(btree_movinggc)		\
+#define BCH_WATERMARKS()		\
+	x(btree_copygc)			\
 	x(btree)			\
-	x(movinggc)			\
-	x(none)				\
+	x(copygc)			\
+	x(normal)			\
 	x(stripe)
 
-enum alloc_reserve {
-#define x(name)	RESERVE_##name,
-	BCH_ALLOC_RESERVES()
+enum bch_watermark {
+#define x(name)	BCH_WATERMARK_##name,
+	BCH_WATERMARKS()
 #undef x
-	RESERVE_NR,
+	BCH_WATERMARK_NR,
 };
 
 #define OPEN_BUCKETS_COUNT	1024
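BCH_WATERMARKS() is an x-macro: each consumer defines x() before expanding the list, which is how the enum here and the bch2_watermarks[] name table in alloc_foreground.c stay in sync. The two expansions come out to:

    /* Expansion 1: the enum, as defined in this header */
    enum bch_watermark {
        BCH_WATERMARK_btree_copygc,
        BCH_WATERMARK_btree,
        BCH_WATERMARK_copygc,
        BCH_WATERMARK_normal,
        BCH_WATERMARK_stripe,
        BCH_WATERMARK_NR,
    };

    /* Expansion 2: the matching string table for tracepoints */
    const char * const bch2_watermarks[] = {
        "btree_copygc", "btree", "copygc", "normal", "stripe", NULL
    };

Note the declaration order: stricter watermarks (allowed to dig deeper into reserves) sort first, which is what bch2_dev_buckets_reserved()'s fallthrough switch relies on.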
@@ -404,12 +404,16 @@ int bch2_check_btree_backpointers(struct bch_fs *c)
 {
 	struct btree_iter iter;
 	struct bkey_s_c k;
+	int ret;
 
-	return bch2_trans_run(c,
+	ret = bch2_trans_run(c,
 		for_each_btree_key_commit(&trans, iter,
 				BTREE_ID_backpointers, POS_MIN, 0, k,
 				NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
 			  bch2_check_btree_backpointer(&trans, &iter, k)));
+	if (ret)
+		bch_err_fn(c, ret);
+	return ret;
 }
 
 struct bpos_level {
@@ -769,6 +773,8 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
 	}
 	bch2_trans_exit(&trans);
 
+	if (ret)
+		bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -805,8 +811,10 @@ static int check_one_backpointer(struct btree_trans *trans,
 
 	if (fsck_err_on(!k.k, c,
 			"backpointer for missing extent\n  %s",
-			(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf)))
-		return bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
+			(bch2_bkey_val_to_text(&buf, c, bp.s_c), buf.buf))) {
+		ret = bch2_btree_delete_at_buffered(trans, BTREE_ID_backpointers, bp.k->p);
+		goto out;
+	}
out:
fsck_err:
 	bch2_trans_iter_exit(trans, &iter);
@@ -872,5 +880,7 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
 	}
 	bch2_trans_exit(&trans);
 
+	if (ret)
+		bch_err_fn(c, ret);
 	return ret;
 }
@@ -84,7 +84,7 @@ static inline int bch2_bucket_backpointer_mod(struct btree_trans *trans,
 		set_bkey_val_u64s(&bp_k->k, 0);
 	}
 
-	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i);
+	return bch2_trans_update_buffered(trans, BTREE_ID_backpointers, &bp_k->k_i, !insert);
 }
 
 static inline enum bch_data_type bkey_ptr_data_type(enum btree_id btree_id, unsigned level,
@@ -208,6 +208,7 @@
 #include "fifo.h"
 #include "nocow_locking_types.h"
 #include "opts.h"
+#include "seqmutex.h"
 #include "util.h"
 
 #ifdef CONFIG_BCACHEFS_DEBUG
@@ -290,6 +291,11 @@ do {									\
 #define bch_err_inum_offset_ratelimited(c, _inum, _offset, fmt, ...) \
 	printk_ratelimited(KERN_ERR bch2_fmt_inum_offset(c, _inum, _offset, fmt), ##__VA_ARGS__)
 
+#define bch_err_fn(_c, _ret)						\
+	bch_err(_c, "%s(): error %s", __func__, bch2_err_str(_ret))
+#define bch_err_msg(_c, _ret, _msg)					\
+	bch_err(_c, "%s(): error " _msg " %s", __func__, bch2_err_str(_ret))
+
 #define bch_verbose(c, fmt, ...)					\
do {									\
 	if ((c)->opts.verbose)						\
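These two macros are what let the rest of this commit delete so many hand-rolled error prints. Before and after at a typical call site:

    /* before: every caller spelled out the function name and error string */
    if (ret)
        bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));

    /* after: the macro captures __func__ and decodes ret itself */
    if (ret)
        bch_err_fn(c, ret);

Because __func__ is expanded at the macro's use site, the log line still names the caller, not the macro.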
@@ -483,7 +489,7 @@ struct bch_dev {
 	 * Committed by bch2_write_super() -> bch_fs_mi_update()
 	 */
 	struct bch_member_cpu	mi;
-	uuid_le			uuid;
+	__uuid_t		uuid;
 	char			name[BDEVNAME_SIZE];
 
 	struct bch_sb_handle	disk_sb;
@@ -701,8 +707,8 @@ struct bch_fs {
 
 	/* Updated by bch2_sb_update():*/
 	struct {
-		uuid_le		uuid;
-		uuid_le		user_uuid;
+		__uuid_t	uuid;
+		__uuid_t	user_uuid;
 
 		u16		version;
 		u16		version_min;
@@ -779,7 +785,7 @@ struct bch_fs {
 	} btree_write_stats[BTREE_WRITE_TYPE_NR];
 
 	/* btree_iter.c: */
-	struct mutex		btree_trans_lock;
+	struct seqmutex		btree_trans_lock;
 	struct list_head	btree_trans_list;
 	mempool_t		btree_paths_pool;
 	mempool_t		btree_trans_mem_pool;
@@ -78,6 +78,10 @@
 #include <linux/uuid.h>
 #include "vstructs.h"
 
+#ifdef __KERNEL__
+typedef uuid_t __uuid_t;
+#endif
+
 #define BITMASK(name, type, field, offset, end)				\
 static const unsigned	name##_OFFSET = offset;				\
 static const unsigned	name##_BITS = (end - offset);			\
@@ -1215,7 +1219,7 @@ struct bch_sb_field_journal_v2 {
 #define BCH_MIN_NR_NBUCKETS	(1 << 6)
 
 struct bch_member {
-	uuid_le			uuid;
+	__uuid_t		uuid;
 	__le64			nbuckets;	/* device size */
 	__le16			first_bucket;	/* index of first bucket used */
 	__le16			bucket_size;	/* sectors */
@@ -1596,7 +1600,7 @@ static const unsigned bcachefs_metadata_required_upgrade_below = bcachefs_metada
 #define BCH_SB_MEMBERS_MAX		64	/* XXX kill */
 
 struct bch_sb_layout {
-	uuid_le			magic;	/* bcachefs superblock UUID */
+	__uuid_t		magic;	/* bcachefs superblock UUID */
 	__u8			layout_type;
 	__u8			sb_max_size_bits; /* base 2 of 512 byte sectors */
 	__u8			nr_superblocks;
@@ -1627,9 +1631,9 @@ struct bch_sb {
 	__le16			version;
 	__le16			version_min;
 	__le16			pad[2];
-	uuid_le			magic;
-	uuid_le			uuid;
-	uuid_le			user_uuid;
+	__uuid_t		magic;
+	__uuid_t		uuid;
+	__uuid_t		user_uuid;
 	__u8			label[BCH_SB_LABEL_SIZE];
 	__le64			offset;
 	__le64			seq;
@@ -1926,11 +1930,11 @@ enum bch_compression_opts {
  */
 
 #define BCACHE_MAGIC							\
-	UUID_LE(0xf67385c6, 0x1a4e, 0xca45,				\
-		0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
+	UUID_INIT(0xc68573f6, 0x4e1a, 0x45ca,				\
+		  0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81)
 #define BCHFS_MAGIC							\
-	UUID_LE(0xf67385c6, 0xce66, 0xa990,				\
-		0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
+	UUID_INIT(0xc68573f6, 0x66ce, 0x90a9,				\
+		  0xd9, 0x6a, 0x60, 0xcf, 0x80, 0x3d, 0xf7, 0xef)
 
 #define BCACHEFS_STATFS_MAGIC		0xca451a4e
 
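The magic constants change spelling but not on-disk bytes: UUID_LE() emitted its first three fields low byte first, UUID_INIT() emits them high byte first, so the literals are byte-swapped in the source to compensate. Working through the first field of BCACHE_MAGIC:

    /*
     * UUID_LE(0xf67385c6, ...):  a & 0xff first      -> c6 85 73 f6
     * UUID_INIT(0xc68573f6, ...): (a >> 24) & 0xff first -> c6 85 73 f6
     *
     * Either way the superblock magic is the same 16 bytes:
     */
    static const __u8 bcache_magic_bytes[16] = {
        0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
        0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81,
    };

So existing filesystems remain recognizable; only the source-level representation changed.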
@@ -93,7 +93,7 @@ struct bch_ioctl_incremental {
 * this UUID.
 */
 struct bch_ioctl_query_uuid {
-	uuid_le			uuid;
+	__uuid_t		uuid;
 };
 
 #if 0
@@ -110,16 +110,6 @@ enum btree_update_flags {
 #define BTREE_TRIGGER_BUCKET_INVALIDATE (1U << __BTREE_TRIGGER_BUCKET_INVALIDATE)
 #define BTREE_TRIGGER_NOATOMIC		(1U << __BTREE_TRIGGER_NOATOMIC)
 
-#define BTREE_TRIGGER_WANTS_OLD_AND_NEW		\
-	((1U << KEY_TYPE_alloc)|		\
-	 (1U << KEY_TYPE_alloc_v2)|		\
-	 (1U << KEY_TYPE_alloc_v3)|		\
-	 (1U << KEY_TYPE_alloc_v4)|		\
-	 (1U << KEY_TYPE_stripe)|		\
-	 (1U << KEY_TYPE_inode)|		\
-	 (1U << KEY_TYPE_inode_v2)|		\
-	 (1U << KEY_TYPE_snapshot))
-
 static inline int bch2_trans_mark_key(struct btree_trans *trans,
 				      enum btree_id btree_id, unsigned level,
 				      struct bkey_s_c old, struct bkey_i *new,
@@ -446,8 +446,7 @@ void bch2_fs_btree_cache_exit(struct bch_fs *c)
 	struct btree *b;
 	unsigned i, flags;
 
-	if (bc->shrink.list.next)
-		unregister_shrinker(&bc->shrink);
+	unregister_shrinker(&bc->shrink);
 
 	/* vfree() can allocate memory: */
 	flags = memalloc_nofs_save();
@@ -404,8 +404,7 @@ again:
 		}
 
 		if (ret) {
-			bch_err(c, "%s: error getting btree node: %s",
-				__func__, bch2_err_str(ret));
+			bch_err_msg(c, ret, "getting btree node");
 			break;
 		}
 
@@ -473,8 +472,7 @@ again:
 		ret = PTR_ERR_OR_ZERO(cur);
 
 		if (ret) {
-			bch_err(c, "%s: error getting btree node: %s",
-				__func__, bch2_err_str(ret));
+			bch_err_msg(c, ret, "getting btree node");
 			goto err;
 		}
 
@@ -687,7 +685,7 @@ static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id
 
 		new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
 		if (!new) {
-			bch_err(c, "%s: error allocating new key", __func__);
+			bch_err_msg(c, ret, "allocating new key");
 			ret = -BCH_ERR_ENOMEM_gc_repair_key;
 			goto err;
 		}
@@ -814,7 +812,7 @@ static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
fsck_err:
err:
 	if (ret)
-		bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+		bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -919,11 +917,8 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
 
 		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
 				       false, &k, true);
-		if (ret) {
-			bch_err(c, "%s: error from bch2_gc_mark_key: %s",
-				__func__, bch2_err_str(ret));
+		if (ret)
 			goto fsck_err;
-		}
 
 		if (b->c.level) {
 			bch2_bkey_buf_reassemble(&cur, c, k);
@@ -981,8 +976,7 @@ static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b
 				continue;
 			}
 		} else if (ret) {
-			bch_err(c, "%s: error getting btree node: %s",
-				__func__, bch2_err_str(ret));
+			bch_err_msg(c, ret, "getting btree node");
 			break;
 		}
 
@@ -1049,7 +1043,7 @@ fsck_err:
 	six_unlock_read(&b->c.lock);
 
 	if (ret < 0)
-		bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+		bch_err_fn(c, ret);
 	printbuf_exit(&buf);
 	return ret;
 }
@@ -1082,7 +1076,7 @@ static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
 			: bch2_gc_btree(&trans, ids[i], initial, metadata_only);
 
 		if (ret < 0)
-			bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+			bch_err_fn(c, ret);
 
 	bch2_trans_exit(&trans);
 	return ret;
@@ -1280,7 +1274,7 @@ fsck_err:
 	if (ca)
 		percpu_ref_put(&ca->ref);
 	if (ret)
-		bch_err(c, "error from %s(): %s", __func__, bch2_err_str(ret));
+		bch_err_fn(c, ret);
 
 	percpu_up_write(&c->mark_lock);
 	printbuf_exit(&buf);
@@ -1594,7 +1588,7 @@ static int bch2_gc_write_reflink_key(struct btree_trans *trans,
 			"  should be %u",
 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
 			r->refcount)) {
-		struct bkey_i *new = bch2_bkey_make_mut(trans, iter, k, 0);
+		struct bkey_i *new = bch2_bkey_make_mut(trans, iter, &k, 0);
 
 		ret = PTR_ERR_OR_ZERO(new);
 		if (ret)
@@ -1886,6 +1880,9 @@ out:
 	 * allocator thread - issue wakeup in case they blocked on gc_lock:
 	 */
 	closure_wake_up(&c->freelist_wait);
+
+	if (ret)
+		bch_err_fn(c, ret);
 	return ret;
 }
 
@@ -1919,7 +1916,7 @@ static int gc_btree_gens_key(struct btree_trans *trans,
 		percpu_up_read(&c->mark_lock);
 		return 0;
update:
-	u = bch2_bkey_make_mut(trans, iter, k, 0);
+	u = bch2_bkey_make_mut(trans, iter, &k, 0);
 	ret = PTR_ERR_OR_ZERO(u);
 	if (ret)
 		return ret;
@@ -2993,7 +2993,7 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
 	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
 		struct btree_trans *pos;
 
-		mutex_lock(&c->btree_trans_lock);
+		seqmutex_lock(&c->btree_trans_lock);
 		list_for_each_entry(pos, &c->btree_trans_list, list) {
 			/*
 			 * We'd much prefer to be stricter here and completely
@@ -3011,7 +3011,7 @@ void __bch2_trans_init(struct btree_trans *trans, struct bch_fs *c, unsigned fn_
 		}
 		list_add_tail(&trans->list, &c->btree_trans_list);
list_add_done:
-		mutex_unlock(&c->btree_trans_lock);
+		seqmutex_unlock(&c->btree_trans_lock);
 	}
 }
 
@@ -3046,6 +3046,12 @@ void bch2_trans_exit(struct btree_trans *trans)
 
 	bch2_trans_unlock(trans);
 
+	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
+		seqmutex_lock(&c->btree_trans_lock);
+		list_del(&trans->list);
+		seqmutex_unlock(&c->btree_trans_lock);
+	}
+
 	closure_sync(&trans->ref);
 
 	if (s)
@@ -3057,12 +3063,6 @@ void bch2_trans_exit(struct btree_trans *trans)
 
 	check_btree_paths_leaked(trans);
 
-	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG_TRANSACTIONS)) {
-		mutex_lock(&c->btree_trans_lock);
-		list_del(&trans->list);
-		mutex_unlock(&c->btree_trans_lock);
-	}
-
 	srcu_read_unlock(&c->btree_trans_barrier, trans->srcu_idx);
 
 	bch2_journal_preres_put(&c->journal, &trans->journal_preres);
@@ -3200,7 +3200,7 @@ int bch2_fs_btree_iter_init(struct bch_fs *c)
 	}
 
 	INIT_LIST_HEAD(&c->btree_trans_list);
-	mutex_init(&c->btree_trans_lock);
+	seqmutex_init(&c->btree_trans_lock);
 
 	ret = mempool_init_kmalloc_pool(&c->btree_paths_pool, 1,
 			sizeof(struct btree_path) * nr +
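The new seqmutex.h is not shown in this diff; judging from its use for btree_trans_lock, the idea is a mutex paired with a sequence counter, so that code which had to drop the lock mid-walk can relock and detect whether the protected list changed in the meantime. A rough sketch of the presumed interface (field and function names are assumptions, not the committed header):

    /* Hypothetical shape of the seqmutex guarding btree_trans_list: */
    struct seqmutex {
        struct mutex    lock;
        u32             seq;
    };

    #define seqmutex_init(l)    mutex_init(&(l)->lock)
    #define seqmutex_lock(l)    mutex_lock(&(l)->lock)

    static inline u32 seqmutex_unlock(struct seqmutex *lock)
    {
        u32 seq = ++lock->seq;          /* bump so relockers can spot changes */
        mutex_unlock(&lock->lock);
        return seq;
    }

    /* Relock, returning true only if nothing changed since @seq: */
    static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
    {
        if (lock->seq != seq || !mutex_trylock(&lock->lock))
            return false;
        if (lock->seq != seq) {
            mutex_unlock(&lock->lock);
            return false;
        }
        return true;
    }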
@@ -286,7 +286,7 @@ __always_inline
 static inline int btree_trans_restart_nounlock(struct btree_trans *trans, int err)
 {
 	BUG_ON(err <= 0);
-	BUG_ON(!bch2_err_matches(err, BCH_ERR_transaction_restart));
+	BUG_ON(!bch2_err_matches(-err, BCH_ERR_transaction_restart));
 
 	trans->restarted = err;
 	trans->last_restarted_ip = _THIS_IP_;
@@ -956,8 +956,7 @@ void bch2_fs_btree_key_cache_exit(struct btree_key_cache *bc)
 	int cpu;
 #endif
 
-	if (bc->shrink.list.next)
-		unregister_shrinker(&bc->shrink);
+	unregister_shrinker(&bc->shrink);
 
 	mutex_lock(&bc->lock);
 
@@ -111,10 +111,8 @@ static noinline void lock_graph_pop_all(struct lock_graph *g)
 		lock_graph_up(g);
 }
 
-static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+static void __lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 {
-	closure_get(&trans->ref);
-
 	g->g[g->nr++] = (struct trans_waiting_for_lock) {
 		.trans		= trans,
 		.node_want	= trans->locking,
@@ -122,6 +120,12 @@ static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
 	};
 }
 
+static void lock_graph_down(struct lock_graph *g, struct btree_trans *trans)
+{
+	closure_get(&trans->ref);
+	__lock_graph_down(g, trans);
+}
+
 static bool lock_graph_remove_non_waiters(struct lock_graph *g)
 {
 	struct trans_waiting_for_lock *i;
@@ -222,10 +226,14 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 	struct trans_waiting_for_lock *i;
 
 	for (i = g->g; i < g->g + g->nr; i++)
-		if (i->trans == trans)
+		if (i->trans == trans) {
+			closure_put(&trans->ref);
 			return break_cycle(g, cycle);
+		}
 
 	if (g->nr == ARRAY_SIZE(g->g)) {
+		closure_put(&trans->ref);
+
 		if (orig_trans->lock_may_not_fail)
 			return 0;
 
@@ -239,7 +247,7 @@ static int lock_graph_descend(struct lock_graph *g, struct btree_trans *trans,
 		return btree_trans_restart(orig_trans, BCH_ERR_transaction_restart_deadlock_recursion_limit);
 	}
 
-	lock_graph_down(g, trans);
+	__lock_graph_down(g, trans);
 	return 0;
 }
 
@@ -334,9 +342,10 @@ next:
 			    !lock_type_conflicts(lock_held, trans->locking_wait.lock_want))
 				continue;
 
-			ret = lock_graph_descend(&g, trans, cycle);
+			closure_get(&trans->ref);
 			raw_spin_unlock(&b->lock.wait_lock);
 
+			ret = lock_graph_descend(&g, trans, cycle);
 			if (ret)
 				return ret;
 			goto next;
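The net effect of these lock-graph changes is a consistent reference-counting rule: the caller now takes the closure ref before dropping wait_lock, and lock_graph_descend() owns that ref from then on, either dropping it on the duplicate-entry and overflow paths or handing it to the graph via __lock_graph_down(). Paraphrasing the flow above (this is a restatement of the diff, not new API):

    closure_get(&trans->ref);               /* caller takes the ref        */
    raw_spin_unlock(&b->lock.wait_lock);    /* safe: trans can't go away   */

    ret = lock_graph_descend(&g, trans, cycle);
    /*
     * lock_graph_descend() either:
     *  - finds trans already in the graph: closure_put() + break_cycle()
     *  - overflows the graph:              closure_put(), then bail/restart
     *  - otherwise: the ref is transferred via __lock_graph_down()
     */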
@@ -115,7 +115,7 @@ int bch2_bkey_get_empty_slot(struct btree_trans *, struct btree_iter *,
 int __must_check bch2_trans_update(struct btree_trans *, struct btree_iter *,
 				   struct bkey_i *, enum btree_update_flags);
 int __must_check bch2_trans_update_buffered(struct btree_trans *,
-					    enum btree_id, struct bkey_i *);
+					    enum btree_id, struct bkey_i *, bool);
 
 void bch2_trans_commit_hook(struct btree_trans *,
 			    struct btree_trans_commit_hook *);
@@ -241,10 +241,10 @@ static inline struct bkey_i *bch2_bkey_make_mut_noupdate(struct btree_trans *tra
 			KEY_TYPE_##_type, sizeof(struct bkey_i_##_type)))
 
 static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
-						  struct bkey_s_c k, unsigned flags,
+						  struct bkey_s_c *k, unsigned flags,
 						  unsigned type, unsigned min_bytes)
 {
-	struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, k, type, min_bytes);
+	struct bkey_i *mut = __bch2_bkey_make_mut_noupdate(trans, *k, type, min_bytes);
 	int ret;
 
 	if (IS_ERR(mut))
@@ -253,11 +253,13 @@ static inline struct bkey_i *__bch2_bkey_make_mut(struct btree_trans *trans, str
 	ret = bch2_trans_update(trans, iter, mut, flags);
 	if (ret)
 		return ERR_PTR(ret);
+
+	*k = bkey_i_to_s_c(mut);
 	return mut;
 }
 
 static inline struct bkey_i *bch2_bkey_make_mut(struct btree_trans *trans, struct btree_iter *iter,
-						struct bkey_s_c k, unsigned flags)
+						struct bkey_s_c *k, unsigned flags)
 {
 	return __bch2_bkey_make_mut(trans, iter, k, flags, 0, 0);
 }
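Passing k by pointer lets the helper re-point the caller's bkey_s_c at the mutable copy it just queued as an update, so any code after the call that still reads through k sees the value that will actually be committed. The converted gc_btree_gens_key() hunk earlier now reads, in sketch form:

    struct bkey_s_c k = bch2_btree_iter_peek(&iter);
    struct bkey_i *u;

    /* on success, k is updated in place to point at the mutable copy: */
    u = bch2_bkey_make_mut(trans, &iter, &k, 0);
    ret = PTR_ERR_OR_ZERO(u);
    if (ret)
        return ret;

    /* mutate through u; subsequent reads through k observe the same bytes */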
@@ -247,15 +247,15 @@ static struct btree *__bch2_btree_node_alloc(struct btree_trans *trans,
 	struct open_buckets ob = { .nr = 0 };
 	struct bch_devs_list devs_have = (struct bch_devs_list) { 0 };
 	unsigned nr_reserve;
-	enum alloc_reserve alloc_reserve;
+	enum bch_watermark alloc_reserve;
 	int ret;
 
 	if (flags & BTREE_INSERT_USE_RESERVE) {
 		nr_reserve	= 0;
-		alloc_reserve	= RESERVE_btree_movinggc;
+		alloc_reserve	= BCH_WATERMARK_btree_copygc;
 	} else {
 		nr_reserve	= BTREE_NODE_RESERVE;
-		alloc_reserve	= RESERVE_btree;
+		alloc_reserve	= BCH_WATERMARK_btree;
 	}
 
 	mutex_lock(&c->btree_reserve_cache_lock);
@@ -418,8 +418,7 @@ static int run_one_mem_trigger(struct btree_trans *trans,
 		return 0;
 
 	if (bch2_bkey_ops[old.k->type].atomic_trigger ==
-	    bch2_bkey_ops[i->k->k.type].atomic_trigger &&
-	    ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+	    bch2_bkey_ops[i->k->k.type].atomic_trigger) {
 		ret   = bch2_mark_key(trans, i->btree_id, i->level,
 				      old, bkey_i_to_s_c(new),
 				      BTREE_TRIGGER_INSERT|BTREE_TRIGGER_OVERWRITE|flags);
@@ -460,8 +459,7 @@ static int run_one_trans_trigger(struct btree_trans *trans, struct btree_insert_
 	if (!i->insert_trigger_run &&
 	    !i->overwrite_trigger_run &&
 	    bch2_bkey_ops[old.k->type].trans_trigger ==
-	    bch2_bkey_ops[i->k->k.type].trans_trigger &&
-	    ((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
+	    bch2_bkey_ops[i->k->k.type].trans_trigger) {
 		i->overwrite_trigger_run = true;
 		i->insert_trigger_run = true;
 		return bch2_trans_mark_key(trans, i->btree_id, i->level, old, i->k,
@@ -1218,7 +1216,6 @@ static inline int check_pos_snapshot_overwritten(struct btree_trans *trans,
 					  struct bpos pos)
 {
 	if (!btree_type_has_snapshots(id) ||
-	    pos.snapshot == U32_MAX ||
 	    !snapshot_t(trans->c, pos.snapshot)->children[0])
 		return 0;
 
@@ -1718,21 +1715,14 @@ int __must_check bch2_trans_update(struct btree_trans *trans, struct btree_iter
 
 int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
 					    enum btree_id btree,
-					    struct bkey_i *k)
+					    struct bkey_i *k,
+					    bool head)
 {
 	struct btree_write_buffered_key *i;
-	int ret;
+	int ret, pos;
 
 	EBUG_ON(trans->nr_wb_updates > trans->wb_updates_size);
 	EBUG_ON(k->k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
 
-	trans_for_each_wb_update(trans, i) {
-		if (i->btree == btree && bpos_eq(i->k.k.p, k->k.p)) {
-			bkey_copy(&i->k, k);
-			return 0;
-		}
-	}
-
 	if (!trans->wb_updates ||
 	    trans->nr_wb_updates == trans->wb_updates_size) {
 		struct btree_write_buffered_key *u;
@@ -1759,13 +1749,18 @@ int __must_check bch2_trans_update_buffered(struct btree_trans *trans,
 		trans->wb_updates = u;
 	}
 
-	trans->wb_updates[trans->nr_wb_updates] = (struct btree_write_buffered_key) {
-		.btree	= btree,
-	};
+	if (head) {
+		memmove(&trans->wb_updates[1],
+			&trans->wb_updates[0],
+			sizeof(trans->wb_updates[0]) * trans->nr_wb_updates);
+		pos = 0;
+	} else {
+		pos = trans->nr_wb_updates;
+	}
 
-	bkey_copy(&trans->wb_updates[trans->nr_wb_updates].k, k);
+	trans->wb_updates[pos] = (struct btree_write_buffered_key) { .btree = btree, };
+	bkey_copy(&trans->wb_updates[pos].k, k);
 	trans->nr_wb_updates++;
 
 	return 0;
 }
 
@@ -1886,7 +1881,7 @@ int bch2_btree_delete_at_buffered(struct btree_trans *trans,
 
 	bkey_init(&k->k);
 	k->k.p = pos;
-	return bch2_trans_update_buffered(trans, btree, k);
+	return bch2_trans_update_buffered(trans, btree, k, false);
}
 
 int bch2_btree_delete_range_trans(struct btree_trans *trans, enum btree_id id,
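The new bool selects where the key lands in the transaction's write-buffer array: head=true memmoves the existing entries up and inserts at index 0, head=false appends as before. Backpointer updates pass !insert, presumably so that within one transaction a backpointer delete is ordered ahead of any update to the same position; the journal_offset + offset change in btree_write_buffer.c below then keeps that relative order stable through the flush sort. The two call shapes from this commit:

    /* append: the common case, e.g. bch2_btree_delete_at_buffered() */
    ret = bch2_trans_update_buffered(trans, btree, k, false);

    /* prepend: bch2_bucket_backpointer_mod() for deletions (insert == false) */
    ret = bch2_trans_update_buffered(trans, BTREE_ID_backpointers,
                                     &bp_k->k_i, !insert);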
@@ -279,6 +279,7 @@ int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
 	struct btree_write_buffer *wb = &c->btree_write_buffer;
 	struct btree_write_buffered_key *i;
 	union btree_write_buffer_state old, new;
+	unsigned offset = 0;
 	int ret = 0;
 	u64 v;
 
@@ -286,7 +287,8 @@ int bch2_btree_insert_keys_write_buffer(struct btree_trans *trans)
 		EBUG_ON(i->k.k.u64s > BTREE_WRITE_BUFERED_U64s_MAX);
 
 		i->journal_seq		= trans->journal_res.seq;
-		i->journal_offset	= trans->journal_res.offset;
+		i->journal_offset	= trans->journal_res.offset + offset;
+		offset++;
 	}
 
 	preempt_disable();
@@ -948,14 +948,12 @@ static int bch2_mark_stripe_ptr(struct btree_trans *trans,
 	return 0;
 }
 
-int bch2_mark_extent(struct btree_trans *trans,
-		     enum btree_id btree_id, unsigned level,
-		     struct bkey_s_c old, struct bkey_s_c new,
-		     unsigned flags)
+static int __mark_extent(struct btree_trans *trans,
+			 enum btree_id btree_id, unsigned level,
+			 struct bkey_s_c k, unsigned flags)
 {
 	u64 journal_seq = trans->journal_res.seq;
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
@@ -1031,6 +1029,14 @@ int bch2_mark_extent(struct btree_trans *trans,
 	return 0;
 }
 
+int bch2_mark_extent(struct btree_trans *trans,
+		     enum btree_id btree_id, unsigned level,
+		     struct bkey_s_c old, struct bkey_s_c new,
+		     unsigned flags)
+{
+	return mem_trigger_run_insert_then_overwrite(__mark_extent, trans, btree_id, level, old, new, flags);
+}
+
 int bch2_mark_stripe(struct btree_trans *trans,
 		     enum btree_id btree_id, unsigned level,
 		     struct bkey_s_c old, struct bkey_s_c new,
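mem_trigger_run_insert_then_overwrite() is introduced elsewhere in this commit and is not visible in this diff. From these call sites it evidently runs the single-key helper once for the inserted key and once for the overwritten key, which is what lets the BTREE_TRIGGER_WANTS_OLD_AND_NEW special-casing be deleted. A hypothetical reconstruction of such a macro, offered only as a sketch of the call-site semantics (not the committed definition):

    /* Assumed shape: run _fn on the new key for inserts, then on the
     * old key for overwrites, stopping at the first error. */
    #define mem_trigger_run_insert_then_overwrite(_fn, _trans, _btree_id,   \
                                                  _level, _old, _new, _flags)\
        (((_flags) & BTREE_TRIGGER_INSERT                                   \
          ? _fn(_trans, _btree_id, _level, _new,                            \
                (_flags) & ~BTREE_TRIGGER_OVERWRITE) : 0) ?:                \
         ((_flags) & BTREE_TRIGGER_OVERWRITE                                \
          ? _fn(_trans, _btree_id, _level, _old,                            \
                (_flags) & ~BTREE_TRIGGER_INSERT) : 0))

The same wrapper pattern repeats below for reservations, reflink pointers, and the transactional trigger variants.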
@@ -1169,13 +1175,11 @@ int bch2_mark_inode(struct btree_trans *trans,
 	return 0;
 }
 
-int bch2_mark_reservation(struct btree_trans *trans,
-			  enum btree_id btree_id, unsigned level,
-			  struct bkey_s_c old, struct bkey_s_c new,
-			  unsigned flags)
+static int __mark_reservation(struct btree_trans *trans,
+			      enum btree_id btree_id, unsigned level,
+			      struct bkey_s_c k, unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
 	struct bch_fs_usage __percpu *fs_usage;
 	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
 	s64 sectors = (s64) k.k->size;
@@ -1202,6 +1206,14 @@ int bch2_mark_reservation(struct btree_trans *trans,
 	return 0;
 }
 
+int bch2_mark_reservation(struct btree_trans *trans,
+			  enum btree_id btree_id, unsigned level,
+			  struct bkey_s_c old, struct bkey_s_c new,
+			  unsigned flags)
+{
+	return mem_trigger_run_insert_then_overwrite(__mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
 static s64 __bch2_mark_reflink_p(struct btree_trans *trans,
 				 struct bkey_s_c_reflink_p p,
 				 u64 start, u64 end,
@@ -1256,13 +1268,11 @@ fsck_err:
 	return ret;
 }
 
-int bch2_mark_reflink_p(struct btree_trans *trans,
-			enum btree_id btree_id, unsigned level,
-			struct bkey_s_c old, struct bkey_s_c new,
-			unsigned flags)
+static int __mark_reflink_p(struct btree_trans *trans,
+			    enum btree_id btree_id, unsigned level,
+			    struct bkey_s_c k, unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE ? old : new;
 	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
 	struct reflink_gc *ref;
 	size_t l, r, m;
@@ -1296,6 +1306,14 @@ int bch2_mark_reflink_p(struct btree_trans *trans,
 	return ret;
 }
 
+int bch2_mark_reflink_p(struct btree_trans *trans,
+			enum btree_id btree_id, unsigned level,
+			struct bkey_s_c old, struct bkey_s_c new,
+			unsigned flags)
+{
+	return mem_trigger_run_insert_then_overwrite(__mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
 void bch2_trans_fs_usage_revert(struct btree_trans *trans,
 				struct replicas_delta_list *deltas)
 {
@@ -1441,20 +1459,20 @@ static inline int bch2_trans_mark_pointer(struct btree_trans *trans,
 
 	ret = __mark_pointer(trans, k, &p.ptr, sectors, bp.data_type,
 			     a->v.gen, &a->v.data_type,
-			     &a->v.dirty_sectors, &a->v.cached_sectors);
+			     &a->v.dirty_sectors, &a->v.cached_sectors) ?:
+	      bch2_trans_update(trans, &iter, &a->k_i, 0);
+	bch2_trans_iter_exit(trans, &iter);
 
 	if (ret)
-		goto err;
+		return ret;
 
 	if (!p.ptr.cached) {
 		ret = bch2_bucket_backpointer_mod(trans, bucket, bp, k, insert);
 		if (ret)
-			goto err;
+			return ret;
 	}
 
-	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
-err:
-	bch2_trans_iter_exit(trans, &iter);
-	return ret;
+	return 0;
 }
 
 static int bch2_trans_mark_stripe_ptr(struct btree_trans *trans,
@@ -1497,15 +1515,11 @@ err:
 	return ret;
 }
 
-int bch2_trans_mark_extent(struct btree_trans *trans,
-			   enum btree_id btree_id, unsigned level,
-			   struct bkey_s_c old, struct bkey_i *new,
-			   unsigned flags)
+static int __trans_mark_extent(struct btree_trans *trans,
+			       enum btree_id btree_id, unsigned level,
+			       struct bkey_s_c k, unsigned flags)
 {
 	struct bch_fs *c = trans->c;
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
-		? old
-		: bkey_i_to_s_c(new);
 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
 	const union bch_extent_entry *entry;
 	struct extent_ptr_decoded p;
@@ -1562,6 +1576,14 @@ int bch2_trans_mark_extent(struct btree_trans *trans,
 	return ret;
 }
 
+int bch2_trans_mark_extent(struct btree_trans *trans,
+			   enum btree_id btree_id, unsigned level,
+			   struct bkey_s_c old, struct bkey_i *new,
+			   unsigned flags)
+{
+	return trigger_run_insert_then_overwrite(__trans_mark_extent, trans, btree_id, level, old, new, flags);
+}
+
 static int bch2_trans_mark_stripe_bucket(struct btree_trans *trans,
 					 struct bkey_s_c_stripe s,
 					 unsigned idx, bool deleting)
@@ -1736,15 +1758,10 @@ int bch2_trans_mark_inode(struct btree_trans *trans,
 	return 0;
 }
 
-int bch2_trans_mark_reservation(struct btree_trans *trans,
-				enum btree_id btree_id, unsigned level,
-				struct bkey_s_c old,
-				struct bkey_i *new,
-				unsigned flags)
+static int __trans_mark_reservation(struct btree_trans *trans,
+				    enum btree_id btree_id, unsigned level,
+				    struct bkey_s_c k, unsigned flags)
 {
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
-		? old
-		: bkey_i_to_s_c(new);
 	unsigned replicas = bkey_s_c_to_reservation(k).v->nr_replicas;
 	s64 sectors = (s64) k.k->size;
 	struct replicas_delta_list *d;
@@ -1766,7 +1783,16 @@ int bch2_trans_mark_reservation(struct btree_trans *trans,
 	return 0;
 }
 
-static int __bch2_trans_mark_reflink_p(struct btree_trans *trans,
+int bch2_trans_mark_reservation(struct btree_trans *trans,
+				enum btree_id btree_id, unsigned level,
+				struct bkey_s_c old,
+				struct bkey_i *new,
+				unsigned flags)
+{
+	return trigger_run_insert_then_overwrite(__trans_mark_reservation, trans, btree_id, level, old, new, flags);
+}
+
+static int trans_mark_reflink_p_segment(struct btree_trans *trans,
 				       struct bkey_s_c_reflink_p p,
 				       u64 *idx, unsigned flags)
 {
@@ -1833,35 +1859,38 @@ err:
 	return ret;
 }
 
-int bch2_trans_mark_reflink_p(struct btree_trans *trans,
-			      enum btree_id btree_id, unsigned level,
-			      struct bkey_s_c old,
-			      struct bkey_i *new,
-			      unsigned flags)
+static int __trans_mark_reflink_p(struct btree_trans *trans,
+				  enum btree_id btree_id, unsigned level,
+				  struct bkey_s_c k, unsigned flags)
 {
-	struct bkey_s_c k = flags & BTREE_TRIGGER_OVERWRITE
-		? old
-		: bkey_i_to_s_c(new);
 	struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
 	u64 idx, end_idx;
 	int ret = 0;
 
-	if (flags & BTREE_TRIGGER_INSERT) {
-		struct bch_reflink_p *v = (struct bch_reflink_p *) p.v;
-
-		v->front_pad = v->back_pad = 0;
-	}
-
 	idx	= le64_to_cpu(p.v->idx) - le32_to_cpu(p.v->front_pad);
 	end_idx = le64_to_cpu(p.v->idx) + p.k->size +
 		le32_to_cpu(p.v->back_pad);
 
 	while (idx < end_idx && !ret)
-		ret = __bch2_trans_mark_reflink_p(trans, p, &idx, flags);
-
+		ret = trans_mark_reflink_p_segment(trans, p, &idx, flags);
 	return ret;
 }
 
+int bch2_trans_mark_reflink_p(struct btree_trans *trans,
+			      enum btree_id btree_id, unsigned level,
+			      struct bkey_s_c old,
+			      struct bkey_i *new,
+			      unsigned flags)
+{
+	if (flags & BTREE_TRIGGER_INSERT) {
+		struct bch_reflink_p *v = &bkey_i_to_reflink_p(new)->v;
+
+		v->front_pad = v->back_pad = 0;
+	}
+
+	return trigger_run_insert_then_overwrite(__trans_mark_reflink_p, trans, btree_id, level, old, new, flags);
+}
+
 static int __bch2_trans_mark_metadata_bucket(struct btree_trans *trans,
 					     struct bch_dev *ca, size_t b,
 					     enum bch_data_type type,
@ -1988,7 +2017,10 @@ static int __bch2_trans_mark_dev_sb(struct btree_trans *trans,
|
||||
|
||||
int bch2_trans_mark_dev_sb(struct bch_fs *c, struct bch_dev *ca)
|
||||
{
|
||||
return bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
|
||||
int ret = bch2_trans_run(c, __bch2_trans_mark_dev_sb(&trans, ca));
|
||||
if (ret)
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Disk reservations: */
|
||||
|
@@ -150,26 +150,26 @@ static inline struct bch_dev_usage bch2_dev_usage_read(struct bch_dev *ca)

void bch2_dev_usage_init(struct bch_dev *);

static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reserve reserve)
static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum bch_watermark watermark)
{
	s64 reserved = 0;

	switch (reserve) {
	case RESERVE_NR:
	switch (watermark) {
	case BCH_WATERMARK_NR:
		unreachable();
	case RESERVE_stripe:
	case BCH_WATERMARK_stripe:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case RESERVE_none:
	case BCH_WATERMARK_normal:
		reserved += ca->mi.nbuckets >> 6;
		fallthrough;
	case RESERVE_movinggc:
	case BCH_WATERMARK_copygc:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case RESERVE_btree:
	case BCH_WATERMARK_btree:
		reserved += ca->nr_btree_reserve;
		fallthrough;
	case RESERVE_btree_movinggc:
	case BCH_WATERMARK_btree_copygc:
		break;
	}

@@ -178,17 +178,17 @@ static inline u64 bch2_dev_buckets_reserved(struct bch_dev *ca, enum alloc_reser

static inline u64 dev_buckets_free(struct bch_dev *ca,
		struct bch_dev_usage usage,
		enum alloc_reserve reserve)
		enum bch_watermark watermark)
{
	return max_t(s64, 0,
		usage.d[BCH_DATA_free].buckets -
		ca->nr_open_buckets -
		bch2_dev_buckets_reserved(ca, reserve));
		bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 __dev_buckets_available(struct bch_dev *ca,
		struct bch_dev_usage usage,
		enum alloc_reserve reserve)
		enum bch_watermark watermark)
{
	return max_t(s64, 0,
		usage.d[BCH_DATA_free].buckets
@@ -196,13 +196,13 @@ static inline u64 __dev_buckets_available(struct bch_dev *ca,
		+ usage.d[BCH_DATA_need_gc_gens].buckets
		+ usage.d[BCH_DATA_need_discard].buckets
		- ca->nr_open_buckets
		- bch2_dev_buckets_reserved(ca, reserve));
		- bch2_dev_buckets_reserved(ca, watermark));
}

static inline u64 dev_buckets_available(struct bch_dev *ca,
		enum alloc_reserve reserve)
		enum bch_watermark watermark)
{
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), reserve);
	return __dev_buckets_available(ca, bch2_dev_usage_read(ca), watermark);
}

/* Filesystem usage: */
@@ -273,6 +273,20 @@ int bch2_trans_mark_inode(struct btree_trans *, enum btree_id, unsigned, struct
int bch2_trans_mark_reservation(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);
int bch2_trans_mark_reflink_p(struct btree_trans *, enum btree_id, unsigned, struct bkey_s_c, struct bkey_i *, unsigned);

#define mem_trigger_run_insert_then_overwrite(_fn, _trans, _btree_id, _level, _old, _new, _flags)\
({										\
	int ret = 0;								\
										\
	if (_new.k->type)							\
		ret = _fn(_trans, _btree_id, _level, _new, _flags & ~BTREE_TRIGGER_OVERWRITE);	\
	if (_old.k->type && !ret)						\
		ret = _fn(_trans, _btree_id, _level, _old, _flags & ~BTREE_TRIGGER_INSERT);	\
	ret;									\
})

#define trigger_run_insert_then_overwrite(_fn, _trans, _btree_id, _level, _old, _new, _flags) \
	mem_trigger_run_insert_then_overwrite(_fn, _trans, _btree_id, _level, _old, bkey_i_to_s_c(_new), _flags)

void bch2_trans_fs_usage_revert(struct btree_trans *, struct replicas_delta_list *);
int bch2_trans_fs_usage_apply(struct btree_trans *, struct replicas_delta_list *);

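For illustration, a minimal sketch of how a key-type trigger is wired through the helper above; the example trigger name is hypothetical, only the macro and the BTREE_TRIGGER_* flags come from this commit:

/*
 * Sketch: the macro runs _fn at most twice, first on the inserted key with
 * BTREE_TRIGGER_OVERWRITE masked off, then on the overwritten key with
 * BTREE_TRIGGER_INSERT masked off, so _fn only ever sees one key at a time.
 */
static int __trans_mark_example(struct btree_trans *trans,
		enum btree_id btree_id, unsigned level,
		struct bkey_s_c k, unsigned flags)
{
	/* mark one key; flags say whether it is the insert or the overwrite */
	return 0;
}

int bch2_trans_mark_example(struct btree_trans *trans,
		enum btree_id btree_id, unsigned level,
		struct bkey_s_c old, struct bkey_i *new,
		unsigned flags)
{
	return trigger_run_insert_then_overwrite(__trans_mark_example, trans, btree_id, level, old, new, flags);
}
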
@@ -753,7 +753,7 @@ int __init bch2_chardev_init(void)
	if (bch_chardev_major < 0)
		return bch_chardev_major;

	bch_chardev_class = class_create(THIS_MODULE, "bcachefs");
	bch_chardev_class = class_create("bcachefs");
	if (IS_ERR(bch_chardev_class))
		return PTR_ERR(bch_chardev_class);

@@ -459,7 +459,7 @@ int bch2_data_update_init(struct btree_trans *trans,
		bch2_compression_opt_to_type[io_opts.background_compression ?:
					     io_opts.compression];
	if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
		m->op.alloc_reserve = RESERVE_movinggc;
		m->op.alloc_reserve = BCH_WATERMARK_copygc;

	bkey_for_each_ptr(ptrs, ptr)
		percpu_ref_get(&bch_dev_bkey_exists(c, ptr->dev)->ref);

@@ -378,26 +378,25 @@ static ssize_t bch2_read_btree(struct file *file, char __user *buf,
	i->size = size;
	i->ret = 0;

	bch2_trans_init(&trans, i->c, 0, 0);
	ret = flush_buf(i);
	if (ret)
		return ret;

	bch2_trans_init(&trans, i->c, 0, 0);
	ret = for_each_btree_key2(&trans, iter, i->id, i->from,
				  BTREE_ITER_PREFETCH|
				  BTREE_ITER_ALL_SNAPSHOTS, k, ({
		ret = flush_buf(i);
		if (ret)
			break;

		bch2_bkey_val_to_text(&i->buf, i->c, k);
		prt_newline(&i->buf);
		0;
		drop_locks_do(&trans, flush_buf(i));
	}));
	i->from = iter.pos;

	bch2_trans_exit(&trans);

	if (!ret)
		ret = flush_buf(i);

	bch2_trans_exit(&trans);

	return ret ?: i->ret;
}

@@ -429,19 +428,24 @@ static ssize_t bch2_read_btree_formats(struct file *file, char __user *buf,
		return i->ret;

	bch2_trans_init(&trans, i->c, 0, 0);
retry:
	bch2_trans_begin(&trans);

	for_each_btree_node(&trans, iter, i->id, i->from, 0, b, ret) {
		ret = flush_buf(i);
		if (ret)
			break;

		bch2_btree_node_to_text(&i->buf, i->c, b);
		i->from = !bpos_eq(SPOS_MAX, b->key.k.p)
			? bpos_successor(b->key.k.p)
			: b->key.k.p;

		ret = drop_locks_do(&trans, flush_buf(i));
		if (ret)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	bch2_trans_exit(&trans);

	if (!ret)
@@ -483,17 +487,13 @@ static ssize_t bch2_read_bfloat_failed(struct file *file, char __user *buf,
		struct bkey_packed *_k =
			bch2_btree_node_iter_peek(&l->iter, l->b);

		ret = flush_buf(i);
		if (ret)
			break;

		if (bpos_gt(l->b->key.k.p, i->prev_node)) {
			bch2_btree_node_to_text(&i->buf, i->c, l->b);
			i->prev_node = l->b->key.k.p;
		}

		bch2_bfloat_to_text(&i->buf, l->b, _k);
		0;
		drop_locks_do(&trans, flush_buf(i));
	}));
	i->from = iter.pos;

@@ -627,19 +627,26 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
	struct bch_fs *c = i->c;
	struct btree_trans *trans;
	ssize_t ret = 0;
	u32 seq;

	i->ubuf = buf;
	i->size = size;
	i->ret = 0;

	mutex_lock(&c->btree_trans_lock);
restart:
	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (trans->locking_wait.task->pid <= i->iter)
			continue;

		closure_get(&trans->ref);
		seq = seqmutex_seq(&c->btree_trans_lock);
		seqmutex_unlock(&c->btree_trans_lock);

		ret = flush_buf(i);
		if (ret)
			break;
		if (ret) {
			closure_put(&trans->ref);
			goto unlocked;
		}

		bch2_btree_trans_to_text(&i->buf, trans);

@@ -651,9 +658,14 @@ static ssize_t bch2_btree_transactions_read(struct file *file, char __user *buf,
		prt_newline(&i->buf);

		i->iter = trans->locking_wait.task->pid;
	}
	mutex_unlock(&c->btree_trans_lock);

		closure_put(&trans->ref);

		if (!seqmutex_relock(&c->btree_trans_lock, seq))
			goto restart;
	}
	seqmutex_unlock(&c->btree_trans_lock);
unlocked:
	if (i->buf.allocation_failure)
		ret = -ENOMEM;

@@ -815,6 +827,7 @@ static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,
	struct bch_fs *c = i->c;
	struct btree_trans *trans;
	ssize_t ret = 0;
	u32 seq;

	i->ubuf = buf;
	i->size = size;
@@ -822,21 +835,32 @@ static ssize_t bch2_btree_deadlock_read(struct file *file, char __user *buf,

	if (i->iter)
		goto out;

	mutex_lock(&c->btree_trans_lock);
restart:
	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		if (trans->locking_wait.task->pid <= i->iter)
			continue;

		closure_get(&trans->ref);
		seq = seqmutex_seq(&c->btree_trans_lock);
		seqmutex_unlock(&c->btree_trans_lock);

		ret = flush_buf(i);
		if (ret)
			break;
		if (ret) {
			closure_put(&trans->ref);
			goto out;
		}

		bch2_check_for_deadlock(trans, &i->buf);

		i->iter = trans->locking_wait.task->pid;

		closure_put(&trans->ref);

		if (!seqmutex_relock(&c->btree_trans_lock, seq))
			goto restart;
	}
	mutex_unlock(&c->btree_trans_lock);
	seqmutex_unlock(&c->btree_trans_lock);
out:
	if (i->buf.allocation_failure)
		ret = -ENOMEM;

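The two debugfs readers above converge on one idiom for walking c->btree_trans_list with the new seqmutex: pin the current element, record the lock sequence, drop the lock for blocking work, then relock and restart the walk if the sequence moved. A condensed sketch of that idiom, with the work step left abstract:

restart:
	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		closure_get(&trans->ref);	/* keep trans alive across the unlock */
		seq = seqmutex_seq(&c->btree_trans_lock);
		seqmutex_unlock(&c->btree_trans_lock);

		/* blocking work (flush_buf() above) goes here, lock not held */

		closure_put(&trans->ref);
		if (!seqmutex_relock(&c->btree_trans_lock, seq))
			goto restart;		/* list changed under us */
	}
	seqmutex_unlock(&c->btree_trans_lock);
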
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "bkey_buf.h"
#include "bkey_methods.h"
#include "btree_update.h"
#include "extents.h"
@@ -504,8 +505,10 @@ int bch2_readdir(struct bch_fs *c, subvol_inum inum, struct dir_context *ctx)
	struct bkey_s_c_dirent dirent;
	subvol_inum target;
	u32 snapshot;
	struct bkey_buf sk;
	int ret;

	bch2_bkey_buf_init(&sk);
	bch2_trans_init(&trans, c, 0, 0);
retry:
	bch2_trans_begin(&trans);
@@ -528,10 +531,11 @@ retry:
		if (ret)
			continue;

		/*
		 * XXX: dir_emit() can fault and block, while we're holding
		 * locks
		 */
		/* dir_emit() can fault and block: */
		bch2_bkey_buf_reassemble(&sk, c, k);
		dirent = bkey_i_to_s_c_dirent(sk.k);
		bch2_trans_unlock(&trans);

		ctx->pos = dirent.k->p.offset;
		if (!dir_emit(ctx, dirent.v->d_name,
			      bch2_dirent_name_bytes(dirent),
@@ -554,6 +558,7 @@ err:
		goto retry;

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&sk, c);

	return ret;
}

@@ -798,7 +798,7 @@ static void ec_stripe_delete_work(struct work_struct *work)
		ret = commit_do(&trans, NULL, NULL, BTREE_INSERT_NOFAIL,
				ec_stripe_delete(&trans, idx));
		if (ret) {
			bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
			bch_err_fn(c, ret);
			break;
		}
	}
@@ -1333,7 +1333,7 @@ static int ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
static struct ec_stripe_head *
ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
			 unsigned algo, unsigned redundancy,
			 enum alloc_reserve reserve)
			 enum bch_watermark watermark)
{
	struct ec_stripe_head *h;
	struct bch_dev *ca;
@@ -1349,7 +1349,7 @@ ec_new_stripe_head_alloc(struct bch_fs *c, unsigned target,
	h->target = target;
	h->algo = algo;
	h->redundancy = redundancy;
	h->reserve = reserve;
	h->watermark = watermark;

	rcu_read_lock();
	h->devs = target_rw_devs(c, BCH_DATA_user, target);
@@ -1384,7 +1384,7 @@ struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
		unsigned target,
		unsigned algo,
		unsigned redundancy,
		enum alloc_reserve reserve)
		enum bch_watermark watermark)
{
	struct bch_fs *c = trans->c;
	struct ec_stripe_head *h;
@@ -1406,21 +1406,21 @@ struct ec_stripe_head *__bch2_ec_stripe_head_get(struct btree_trans *trans,
		if (h->target == target &&
		    h->algo == algo &&
		    h->redundancy == redundancy &&
		    h->reserve == reserve) {
		    h->watermark == watermark) {
			ret = bch2_trans_mutex_lock(trans, &h->lock);
			if (ret)
				h = ERR_PTR(ret);
			goto found;
		}

	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, reserve);
	h = ec_new_stripe_head_alloc(c, target, algo, redundancy, watermark);
found:
	mutex_unlock(&c->ec_stripe_head_lock);
	return h;
}

static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_head *h,
				    enum alloc_reserve reserve, struct closure *cl)
				    enum bch_watermark watermark, struct closure *cl)
{
	struct bch_fs *c = trans->c;
	struct bch_devs_mask devs = h->devs;
@@ -1453,7 +1453,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
					    &nr_have_parity,
					    &have_cache, 0,
					    BCH_DATA_parity,
					    reserve,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
@@ -1480,7 +1480,7 @@ static int new_stripe_alloc_buckets(struct btree_trans *trans, struct ec_stripe_
					    &nr_have_data,
					    &have_cache, 0,
					    BCH_DATA_user,
					    reserve,
					    watermark,
					    cl);

		open_bucket_for_each(c, &buckets, ob, i) {
@@ -1658,7 +1658,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
		unsigned target,
		unsigned algo,
		unsigned redundancy,
		enum alloc_reserve reserve,
		enum bch_watermark watermark,
		struct closure *cl)
{
	struct bch_fs *c = trans->c;
@@ -1666,7 +1666,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
	bool waiting = false;
	int ret;

	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, reserve);
	h = __bch2_ec_stripe_head_get(trans, target, algo, redundancy, watermark);
	if (!h)
		bch_err(c, "no stripe head");
	if (IS_ERR_OR_NULL(h))
@@ -1687,7 +1687,7 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
		goto alloc_existing;

	/* First, try to allocate a full stripe: */
	ret = new_stripe_alloc_buckets(trans, h, RESERVE_stripe, NULL) ?:
	ret = new_stripe_alloc_buckets(trans, h, BCH_WATERMARK_stripe, NULL) ?:
		__bch2_ec_stripe_head_reserve(trans, h);
	if (!ret)
		goto allocate_buf;
@@ -1706,8 +1706,8 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
	if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
		goto err;

	if (reserve == RESERVE_movinggc) {
		ret = new_stripe_alloc_buckets(trans, h, reserve, NULL) ?:
	if (watermark == BCH_WATERMARK_copygc) {
		ret = new_stripe_alloc_buckets(trans, h, watermark, NULL) ?:
			__bch2_ec_stripe_head_reserve(trans, h);
		if (ret)
			goto err;
@@ -1723,10 +1723,10 @@ struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
	closure_wake_up(&c->freelist_wait);
alloc_existing:
	/*
	 * Retry allocating buckets, with the reserve watermark for this
	 * Retry allocating buckets, with the watermark for this
	 * particular write:
	 */
	ret = new_stripe_alloc_buckets(trans, h, reserve, cl);
	ret = new_stripe_alloc_buckets(trans, h, watermark, cl);
	if (ret)
		goto err;

@@ -1845,7 +1845,7 @@ int bch2_stripes_read(struct bch_fs *c)
	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error reading stripes: %i", ret);
		bch_err_fn(c, ret);

	return ret;
}
@@ -1880,7 +1880,7 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
		prt_printf(out, "target %u algo %u redundancy %u %s:\n",
			   h->target, h->algo, h->redundancy,
			   bch2_alloc_reserves[h->reserve]);
			   bch2_watermarks[h->watermark]);

		if (h->s)
			prt_printf(out, "\tidx %llu blocks %u+%u allocated %u\n",
@@ -1898,7 +1898,7 @@ void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
			   s->idx, s->nr_data, s->nr_parity,
			   atomic_read(&s->ref[STRIPE_REF_io]),
			   atomic_read(&s->ref[STRIPE_REF_stripe]),
			   bch2_alloc_reserves[s->h->reserve]);
			   bch2_watermarks[s->h->watermark]);
	}
	mutex_unlock(&c->ec_stripe_new_lock);
}

@@ -187,7 +187,7 @@ struct ec_stripe_head {
	unsigned target;
	unsigned algo;
	unsigned redundancy;
	enum alloc_reserve reserve;
	enum bch_watermark watermark;

	struct bch_devs_mask devs;
	unsigned nr_active_devs;
@@ -211,7 +211,7 @@ int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);
void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			unsigned, unsigned, unsigned,
			enum alloc_reserve, struct closure *);
			enum bch_watermark, struct closure *);

void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);

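Taken together, these hunks rename the allocation reserves to watermarks one for one; as a reading aid, derived only from the substitutions visible above:

/*
 * RESERVE_NR              -> BCH_WATERMARK_NR
 * RESERVE_stripe          -> BCH_WATERMARK_stripe
 * RESERVE_none            -> BCH_WATERMARK_normal
 * RESERVE_movinggc        -> BCH_WATERMARK_copygc
 * RESERVE_btree           -> BCH_WATERMARK_btree
 * RESERVE_btree_movinggc  -> BCH_WATERMARK_btree_copygc
 */
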
@@ -151,7 +151,7 @@
	x(0, backpointer_to_overwritten_btree_node) \
	x(0, lock_fail_root_changed) \
	x(0, journal_reclaim_would_deadlock) \
	x(0, fsck) \
	x(EINVAL, fsck) \
	x(BCH_ERR_fsck, fsck_fix) \
	x(BCH_ERR_fsck, fsck_ignore) \
	x(BCH_ERR_fsck, fsck_errors_not_fixed) \
@@ -224,7 +224,7 @@ bool __bch2_err_matches(int, int);

static inline bool _bch2_err_matches(int err, int class)
{
	return err && __bch2_err_matches(err, class);
	return err < 0 && __bch2_err_matches(err, class);
}

#define bch2_err_matches(_err, _class) \

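A sketch of why the err < 0 tightening matters: many helpers return positive values (counts) or zero on success, and only negative values are bcachefs error codes, so matching must ignore non-negative returns. Hypothetical caller:

	int ret = do_some_btree_op();	/* placeholder; may return a positive count */

	/* with the old test, a positive ret could have been matched as an error */
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;
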
@@ -35,6 +35,49 @@

#include <trace/events/writeback.h>

struct folio_vec {
	struct folio *fv_folio;
	size_t fv_offset;
	size_t fv_len;
};

static inline struct folio_vec biovec_to_foliovec(struct bio_vec bv)
{

	struct folio *folio = page_folio(bv.bv_page);
	size_t offset = (folio_page_idx(folio, bv.bv_page) << PAGE_SHIFT) +
		bv.bv_offset;
	size_t len = min_t(size_t, folio_size(folio) - offset, bv.bv_len);

	return (struct folio_vec) {
		.fv_folio = folio,
		.fv_offset = offset,
		.fv_len = len,
	};
}

static inline struct folio_vec bio_iter_iovec_folio(struct bio *bio,
						    struct bvec_iter iter)
{
	return biovec_to_foliovec(bio_iter_iovec(bio, iter));
}

#define __bio_for_each_folio(bvl, bio, iter, start)			\
	for (iter = (start);						\
	     (iter).bi_size &&						\
		((bvl = bio_iter_iovec_folio((bio), (iter))), 1);	\
	     bio_advance_iter_single((bio), &(iter), (bvl).fv_len))

/**
 * bio_for_each_folio - iterate over folios within a bio
 *
 * Like other non-_all versions, this iterates over what bio->bi_iter currently
 * points to. This version is for drivers, where the bio may have previously
 * been split or cloned.
 */
#define bio_for_each_folio(bvl, bio, iter)				\
	__bio_for_each_folio(bvl, bio, iter, (bio)->bi_iter)

/*
 * Use u64 for the end pos and sector helpers because if the folio covers the
 * max supported range of the mapping, the start offset of the next folio
@@ -81,7 +124,7 @@ static int filemap_get_contig_folios_d(struct address_space *mapping,
			break;

		f = __filemap_get_folio(mapping, pos >> PAGE_SHIFT, fgp_flags, gfp);
		if (!f)
		if (IS_ERR_OR_NULL(f))
			break;

		BUG_ON(folios->nr && folio_pos(f) != pos);
@@ -1062,17 +1105,16 @@ bool bch2_release_folio(struct folio *folio, gfp_t gfp_mask)

static void bch2_readpages_end_io(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct folio_vec fv;
	struct folio_iter fi;

	bio_for_each_folio_all(fv, bio, iter) {
	bio_for_each_folio_all(fi, bio) {
		if (!bio->bi_status) {
			folio_mark_uptodate(fv.fv_folio);
			folio_mark_uptodate(fi.folio);
		} else {
			folio_clear_uptodate(fv.fv_folio);
			folio_set_error(fv.fv_folio);
			folio_clear_uptodate(fi.folio);
			folio_set_error(fi.folio);
		}
		folio_unlock(fv.fv_folio);
		folio_unlock(fi.folio);
	}

	bio_put(bio);
@@ -1430,34 +1472,33 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
		container_of(op, struct bch_writepage_io, op);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct bvec_iter_all iter;
	struct folio_vec fv;
	struct folio_iter fi;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_folio_all(fv, bio, iter) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			folio_set_error(fv.fv_folio);
			mapping_set_error(fv.fv_folio->mapping, -EIO);
			folio_set_error(fi.folio);
			mapping_set_error(fi.folio->mapping, -EIO);

			s = __bch2_folio(fv.fv_folio);
			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fv.fv_folio); i++)
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_folio_all(fv, bio, iter) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			s = __bch2_folio(fv.fv_folio);
			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fv.fv_folio); i++)
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
@@ -1482,11 +1523,11 @@ static void bch2_writepage_io_done(struct bch_write_op *op)
	 */
	i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_folio_all(fv, bio, iter) {
		struct bch_folio *s = __bch2_folio(fv.fv_folio);
	bio_for_each_folio_all(fi, bio) {
		struct bch_folio *s = __bch2_folio(fi.folio);

		if (atomic_dec_and_test(&s->write_count))
			folio_end_writeback(fv.fv_folio);
			folio_end_writeback(fi.folio);
	}

	bio_put(&io->op.wbio.bio);
@@ -1723,7 +1764,7 @@ int bch2_write_begin(struct file *file, struct address_space *mapping,
	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
				    mapping_gfp_mask(mapping));
	if (!folio)
	if (IS_ERR_OR_NULL(folio))
		goto err_unlock;

	if (folio_test_uptodate(folio))
@@ -2321,10 +2362,29 @@ static noinline bool bch2_dio_write_check_allocated(struct dio_write *dio)
static void bch2_dio_write_loop_async(struct bch_write_op *);
static __always_inline long bch2_dio_write_done(struct dio_write *dio);

/*
 * We're going to return -EIOCBQUEUED, but we haven't finished consuming the
 * iov_iter yet, so we need to stash a copy of the iovec: it might be on the
 * caller's stack, we're not guaranteed that it will live for the duration of
 * the IO:
 */
static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
{
	struct iovec *iov = dio->inline_vecs;

	/*
	 * iov_iter has a single embedded iovec - nothing to do:
	 */
	if (iter_is_ubuf(&dio->iter))
		return 0;

	/*
	 * We don't currently handle non-iovec iov_iters here - return an error,
	 * and we'll fall back to doing the IO synchronously:
	 */
	if (!iter_is_iovec(&dio->iter))
		return -1;

	if (dio->iter.nr_segs > ARRAY_SIZE(dio->inline_vecs)) {
		iov = kmalloc_array(dio->iter.nr_segs, sizeof(*iov),
				    GFP_KERNEL);
@@ -2334,8 +2394,8 @@ static noinline int bch2_dio_write_copy_iov(struct dio_write *dio)
		dio->free_iov = true;
	}

	memcpy(iov, dio->iter.iov, dio->iter.nr_segs * sizeof(*iov));
	dio->iter.iov = iov;
	memcpy(iov, dio->iter.__iov, dio->iter.nr_segs * sizeof(*iov));
	dio->iter.__iov = iov;
	return 0;
}

@@ -2395,7 +2455,7 @@ static __always_inline long bch2_dio_write_done(struct dio_write *dio)
	bch2_pagecache_block_put(inode);

	if (dio->free_iov)
		kfree(dio->iter.iov);
		kfree(dio->iter.__iov);

	ret = dio->op.error ?: ((long) dio->written << 9);
	bio_put(&dio->op.wbio.bio);
@@ -2437,13 +2497,7 @@ static __always_inline void bch2_dio_write_end(struct dio_write *dio)
		mutex_unlock(&inode->ei_quota_lock);
	}

	if (likely(!bio_flagged(bio, BIO_NO_PAGE_REF))) {
		struct bvec_iter_all iter;
		struct folio_vec fv;

		bio_for_each_folio_all(fv, bio, iter)
			folio_put(fv.fv_folio);
	}
	bio_release_pages(bio, false);

	if (unlikely(dio->op.error))
		set_bit(EI_INODE_ERROR, &inode->ei_flags);
@@ -2562,13 +2616,7 @@ out:
err:
	dio->op.error = ret;

	if (!bio_flagged(bio, BIO_NO_PAGE_REF)) {
		struct bvec_iter_all iter;
		struct folio_vec fv;

		bio_for_each_folio_all(fv, bio, iter)
			folio_put(fv.fv_folio);
	}
	bio_release_pages(bio, false);

	bch2_quota_reservation_put(c, inode, &dio->quota_res);
	goto out;
@@ -2807,7 +2855,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,
	u64 end_pos;

	folio = filemap_lock_folio(mapping, index);
	if (!folio) {
	if (IS_ERR_OR_NULL(folio)) {
		/*
		 * XXX: we're doing two index lookups when we end up reading the
		 * folio
@@ -2820,7 +2868,7 @@ static int __bch2_truncate_folio(struct bch_inode_info *inode,

		folio = __filemap_get_folio(mapping, index,
					    FGP_LOCK|FGP_CREAT, GFP_KERNEL);
		if (unlikely(!folio)) {
		if (unlikely(IS_ERR_OR_NULL(folio))) {
			ret = -ENOMEM;
			goto out;
		}
@@ -3743,7 +3791,7 @@ static bool folio_hole_offset(struct address_space *mapping, loff_t *offset)
	bool ret = true;

	folio = filemap_lock_folio(mapping, *offset >> PAGE_SHIFT);
	if (!folio)
	if (IS_ERR_OR_NULL(folio))
		return true;

	s = bch2_folio(folio);

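The fv-to-fi conversion above lands on the upstream folio_iter API; the resulting loop shape, sketched:

	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		/* fi.folio, fi.offset, fi.length describe one folio's worth of the bio */
		folio_unlock(fi.folio);
	}
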
@@ -943,6 +943,7 @@ retry:
		cur.k->k.p.offset += cur.k->k.size;

		if (have_extent) {
			bch2_trans_unlock(&trans);
			ret = bch2_fill_extent(c, info,
					bkey_i_to_s_c(prev.k), 0);
			if (ret)
@@ -961,9 +962,11 @@ err:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (!ret && have_extent)
	if (!ret && have_extent) {
		bch2_trans_unlock(&trans);
		ret = bch2_fill_extent(c, info, bkey_i_to_s_c(prev.k),
				       FIEMAP_EXTENT_LAST);
	}

	bch2_trans_exit(&trans);
	bch2_bkey_buf_exit(&cur, c);
@@ -1011,7 +1014,7 @@ static const struct file_operations bch_file_operations = {
	.mmap = bch2_mmap,
	.open = generic_file_open,
	.fsync = bch2_fsync,
	.splice_read = generic_file_splice_read,
	.splice_read = filemap_splice_read,
	.splice_write = iter_file_splice_write,
	.fallocate = bch2_fallocate_dispatch,
	.unlocked_ioctl = bch2_fs_file_ioctl,

@@ -191,17 +191,18 @@ static int __write_inode(struct btree_trans *trans,
			 struct bch_inode_unpacked *inode,
			 u32 snapshot)
{
	struct btree_iter iter;
	int ret;
	struct bkey_inode_buf *inode_p =
		bch2_trans_kmalloc(trans, sizeof(*inode_p));

	bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes,
			     SPOS(0, inode->bi_inum, snapshot),
			     BTREE_ITER_INTENT);
	if (IS_ERR(inode_p))
		return PTR_ERR(inode_p);

	ret = bch2_btree_iter_traverse(&iter) ?:
		bch2_inode_write(trans, &iter, inode);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
	bch2_inode_pack(inode_p, inode);
	inode_p->inode.k.p.snapshot = snapshot;

	return bch2_btree_insert_nonextent(trans, BTREE_ID_inodes,
				&inode_p->inode.k_i,
				BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
}

static int write_inode(struct btree_trans *trans,
@@ -303,7 +304,7 @@ static int __remove_dirent(struct btree_trans *trans, struct bpos pos)
	bch2_trans_iter_exit(trans, &iter);
err:
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
		bch_err_fn(c, ret);
	return ret;
}

@@ -638,26 +639,23 @@ static int add_inode(struct bch_fs *c, struct inode_walker *w,
	}));
}

static int __walk_inode(struct btree_trans *trans,
			struct inode_walker *w, struct bpos pos)
static int get_inodes_all_snapshots(struct btree_trans *trans,
				    struct inode_walker *w, u64 inum)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c k;
	u32 restart_count = trans->restart_count;
	unsigned i;
	int ret;

	pos.snapshot = bch2_snapshot_equiv(c, pos.snapshot);

	if (pos.inode == w->cur_inum)
		goto lookup_snapshot;
	if (w->cur_inum == inum)
		return 0;

	w->inodes.nr = 0;

	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, pos.inode),
	for_each_btree_key(trans, iter, BTREE_ID_inodes, POS(0, inum),
			   BTREE_ITER_ALL_SNAPSHOTS, k, ret) {
		if (k.k->p.offset != pos.inode)
		if (k.k->p.offset != inum)
			break;

		if (bkey_is_inode(k.k))
@@ -668,40 +666,62 @@ static int __walk_inode(struct btree_trans *trans,
	if (ret)
		return ret;

	w->cur_inum = pos.inode;
	w->cur_inum = inum;
	w->first_this_inode = true;

	if (trans_was_restarted(trans, restart_count))
		return -BCH_ERR_transaction_restart_nested;

lookup_snapshot:
	for (i = 0; i < w->inodes.nr; i++)
		if (bch2_snapshot_is_ancestor(c, pos.snapshot, w->inodes.data[i].snapshot))
	return 0;
}

static struct inode_walker_entry *
lookup_inode_for_snapshot(struct bch_fs *c,
			  struct inode_walker *w, u32 snapshot)
{
	struct inode_walker_entry *i;

	snapshot = bch2_snapshot_equiv(c, snapshot);

	darray_for_each(w->inodes, i)
		if (bch2_snapshot_is_ancestor(c, snapshot, i->snapshot))
			goto found;
	return INT_MAX;

	return NULL;
found:
	BUG_ON(pos.snapshot > w->inodes.data[i].snapshot);
	BUG_ON(snapshot > i->snapshot);

	if (pos.snapshot != w->inodes.data[i].snapshot) {
		struct inode_walker_entry e = w->inodes.data[i];
	if (snapshot != i->snapshot) {
		struct inode_walker_entry new = *i;
		int ret;

		e.snapshot = pos.snapshot;
		e.count = 0;
		new.snapshot = snapshot;
		new.count = 0;

		bch_info(c, "have key for inode %llu:%u but have inode in ancestor snapshot %u",
			 pos.inode, pos.snapshot, w->inodes.data[i].snapshot);
			 w->cur_inum, snapshot, i->snapshot);

		while (i && w->inodes.data[i - 1].snapshot > pos.snapshot)
		while (i > w->inodes.data && i[-1].snapshot > snapshot)
			--i;

		ret = darray_insert_item(&w->inodes, i, e);
		ret = darray_insert_item(&w->inodes, i - w->inodes.data, new);
		if (ret)
			return ret;
			return ERR_PTR(ret);
	}

	return i;
}

static struct inode_walker_entry *walk_inode(struct btree_trans *trans,
					     struct inode_walker *w, struct bpos pos)
{
	int ret = get_inodes_all_snapshots(trans, w, pos.inode);
	if (ret)
		return ERR_PTR(ret);

	return lookup_inode_for_snapshot(trans->c, w, pos.snapshot);
}

static int __get_visible_inodes(struct btree_trans *trans,
				struct inode_walker *w,
				struct snapshots_seen *s,
@@ -983,7 +1003,7 @@ static int check_inode(struct btree_trans *trans,
err:
fsck_err:
	if (ret)
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@@ -1009,7 +1029,7 @@ static int check_inodes(struct bch_fs *c, bool full)
	bch2_trans_exit(&trans);
	snapshots_seen_exit(&s);
	if (ret)
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@@ -1129,7 +1149,7 @@ static int check_i_sectors(struct btree_trans *trans, struct inode_walker *w)
	}
fsck_err:
	if (ret)
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	if (!ret && trans_was_restarted(trans, restart_count))
		ret = -BCH_ERR_transaction_restart_nested;
	return ret;
@@ -1143,6 +1163,25 @@ struct extent_end {

typedef DARRAY(struct extent_end) extent_ends;

static int get_print_extent(struct btree_trans *trans, struct bpos pos, struct printbuf *out)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_extents, pos,
			       BTREE_ITER_SLOTS|
			       BTREE_ITER_ALL_SNAPSHOTS|
			       BTREE_ITER_NOT_EXTENTS);
	ret = bkey_err(k);
	if (ret)
		return ret;

	bch2_bkey_val_to_text(out, trans->c, k);
	bch2_trans_iter_exit(trans, &iter);
	return 0;
}

static int check_overlapping_extents(struct btree_trans *trans,
				     struct snapshots_seen *seen,
				     extent_ends *extent_ends,
@@ -1165,17 +1204,25 @@ static int check_overlapping_extents(struct btree_trans *trans,
					      i->snapshot, &i->seen))
			continue;

		if (fsck_err_on(i->offset > bkey_start_offset(k.k), c,
				"overlapping extents: extent in snapshot %u ends at %llu overlaps with\n%s",
				i->snapshot,
				i->offset,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		if (i->offset <= bkey_start_offset(k.k))
			continue;

		printbuf_reset(&buf);
		prt_str(&buf, "overlapping extents:\n ");
		bch2_bkey_val_to_text(&buf, c, k);
		prt_str(&buf, "\n ");

		ret = get_print_extent(trans, SPOS(k.k->p.inode, i->offset, i->snapshot), &buf);
		if (ret)
			break;

		if (fsck_err(c, buf.buf)) {
			struct bkey_i *update = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
			if ((ret = PTR_ERR_OR_ZERO(update)))
				goto err;
			bkey_reassemble(update, k);
			ret = bch2_trans_update_extent(trans, iter, update, 0);
			ret = bch2_trans_update_extent(trans, iter, update,
					BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
			if (ret)
				goto err;
		}
@@ -1272,11 +1319,12 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
	if (ret)
		goto err;

	ret = __walk_inode(trans, inode, equiv);
	if (ret < 0)
	i = walk_inode(trans, inode, equiv);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret)
		goto err;

	if (fsck_err_on(ret == INT_MAX, c,
	if (fsck_err_on(!i, c,
			"extent in missing inode:\n %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -1285,13 +1333,8 @@ static int check_extent(struct btree_trans *trans, struct btree_iter *iter,
		goto out;
	}

	if (ret == INT_MAX) {
		ret = 0;
	if (!i)
		goto out;
	}

	i = inode->inodes.data + ret;
	ret = 0;

	if (fsck_err_on(!S_ISREG(i->inode.bi_mode) &&
			!S_ISLNK(i->inode.bi_mode), c,
@@ -1353,7 +1396,7 @@ fsck_err:
	printbuf_exit(&buf);

	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
		bch_err_fn(c, ret);
	return ret;
}

@@ -1395,7 +1438,7 @@ static int check_extents(struct bch_fs *c)
	snapshots_seen_exit(&s);

	if (ret)
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@@ -1434,7 +1477,7 @@ static int check_subdir_count(struct btree_trans *trans, struct inode_walker *w)
	}
fsck_err:
	if (ret)
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	if (!ret && trans_was_restarted(trans, restart_count))
		ret = -BCH_ERR_transaction_restart_nested;
	return ret;
@@ -1555,7 +1598,7 @@ fsck_err:
	printbuf_exit(&buf);

	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
		bch_err_fn(c, ret);
	return ret;
}

@@ -1597,7 +1640,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,

	BUG_ON(!iter->path->should_be_locked);

	ret = __walk_inode(trans, dir, equiv);
	i = walk_inode(trans, dir, equiv);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret < 0)
		goto err;

@@ -1605,7 +1649,7 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
		*hash_info = bch2_hash_info_init(c, &dir->inodes.data[0].inode);
	dir->first_this_inode = false;

	if (fsck_err_on(ret == INT_MAX, c,
	if (fsck_err_on(!i, c,
			"dirent in nonexisting directory:\n%s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
@@ -1614,13 +1658,8 @@ static int check_dirent(struct btree_trans *trans, struct btree_iter *iter,
		goto out;
	}

	if (ret == INT_MAX) {
		ret = 0;
	if (!i)
		goto out;
	}

	i = dir->inodes.data + ret;
	ret = 0;

	if (fsck_err_on(!S_ISDIR(i->inode.bi_mode), c,
			"dirent in non directory inode type %s:\n%s",
@@ -1725,7 +1764,7 @@ fsck_err:
	printbuf_exit(&buf);

	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
		bch_err_fn(c, ret);
	return ret;
}

@@ -1764,7 +1803,7 @@ static int check_dirents(struct bch_fs *c)
	inode_walker_exit(&target);

	if (ret)
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@@ -1774,34 +1813,34 @@ static int check_xattr(struct btree_trans *trans, struct btree_iter *iter,
		       struct inode_walker *inode)
{
	struct bch_fs *c = trans->c;
	struct inode_walker_entry *i;
	int ret;

	ret = check_key_has_snapshot(trans, iter, k);
	if (ret)
		return ret;

	ret = __walk_inode(trans, inode, k.k->p);
	if (ret < 0)
	i = walk_inode(trans, inode, k.k->p);
	ret = PTR_ERR_OR_ZERO(i);
	if (ret)
		return ret;

	if (inode->first_this_inode)
		*hash_info = bch2_hash_info_init(c, &inode->inodes.data[0].inode);
	inode->first_this_inode = false;

	if (fsck_err_on(ret == INT_MAX, c,
	if (fsck_err_on(!i, c,
			"xattr for missing inode %llu",
			k.k->p.inode))
		return bch2_btree_delete_at(trans, iter, 0);

	if (ret == INT_MAX)
	if (!i)
		return 0;

	ret = 0;

	ret = hash_check_key(trans, bch2_xattr_hash_desc, hash_info, iter, k);
fsck_err:
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@@ -1833,7 +1872,7 @@ static int check_xattrs(struct bch_fs *c)
	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "%s(): error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@@ -1896,12 +1935,18 @@ fsck_err:
noinline_for_stack
static int check_root(struct bch_fs *c)
{
	int ret;

	bch_verbose(c, "checking root directory");

	return bch2_trans_do(c, NULL, NULL,
	ret = bch2_trans_do(c, NULL, NULL,
			     BTREE_INSERT_NOFAIL|
			     BTREE_INSERT_LAZY_RW,
			     check_root_trans(&trans));

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

struct pathbuf_entry {
@@ -2038,7 +2083,7 @@ static int check_path(struct btree_trans *trans,
	}
fsck_err:
	if (ret)
		bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@@ -2081,10 +2126,11 @@ static int check_directory_structure(struct bch_fs *c)
			break;
	}
	bch2_trans_iter_exit(&trans, &iter);

	bch2_trans_exit(&trans);
	darray_exit(&path);

	bch2_trans_exit(&trans);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

@@ -2364,6 +2410,8 @@ static int check_nlinks(struct bch_fs *c)

	kvfree(links.d);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

@@ -2397,7 +2445,6 @@ static int fix_reflink_p_key(struct btree_trans *trans, struct btree_iter *iter,
noinline_for_stack
static int fix_reflink_p(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	int ret;
@@ -2407,15 +2454,16 @@ static int fix_reflink_p(struct bch_fs *c)

	bch_verbose(c, "fixing reflink_p keys");

	bch2_trans_init(&trans, c, BTREE_ITER_MAX, 0);
	ret = bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter,
				BTREE_ID_extents, POS_MIN,
				BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|
				BTREE_ITER_ALL_SNAPSHOTS, k,
				NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
				fix_reflink_p_key(&trans, &iter, k)));

	ret = for_each_btree_key_commit(&trans, iter,
			BTREE_ID_extents, POS_MIN,
			BTREE_ITER_INTENT|BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
			fix_reflink_p_key(&trans, &iter, k));

	bch2_trans_exit(&trans);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

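After the refactor, walk_inode() folds the old INT_MAX convention into the pointer it returns, so callers distinguish three cases; a condensed sketch of the calling pattern used by check_extent(), check_dirent() and check_xattr() above:

	struct inode_walker_entry *i = walk_inode(trans, inode, k.k->p);
	int ret = PTR_ERR_OR_ZERO(i);

	if (ret)
		goto err;	/* lookup failed */
	if (!i)
		goto out;	/* no inode in an ancestor snapshot: key is orphaned */

	/* otherwise use i->inode ... */
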
@@ -150,11 +150,11 @@ static bool bch2_target_congested(struct bch_fs *c, u16 target)
void bch2_bio_free_pages_pool(struct bch_fs *c, struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec bv;
	struct bio_vec *bv;

	bio_for_each_segment_all(bv, bio, iter)
		if (bv.bv_page != ZERO_PAGE(0))
			mempool_free(bv.bv_page, &c->bio_bounce_pages);
		if (bv->bv_page != ZERO_PAGE(0))
			mempool_free(bv->bv_page, &c->bio_bounce_pages);
	bio->bi_vcnt = 0;
}

@@ -451,7 +451,7 @@ retry:
					&devs_have,
					opts.data_replicas,
					opts.data_replicas,
					RESERVE_none, 0, &cl, &wp);
					BCH_WATERMARK_normal, 0, &cl, &wp);
		if (ret) {
			bch2_trans_unlock(trans);
			closure_sync(&cl);

@@ -59,7 +59,7 @@ enum bch_write_flags {

static inline struct workqueue_struct *index_update_wq(struct bch_write_op *op)
{
	return op->alloc_reserve == RESERVE_movinggc
	return op->alloc_reserve == BCH_WATERMARK_copygc
		? op->c->copygc_wq
		: op->c->btree_update_wq;
}
@@ -89,7 +89,7 @@ static inline void bch2_write_op_init(struct bch_write_op *op, struct bch_fs *c,
	op->compression_type = bch2_compression_opt_to_type[opts.compression];
	op->nr_replicas = 0;
	op->nr_replicas_required = c->opts.data_replicas_required;
	op->alloc_reserve = RESERVE_none;
	op->alloc_reserve = BCH_WATERMARK_normal;
	op->incompressible = 0;
	op->open_buckets.nr = 0;
	op->devs_have.nr = 0;

|
||||
break;
|
||||
}
|
||||
} else {
|
||||
ob[nr_got] = bch2_bucket_alloc(c, ca, RESERVE_none, cl);
|
||||
ob[nr_got] = bch2_bucket_alloc(c, ca, BCH_WATERMARK_normal, cl);
|
||||
ret = PTR_ERR_OR_ZERO(ob[nr_got]);
|
||||
if (ret)
|
||||
break;
|
||||
@ -978,7 +978,7 @@ int bch2_set_nr_journal_buckets(struct bch_fs *c, struct bch_dev *ca,
|
||||
}
|
||||
|
||||
if (ret)
|
||||
bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));
|
||||
bch_err_fn(c, ret);
|
||||
unlock:
|
||||
up_write(&c->state_lock);
|
||||
return ret;
|
||||
@ -987,9 +987,12 @@ unlock:
|
||||
int bch2_dev_journal_alloc(struct bch_dev *ca)
|
||||
{
|
||||
unsigned nr;
|
||||
int ret;
|
||||
|
||||
if (dynamic_fault("bcachefs:add:journal_alloc"))
|
||||
return -BCH_ERR_ENOMEM_set_nr_journal_buckets;
|
||||
if (dynamic_fault("bcachefs:add:journal_alloc")) {
|
||||
ret = -BCH_ERR_ENOMEM_set_nr_journal_buckets;
|
||||
goto err;
|
||||
}
|
||||
|
||||
/* 1/128th of the device by default: */
|
||||
nr = ca->mi.nbuckets >> 7;
|
||||
@ -1003,7 +1006,11 @@ int bch2_dev_journal_alloc(struct bch_dev *ca)
|
||||
min(1 << 13,
|
||||
(1 << 24) / ca->mi.bucket_size));
|
||||
|
||||
return __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
|
||||
ret = __bch2_set_nr_journal_buckets(ca, nr, true, NULL);
|
||||
err:
|
||||
if (ret)
|
||||
bch_err_fn(ca, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* startup/shutdown: */
|
||||
|
@@ -61,7 +61,8 @@ static int __bch2_lru_set(struct btree_trans *trans, u16 lru_id,
	EBUG_ON(lru_pos_time(k->k.p) != time);
	EBUG_ON(k->k.p.offset != dev_bucket);

	return bch2_trans_update_buffered(trans, BTREE_ID_lru, k);
	return bch2_trans_update_buffered(trans, BTREE_ID_lru, k,
					  key_type == KEY_TYPE_deleted);
}

int bch2_lru_del(struct btree_trans *trans, u16 lru_id, u64 dev_bucket, u64 time)
@@ -160,20 +161,18 @@ fsck_err:

int bch2_check_lrus(struct bch_fs *c)
{
	struct btree_trans trans;
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bpos last_flushed_pos = POS_MIN;
	int ret = 0;

	bch2_trans_init(&trans, c, 0, 0);

	ret = for_each_btree_key_commit(&trans, iter,
			BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
			NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
			bch2_check_lru_key(&trans, &iter, k, &last_flushed_pos));

	bch2_trans_exit(&trans);
	ret = bch2_trans_run(c,
		for_each_btree_key_commit(&trans, iter,
				BTREE_ID_lru, POS_MIN, BTREE_ITER_PREFETCH, k,
				NULL, NULL, BTREE_INSERT_NOFAIL|BTREE_INSERT_LAZY_RW,
				bch2_check_lru_key(&trans, &iter, k, &last_flushed_pos)));
	if (ret)
		bch_err_fn(c, ret);
	return ret;

}

@@ -49,7 +49,7 @@ static int bch2_dev_usrdata_drop_key(struct btree_trans *trans,
	if (!bch2_bkey_has_device_c(k, dev_idx))
		return 0;

	n = bch2_bkey_make_mut(trans, iter, k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	n = bch2_bkey_make_mut(trans, iter, &k, BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE);
	ret = PTR_ERR_OR_ZERO(n);
	if (ret)
		return ret;

@@ -690,7 +690,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,
	bch2_trans_iter_exit(trans, &iter);

	if (ret) {
		bch_err(c, "%s: error looking up alloc key: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "looking up alloc key");
		goto err;
	}

@@ -701,7 +701,7 @@ int __bch2_evacuate_bucket(struct btree_trans *trans,

	ret = bch2_btree_write_buffer_flush(trans);
	if (ret) {
		bch_err(c, "%s: error flushing btree write buffer: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "flushing btree write buffer");
		goto err;
	}

@@ -904,7 +904,7 @@ next:
	bch2_trans_exit(&trans);

	if (ret)
		bch_err(c, "error in %s(): %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);

	bch2_btree_interior_updates_flush(c);

@@ -1029,6 +1029,8 @@ int bch2_scan_old_btree_nodes(struct bch_fs *c, struct bch_move_stats *stats)
		mutex_unlock(&c->sb_lock);
	}

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

@@ -271,7 +271,7 @@ unsigned long bch2_copygc_wait_amount(struct bch_fs *c)
	for_each_rw_member(ca, c, dev_idx) {
		struct bch_dev_usage usage = bch2_dev_usage_read(ca);

		fragmented_allowed = ((__dev_buckets_available(ca, usage, RESERVE_stripe) *
		fragmented_allowed = ((__dev_buckets_available(ca, usage, BCH_WATERMARK_stripe) *
				       ca->mi.bucket_size) >> 1);
		fragmented = 0;

@@ -369,6 +369,7 @@ static int bch2_copygc_thread(void *arg)
	}

	move_buckets_wait(&trans, &ctxt, &move_buckets, true);
	rhashtable_destroy(&move_buckets.table);
	bch2_trans_exit(&trans);
	bch2_moving_ctxt_exit(&ctxt);

@@ -621,10 +621,11 @@ int bch2_fs_quota_read(struct bch_fs *c)
		for_each_btree_key2(&trans, iter, BTREE_ID_inodes,
				POS_MIN, BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS, k,
				bch2_fs_quota_read_inode(&trans, &iter, k));
	if (ret)
		bch_err(c, "%s: err %s", __func__, bch2_err_str(ret));

	bch2_trans_exit(&trans);

	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

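Most of the logging conversions in this commit collapse the old per-site message into one idiom; sketched, with do_work() as a placeholder:

	int ret = do_work(c);

	/* bch_err_fn() prints the calling function's name plus bch2_err_str(ret) */
	if (ret)
		bch_err_fn(c, ret);
	return ret;
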
@ -685,6 +685,9 @@ static int bch2_journal_replay(struct bch_fs *c, u64 start_seq, u64 end_seq)
|
||||
bch2_journal_log_msg(c, "journal replay finished");
|
||||
err:
|
||||
kvfree(keys_sorted);
|
||||
|
||||
if (ret)
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1034,9 +1037,6 @@ static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
|
||||
root_tree.k.p.offset = 1;
|
||||
root_tree.v.master_subvol = cpu_to_le32(1);
|
||||
root_tree.v.root_snapshot = cpu_to_le32(U32_MAX);
|
||||
ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees,
|
||||
&root_tree.k_i,
|
||||
NULL, NULL, 0);
|
||||
|
||||
bkey_snapshot_init(&root_snapshot.k_i);
|
||||
root_snapshot.k.p.offset = U32_MAX;
|
||||
@ -1046,28 +1046,27 @@ static int bch2_fs_initialize_subvolumes(struct bch_fs *c)
|
||||
root_snapshot.v.tree = cpu_to_le32(1);
|
||||
SET_BCH_SNAPSHOT_SUBVOL(&root_snapshot.v, true);
|
||||
|
||||
ret = bch2_btree_insert(c, BTREE_ID_snapshots,
|
||||
&root_snapshot.k_i,
|
||||
NULL, NULL, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
bkey_subvolume_init(&root_volume.k_i);
|
||||
root_volume.k.p.offset = BCACHEFS_ROOT_SUBVOL;
|
||||
root_volume.v.flags = 0;
|
||||
root_volume.v.snapshot = cpu_to_le32(U32_MAX);
|
||||
root_volume.v.inode = cpu_to_le64(BCACHEFS_ROOT_INO);
|
||||
|
||||
ret = bch2_btree_insert(c, BTREE_ID_subvolumes,
|
||||
&root_volume.k_i,
|
||||
NULL, NULL, 0);
|
||||
ret = bch2_btree_insert(c, BTREE_ID_snapshot_trees,
|
||||
&root_tree.k_i,
|
||||
NULL, NULL, 0) ?:
|
||||
bch2_btree_insert(c, BTREE_ID_snapshots,
|
||||
&root_snapshot.k_i,
|
||||
NULL, NULL, 0) ?:
|
||||
bch2_btree_insert(c, BTREE_ID_subvolumes,
|
||||
&root_volume.k_i,
|
||||
NULL, NULL, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
|
||||
static int __bch2_fs_upgrade_for_subvolumes(struct btree_trans *trans)
|
||||
{
|
||||
struct btree_iter iter;
|
||||
struct bkey_s_c k;
|
||||
@ -1097,9 +1096,19 @@ err:
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* set bi_subvol on root inode */
|
||||
noinline_for_stack
|
||||
static int bch2_fs_upgrade_for_subvolumes(struct bch_fs *c)
|
||||
{
|
||||
int ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
|
||||
__bch2_fs_upgrade_for_subvolumes(&trans));
|
||||
if (ret)
|
||||
bch_err_fn(c, ret);
|
||||
return ret;
|
||||
}
|

int bch2_fs_recovery(struct bch_fs *c)
{
	const char *err = "cannot allocate memory";
	struct bch_sb_field_clean *clean = NULL;
	struct jset *last_journal_entry = NULL;
	u64 last_seq, blacklist_seq, journal_seq;
@ -1137,12 +1146,6 @@ int bch2_fs_recovery(struct bch_fs *c)
		goto err;
	}

	if (!(c->sb.features & (1ULL << BCH_FEATURE_alloc_v2))) {
		bch_info(c, "alloc_v2 feature bit not set, fsck required");
		c->opts.fsck = true;
		c->opts.fix_errors = FSCK_OPT_YES;
	}

	if (!c->opts.nochanges) {
		if (c->sb.version < bcachefs_metadata_required_upgrade_below) {
			bch_info(c, "version %s (%u) prior to %s (%u), upgrade and fsck required",
@ -1286,34 +1289,28 @@ use_clean:
		goto err;

	bch_verbose(c, "starting alloc read");
	err = "error reading allocation information";

	down_read(&c->gc_lock);
	ret = c->sb.version < bcachefs_metadata_version_bucket_gens
		? bch2_alloc_read(c)
		: bch2_bucket_gens_read(c);
	up_read(&c->gc_lock);

	if (ret)
		goto err;
	bch_verbose(c, "alloc read done");

	bch_verbose(c, "starting stripes_read");
	err = "error reading stripes";
	ret = bch2_stripes_read(c);
	if (ret)
		goto err;
	bch_verbose(c, "stripes_read done");

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		err = "error creating root snapshot node";
		ret = bch2_fs_initialize_subvolumes(c);
		if (ret)
			goto err;
	}

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
@ -1323,7 +1320,6 @@ use_clean:
		bool metadata_only = c->opts.norecovery;

		bch_info(c, "checking allocations");
		err = "error checking allocations";
		ret = bch2_gc(c, true, metadata_only);
		if (ret)
			goto err;
@ -1334,7 +1330,6 @@ use_clean:
		set_bit(BCH_FS_MAY_GO_RW, &c->flags);

		bch_info(c, "starting journal replay, %zu keys", c->journal_keys.nr);
		err = "journal replay failed";
		ret = bch2_journal_replay(c, last_seq, blacklist_seq - 1);
		if (ret)
			goto err;
@ -1342,7 +1337,6 @@ use_clean:
		bch_info(c, "journal replay done");

		bch_info(c, "checking need_discard and freespace btrees");
		err = "error checking need_discard and freespace btrees";
		ret = bch2_check_alloc_info(c);
		if (ret)
			goto err;
@ -1351,7 +1345,6 @@ use_clean:
		set_bit(BCH_FS_CHECK_ALLOC_DONE, &c->flags);

		bch_info(c, "checking lrus");
		err = "error checking lrus";
		ret = bch2_check_lrus(c);
		if (ret)
			goto err;
@ -1359,21 +1352,18 @@ use_clean:
		set_bit(BCH_FS_CHECK_LRUS_DONE, &c->flags);

		bch_info(c, "checking backpointers to alloc keys");
		err = "error checking backpointers to alloc keys";
		ret = bch2_check_btree_backpointers(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking backpointers to alloc keys");

		bch_info(c, "checking backpointers to extents");
		err = "error checking backpointers to extents";
		ret = bch2_check_backpointers_to_extents(c);
		if (ret)
			goto err;
		bch_verbose(c, "done checking backpointers to extents");

		bch_info(c, "checking extents to backpointers");
		err = "error checking extents to backpointers";
		ret = bch2_check_extents_to_backpointers(c);
		if (ret)
			goto err;
@ -1381,7 +1371,6 @@ use_clean:
		set_bit(BCH_FS_CHECK_BACKPOINTERS_DONE, &c->flags);

		bch_info(c, "checking alloc to lru refs");
		err = "error checking alloc to lru refs";
		ret = bch2_check_alloc_to_lru_refs(c);
		if (ret)
			goto err;
@ -1401,7 +1390,6 @@ use_clean:
		set_bit(BCH_FS_MAY_GO_RW, &c->flags);

		bch_verbose(c, "starting journal replay, %zu keys", c->journal_keys.nr);
		err = "journal replay failed";
		ret = bch2_journal_replay(c, last_seq, blacklist_seq - 1);
		if (ret)
			goto err;
@ -1409,7 +1397,6 @@ use_clean:
		bch_info(c, "journal replay done");
	}

	err = "error initializing freespace";
	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;
@ -1417,7 +1404,6 @@ use_clean:
	if (c->sb.version < bcachefs_metadata_version_bucket_gens &&
	    c->opts.version_upgrade) {
		bch_info(c, "initializing bucket_gens");
		err = "error initializing bucket gens";
		ret = bch2_bucket_gens_init(c);
		if (ret)
			goto err;
@ -1425,24 +1411,18 @@ use_clean:
	}

	if (c->sb.version < bcachefs_metadata_version_snapshot_2) {
		/* set bi_subvol on root inode */
		err = "error upgrade root inode for subvolumes";
		ret = bch2_trans_do(c, NULL, NULL, BTREE_INSERT_LAZY_RW,
				    bch2_fs_upgrade_for_subvolumes(&trans));
		ret = bch2_fs_upgrade_for_subvolumes(c);
		if (ret)
			goto err;
	}

	if (c->opts.fsck) {
		bch_info(c, "starting fsck");
		err = "error in fsck";
		ret = bch2_fsck_full(c);
		if (ret)
			goto err;
		bch_verbose(c, "fsck done");
	} else if (!c->sb.clean) {
		bch_verbose(c, "checking for deleted inodes");
		err = "error in recovery";
		ret = bch2_fsck_walk_inodes_only(c);
		if (ret)
			goto err;
@ -1489,11 +1469,8 @@ use_clean:
	bch2_move_stats_init(&stats, "recovery");

	bch_info(c, "scanning for old btree nodes");
	ret = bch2_fs_read_write(c);
	if (ret)
		goto err;

	ret = bch2_scan_old_btree_nodes(c, &stats);
	ret = bch2_fs_read_write(c) ?:
		bch2_scan_old_btree_nodes(c, &stats);
	if (ret)
		goto err;
	bch_info(c, "scanning for old btree nodes done");
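Editor's note: the change just above folds two sequential calls into one statement with the GNU C binary `?:` extension, used pervasively in this tree: `a ?: b` evaluates `b` only when `a` is zero, i.e. only when the first call succeeded. A self-contained illustration (the helper name is hypothetical):

/* Hypothetical helper showing the error-chaining idiom: */
static int read_write_then_scan(struct bch_fs *c, struct bch_move_stats *stats)
{
	/* equivalent to: ret = f(); if (!ret) ret = g(); return ret; */
	return bch2_fs_read_write(c) ?:
	       bch2_scan_old_btree_nodes(c, stats);
}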
@ -1521,7 +1498,7 @@ out:
	}

	if (ret)
		bch_err(c, "Error in recovery: %s (%s)", err, bch2_err_str(ret));
		bch_err_fn(c, ret);
	else
		bch_verbose(c, "ret %s", bch2_err_str(ret));
	return ret;
@ -1536,7 +1513,6 @@ int bch2_fs_initialize(struct bch_fs *c)
	struct bch_inode_unpacked root_inode, lostfound_inode;
	struct bkey_inode_buf packed_inode;
	struct qstr lostfound = QSTR("lost+found");
	const char *err = "cannot allocate memory";
	struct bch_dev *ca;
	unsigned i;
	int ret;
@ -1570,7 +1546,6 @@ int bch2_fs_initialize(struct bch_fs *c)
	for_each_online_member(ca, c, i)
		bch2_dev_usage_init(ca);

	err = "unable to allocate journal buckets";
	for_each_online_member(ca, c, i) {
		ret = bch2_dev_journal_alloc(ca);
		if (ret) {
@ -1586,7 +1561,6 @@ int bch2_fs_initialize(struct bch_fs *c)
	bch2_fs_journal_start(&c->journal, 1);
	bch2_journal_set_replay_done(&c->journal);

	err = "error going read-write";
	ret = bch2_fs_read_write_early(c);
	if (ret)
		goto err;
@ -1596,7 +1570,6 @@ int bch2_fs_initialize(struct bch_fs *c)
	 * btree updates
	 */
	bch_verbose(c, "marking superblocks");
	err = "error marking superblock and journal";
	for_each_member_device(ca, c, i) {
		ret = bch2_trans_mark_dev_sb(c, ca);
		if (ret) {
@ -1607,19 +1580,15 @@ int bch2_fs_initialize(struct bch_fs *c)
		ca->new_fs_bucket_idx = 0;
	}

	bch_verbose(c, "initializing freespace");
	err = "error initializing freespace";
	ret = bch2_fs_freespace_init(c);
	if (ret)
		goto err;

	err = "error creating root snapshot node";
	ret = bch2_fs_initialize_subvolumes(c);
	if (ret)
		goto err;

	bch_verbose(c, "reading snapshots table");
	err = "error reading snapshots table";
	ret = bch2_fs_snapshots_start(c);
	if (ret)
		goto err;
@ -1631,16 +1600,16 @@ int bch2_fs_initialize(struct bch_fs *c)
	bch2_inode_pack(&packed_inode, &root_inode);
	packed_inode.inode.k.p.snapshot = U32_MAX;

	err = "error creating root directory";
	ret = bch2_btree_insert(c, BTREE_ID_inodes,
				&packed_inode.inode.k_i,
				NULL, NULL, 0);
	if (ret)
	if (ret) {
		bch_err_msg(c, ret, "creating root directory");
		goto err;
	}

	bch2_inode_init_early(c, &lostfound_inode);

	err = "error creating lost+found";
	ret = bch2_trans_do(c, NULL, NULL, 0,
		bch2_create_trans(&trans,
			BCACHEFS_ROOT_SUBVOL_INUM,
@ -1649,7 +1618,7 @@ int bch2_fs_initialize(struct bch_fs *c)
			0, 0, S_IFDIR|0700, 0,
			NULL, NULL, (subvol_inum) { 0 }, 0));
	if (ret) {
		bch_err(c, "error creating lost+found");
		bch_err_msg(c, ret, "creating lost+found");
		goto err;
	}

@ -1659,10 +1628,11 @@ int bch2_fs_initialize(struct bch_fs *c)
		goto err;
	}

	err = "error writing first journal entry";
	ret = bch2_journal_flush(&c->journal);
	if (ret)
	if (ret) {
		bch_err_msg(c, ret, "writing first journal entry");
		goto err;
	}

	mutex_lock(&c->sb_lock);
	SET_BCH_SB_INITIALIZED(c->disk_sb.sb, true);
@ -1673,6 +1643,6 @@ int bch2_fs_initialize(struct bch_fs *c)

	return 0;
err:
	pr_err("Error initializing new filesystem: %s (%s)", err, bch2_err_str(ret));
	bch_err_fn(ca, ret);
	return ret;
}
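Editor's note: a theme of this commit is dropping the rolling `err = "..."` string (printed once at the error label) in favor of logging at each failure site with bch_err_msg()/bch_err_fn(). For the conversions above to be equivalent, the helpers would need roughly the following shape; these are assumed for illustration, not the tree's actual definitions:

/* Assumed shapes -- illustration only, not the tree's definitions: */
#define bch_err_msg(_c, _ret, _msg, ...)				\
	bch_err(_c, _msg ", error %s", ##__VA_ARGS__, bch2_err_str(_ret))

#define bch_err_fn(_c, _ret)						\
do {									\
	if (_ret)							\
		bch_err(_c, "%s: error %s", __func__,			\
			bch2_err_str(_ret));				\
} while (0)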
@ -95,21 +95,22 @@ bool bch2_reflink_v_merge(struct bch_fs *c, struct bkey_s _l, struct bkey_s_c _r
	return l.v->refcount == r.v->refcount && bch2_extent_merge(c, _l, _r);
}

static inline void check_indirect_extent_deleting(struct bkey_i *new, unsigned *flags)
{
	if ((*flags & BTREE_TRIGGER_INSERT) && !*bkey_refcount(new)) {
		new->k.type = KEY_TYPE_deleted;
		new->k.size = 0;
		set_bkey_val_u64s(&new->k, 0);
		*flags &= ~BTREE_TRIGGER_INSERT;
	}
}

int bch2_trans_mark_reflink_v(struct btree_trans *trans,
			      enum btree_id btree_id, unsigned level,
			      struct bkey_s_c old, struct bkey_i *new,
			      unsigned flags)
{
	if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
		struct bkey_i_reflink_v *r = bkey_i_to_reflink_v(new);

		if (!r->v.refcount) {
			r->k.type = KEY_TYPE_deleted;
			r->k.size = 0;
			set_bkey_val_u64s(&r->k, 0);
			return 0;
		}
	}
	check_indirect_extent_deleting(new, &flags);

	return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
}
@ -123,7 +124,7 @@ int bch2_indirect_inline_data_invalid(const struct bch_fs *c, struct bkey_s_c k,
}

void bch2_indirect_inline_data_to_text(struct printbuf *out,
					struct bch_fs *c, struct bkey_s_c k)
				       struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_indirect_inline_data d = bkey_s_c_to_indirect_inline_data(k);
	unsigned datalen = bkey_inline_data_bytes(k.k);
@ -138,16 +139,7 @@ int bch2_trans_mark_indirect_inline_data(struct btree_trans *trans,
					 struct bkey_s_c old, struct bkey_i *new,
					 unsigned flags)
{
	if (!(flags & BTREE_TRIGGER_OVERWRITE)) {
		struct bkey_i_indirect_inline_data *r =
			bkey_i_to_indirect_inline_data(new);

		if (!r->v.refcount) {
			r->k.type = KEY_TYPE_deleted;
			r->k.size = 0;
			set_bkey_val_u64s(&r->k, 0);
		}
	}
	check_indirect_extent_deleting(new, &flags);

	return 0;
}
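Editor's note: the new helper centralizes the "refcount dropped to zero" path for both indirect extent types. A minimal sketch of a trigger using it, mirroring bch2_trans_mark_reflink_v above (the function name here is hypothetical):

/* Hypothetical trigger showing the helper's intended use: */
static int example_mark_indirect(struct btree_trans *trans,
				 enum btree_id btree_id, unsigned level,
				 struct bkey_s_c old, struct bkey_i *new,
				 unsigned flags)
{
	/*
	 * If the new value's refcount is zero, *new becomes a zero-size
	 * KEY_TYPE_deleted key and BTREE_TRIGGER_INSERT is cleared, so
	 * no insert-side accounting runs for a dying indirect extent:
	 */
	check_indirect_extent_deleting(new, &flags);

	return bch2_trans_mark_extent(trans, btree_id, level, old, new, flags);
}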
libbcachefs/seqmutex.h (new file, 48 lines)
@ -0,0 +1,48 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SEQMUTEX_H
#define _BCACHEFS_SEQMUTEX_H

#include <linux/mutex.h>

struct seqmutex {
	struct mutex	lock;
	u32		seq;
};

#define seqmutex_init(_lock)	mutex_init(&(_lock)->lock)

static inline bool seqmutex_trylock(struct seqmutex *lock)
{
	return mutex_trylock(&lock->lock);
}

static inline void seqmutex_lock(struct seqmutex *lock)
{
	mutex_lock(&lock->lock);
}

static inline void seqmutex_unlock(struct seqmutex *lock)
{
	lock->seq++;
	mutex_unlock(&lock->lock);
}

static inline u32 seqmutex_seq(struct seqmutex *lock)
{
	return lock->seq;
}

static inline bool seqmutex_relock(struct seqmutex *lock, u32 seq)
{
	if (lock->seq != seq || !mutex_trylock(&lock->lock))
		return false;

	if (lock->seq != seq) {
		mutex_unlock(&lock->lock);
		return false;
	}

	return true;
}

#endif /* _BCACHEFS_SEQMUTEX_H */
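Editor's note: a seqmutex is a mutex whose every unlock advances a sequence number, letting a walker that had to drop the lock detect whether it can safely resume. A sketch of how I read the semantics (seqmutex_demo is not in the tree):

/* Sketch of the sequence semantics (illustrative only): */
static void seqmutex_demo(struct seqmutex *m)
{
	u32 seq;

	seqmutex_lock(m);
	/* ... inspect a protected structure ... */
	seqmutex_unlock(m);		/* bumps m->seq */

	seq = seqmutex_seq(m);		/* sample after our own unlock */

	/* ... sleepable work without the lock held ... */

	if (!seqmutex_relock(m, seq)) {
		/* someone else locked and unlocked in between: the
		 * structure may have changed, restart the walk */
		return;
	}
	/* no unlock happened since the sample; safe to continue */
	seqmutex_unlock(m);
}

Note the second `lock->seq != seq` check inside seqmutex_relock(): it closes the window where the sequence advances between the unlocked read and the trylock succeeding.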
@ -385,7 +385,7 @@ static int check_snapshot_tree(struct btree_trans *trans,
	if (ret)
		goto err;

	u = bch2_bkey_make_mut_typed(trans, iter, k, 0, snapshot_tree);
	u = bch2_bkey_make_mut_typed(trans, iter, &k, 0, snapshot_tree);
	ret = PTR_ERR_OR_ZERO(u);
	if (ret)
		goto err;
@ -473,7 +473,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,
		return ret;

	if (ret || le32_to_cpu(s_t.root_snapshot) != root_id) {
		u = bch2_bkey_make_mut_typed(trans, &root_iter, root.s_c, 0, snapshot);
		u = bch2_bkey_make_mut_typed(trans, &root_iter, &root.s_c, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u) ?:
			snapshot_tree_create(trans, root_id,
				bch2_snapshot_tree_oldest_subvol(c, root_id),
@ -487,7 +487,7 @@ static int snapshot_tree_ptr_repair(struct btree_trans *trans,
	}

	if (s->k->p.snapshot != root_id) {
		u = bch2_bkey_make_mut_typed(trans, iter, s->s_c, 0, snapshot);
		u = bch2_bkey_make_mut_typed(trans, iter, &s->s_c, 0, snapshot);
		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto err;
@ -623,7 +623,7 @@ int bch2_fs_check_snapshots(struct bch_fs *c)
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_snapshot(&trans, &iter, k)));
	if (ret)
		bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@ -677,7 +677,7 @@ static int check_subvol(struct btree_trans *trans,
			"subvolume %llu is not set as snapshot but is not master subvolume",
			k.k->p.offset)) {
		struct bkey_i_subvolume *s =
			bch2_bkey_make_mut_typed(trans, iter, subvol.s_c, 0, subvolume);
			bch2_bkey_make_mut_typed(trans, iter, &subvol.s_c, 0, subvolume);
		ret = PTR_ERR_OR_ZERO(s);
		if (ret)
			return ret;
@ -702,8 +702,7 @@ int bch2_fs_check_subvols(struct bch_fs *c)
			NULL, NULL, BTREE_INSERT_LAZY_RW|BTREE_INSERT_NOFAIL,
		check_subvol(&trans, &iter, k)));
	if (ret)
		bch_err(c, "%s: error %s", __func__, bch2_err_str(ret));

	bch_err_fn(c, ret);
	return ret;
}

@ -724,7 +723,7 @@ int bch2_fs_snapshots_start(struct bch_fs *c)
		bch2_mark_snapshot(&trans, BTREE_ID_snapshots, 0, bkey_s_c_null, k, 0) ?:
		bch2_snapshot_set_equiv(&trans, k)));
	if (ret)
		bch_err(c, "error starting snapshots: %s", bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@ -1123,6 +1122,8 @@ int bch2_delete_dead_snapshots(struct bch_fs *c)
err:
	darray_exit(&deleted);
	bch2_trans_exit(&trans);
	if (ret)
		bch_err_fn(c, ret);
	return ret;
}

@ -1248,7 +1249,7 @@ static int bch2_subvolume_reparent(struct btree_trans *trans,
	    le32_to_cpu(bkey_s_c_to_subvolume(k).v->parent) != old_parent)
		return 0;

	s = bch2_bkey_make_mut_typed(trans, iter, k, 0, subvolume);
	s = bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
	ret = PTR_ERR_OR_ZERO(s);
	if (ret)
		return ret;
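Editor's note: every bch2_bkey_make_mut_typed() call site in this commit switches from passing the key by value to passing its address, which implies the helper now takes a struct bkey_s_c * so it can re-point the caller's key at the mutable copy it allocates. A sketch of the resulting call pattern (repair_one_key is hypothetical):

/* Hypothetical call site showing the by-address pattern: */
static int repair_one_key(struct btree_trans *trans, struct btree_iter *iter,
			  struct bkey_s_c k)
{
	struct bkey_i_subvolume *s =
		bch2_bkey_make_mut_typed(trans, iter, &k, 0, subvolume);
	int ret = PTR_ERR_OR_ZERO(s);

	if (ret)
		return ret;

	/* ... edit s->v; 'k' now also refers to the mutable copy ... */
	return 0;
}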
@ -209,8 +209,8 @@ static int validate_sb_layout(struct bch_sb_layout *layout, struct printbuf *out
	u64 offset, prev_offset, max_sectors;
	unsigned i;

	if (uuid_le_cmp(layout->magic, BCACHE_MAGIC) &&
	    uuid_le_cmp(layout->magic, BCHFS_MAGIC)) {
	if (!uuid_equal(&layout->magic, &BCACHE_MAGIC) &&
	    !uuid_equal(&layout->magic, &BCHFS_MAGIC)) {
		prt_printf(out, "Not a bcachefs superblock layout");
		return -BCH_ERR_invalid_sb_layout;
	}
@ -298,12 +298,12 @@ static int bch2_sb_validate(struct bch_sb_handle *disk_sb, struct printbuf *out,
		return -BCH_ERR_invalid_sb_block_size;
	}

	if (bch2_is_zero(sb->user_uuid.b, sizeof(uuid_le))) {
	if (bch2_is_zero(sb->user_uuid.b, sizeof(sb->user_uuid))) {
		prt_printf(out, "Bad user UUID (got zeroes)");
		return -BCH_ERR_invalid_sb_uuid;
	}

	if (bch2_is_zero(sb->uuid.b, sizeof(uuid_le))) {
	if (bch2_is_zero(sb->uuid.b, sizeof(sb->uuid))) {
		prt_printf(out, "Bad internal UUID (got zeroes)");
		return -BCH_ERR_invalid_sb_uuid;
	}
@ -526,8 +526,8 @@ reread:
		return ret;
	}

	if (uuid_le_cmp(sb->sb->magic, BCACHE_MAGIC) &&
	    uuid_le_cmp(sb->sb->magic, BCHFS_MAGIC)) {
	if (!uuid_equal(&sb->sb->magic, &BCACHE_MAGIC) &&
	    !uuid_equal(&sb->sb->magic, &BCHFS_MAGIC)) {
		prt_printf(err, "Not a bcachefs superblock");
		return -BCH_ERR_invalid_sb_magic;
	}

@ -79,7 +79,7 @@ static inline void bch2_check_set_feature(struct bch_fs *c, unsigned feat)

static inline bool bch2_member_exists(struct bch_member *m)
{
	return !bch2_is_zero(m->uuid.b, sizeof(uuid_le));
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_dev_exists(struct bch_sb *sb,
@ -104,7 +104,7 @@ static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= !bch2_is_zero(mi->uuid.b, sizeof(uuid_le)),
		.valid		= bch2_member_exists(mi),
	};
}

@ -139,20 +139,20 @@ found:
	return c;
}

static struct bch_fs *__bch2_uuid_to_fs(uuid_le uuid)
static struct bch_fs *__bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

	lockdep_assert_held(&bch_fs_list_lock);

	list_for_each_entry(c, &bch_fs_list, list)
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid_le)))
		if (!memcmp(&c->disk_sb.sb->uuid, &uuid, sizeof(uuid)))
			return c;

	return NULL;
}

struct bch_fs *bch2_uuid_to_fs(uuid_le uuid)
struct bch_fs *bch2_uuid_to_fs(__uuid_t uuid)
{
	struct bch_fs *c;

@ -997,7 +997,7 @@ static int bch2_dev_in_fs(struct bch_sb *fs, struct bch_sb *sb)
		le64_to_cpu(fs->seq) > le64_to_cpu(sb->seq) ? fs : sb;
	struct bch_sb_field_members *mi = bch2_sb_get_members(newest);

	if (uuid_le_cmp(fs->uuid, sb->uuid))
	if (!uuid_equal(&fs->uuid, &sb->uuid))
		return -BCH_ERR_device_not_a_member_of_filesystem;

	if (!bch2_dev_exists(newest, mi, sb->dev_idx))

@ -223,7 +223,7 @@ static inline bool is_superblock_bucket(struct bch_dev *ca, u64 b)
}

struct bch_fs *bch2_dev_to_fs(dev_t);
struct bch_fs *bch2_uuid_to_fs(uuid_le);
struct bch_fs *bch2_uuid_to_fs(__uuid_t);

bool bch2_dev_state_allowed(struct bch_fs *, struct bch_dev *,
			    enum bch_member_state, int);
@ -379,7 +379,7 @@ static void bch2_btree_wakeup_all(struct bch_fs *c)
{
	struct btree_trans *trans;

	mutex_lock(&c->btree_trans_lock);
	seqmutex_lock(&c->btree_trans_lock);
	list_for_each_entry(trans, &c->btree_trans_list, list) {
		struct btree_bkey_cached_common *b = READ_ONCE(trans->locking);

@ -387,7 +387,7 @@ static void bch2_btree_wakeup_all(struct bch_fs *c)
			six_lock_wakeup_all(&b->lock);

	}
	mutex_unlock(&c->btree_trans_lock);
	seqmutex_unlock(&c->btree_trans_lock);
}

SHOW(bch2_fs)
@ -850,8 +850,8 @@ static void dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)

	prt_printf(out, "reserves:");
	prt_newline(out);
	for (i = 0; i < RESERVE_NR; i++) {
		prt_str(out, bch2_alloc_reserves[i]);
	for (i = 0; i < BCH_WATERMARK_NR; i++) {
		prt_str(out, bch2_watermarks[i]);
		prt_tab(out);
		prt_u64(out, bch2_dev_buckets_reserved(ca, i));
		prt_tab_rjust(out);
@ -47,7 +47,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
		bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(&trans, &iter, &k.k_i, 0));
	if (ret) {
		bch_err(c, "%s(): update error in: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "update error");
		goto err;
	}

@ -56,7 +56,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(&trans, &iter, 0));
	if (ret) {
		bch_err(c, "%s(): delete error (first): %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "delete error (first)");
		goto err;
	}

@ -65,7 +65,7 @@ static int test_delete(struct bch_fs *c, u64 nr)
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(&trans, &iter, 0));
	if (ret) {
		bch_err(c, "%s(): delete error (second): %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "delete error (second)");
		goto err;
	}
err:
@ -93,7 +93,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
		bch2_btree_iter_traverse(&iter) ?:
		bch2_trans_update(&trans, &iter, &k.k_i, 0));
	if (ret) {
		bch_err(c, "%s(): update error: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "update error");
		goto err;
	}

@ -104,7 +104,7 @@ static int test_delete_written(struct bch_fs *c, u64 nr)
		bch2_btree_iter_traverse(&iter) ?:
		bch2_btree_delete_at(&trans, &iter, 0));
	if (ret) {
		bch_err(c, "%s(): delete error: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "delete error");
		goto err;
	}
err:
@ -137,7 +137,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
					NULL, NULL, 0);
		if (ret) {
			bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
			bch_err_msg(c, ret, "insert error");
			goto err;
		}
	}
@ -153,7 +153,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret) {
		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating forwards");
		goto err;
	}

@ -168,7 +168,7 @@ static int test_iterate(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret) {
		bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating backwards");
		goto err;
	}

@ -204,7 +204,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
					NULL, NULL, 0);
		if (ret) {
			bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
			bch_err_msg(c, ret, "insert error");
			goto err;
		}
	}
@ -221,7 +221,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret) {
		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating forwards");
		goto err;
	}

@ -237,7 +237,7 @@ static int test_iterate_extents(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret) {
		bch_err(c, "%s(): error iterating backwards: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating backwards");
		goto err;
	}

@ -272,7 +272,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_xattrs, &k.k_i,
					NULL, NULL, 0);
		if (ret) {
			bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
			bch_err_msg(c, ret, "insert error");
			goto err;
		}
	}
@ -289,7 +289,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret) {
		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating forwards");
		goto err;
	}

@ -312,7 +312,7 @@ static int test_iterate_slots(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret < 0) {
		bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating forwards by slots");
		goto err;
	}
	ret = 0;
@ -346,7 +346,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
		ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
					NULL, NULL, 0);
		if (ret) {
			bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
			bch_err_msg(c, ret, "insert error");
			goto err;
		}
	}
@ -364,7 +364,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret) {
		bch_err(c, "%s(): error iterating forwards: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating forwards");
		goto err;
	}

@ -387,7 +387,7 @@ static int test_iterate_slots_extents(struct bch_fs *c, u64 nr)
			0;
		}));
	if (ret) {
		bch_err(c, "%s(): error iterating forwards by slots: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "error iterating forwards by slots");
		goto err;
	}
	ret = 0;
@ -461,7 +461,7 @@ static int insert_test_extent(struct bch_fs *c,
	ret = bch2_btree_insert(c, BTREE_ID_extents, &k.k_i,
				NULL, NULL, 0);
	if (ret)
		bch_err(c, "%s(): insert error: %s", __func__, bch2_err_str(ret));
	bch_err_fn(c, ret);
	return ret;
}

@ -560,7 +560,7 @@ static int test_snapshots(struct bch_fs *c, u64 nr)

	ret = test_snapshot_filter(c, snapids[0], snapids[1]);
	if (ret) {
		bch_err(c, "%s(): err from test_snapshot_filter: %s", __func__, bch2_err_str(ret));
		bch_err_msg(c, ret, "from test_snapshot_filter");
		return ret;
	}

@ -674,7 +674,7 @@ static int rand_mixed_trans(struct btree_trans *trans,
	k = bch2_btree_iter_peek(iter);
	ret = bkey_err(k);
	if (ret && !bch2_err_matches(ret, BCH_ERR_transaction_restart))
		bch_err(trans->c, "%s(): lookup error: %s", __func__, bch2_err_str(ret));
		bch_err_msg(trans->c, ret, "lookup error");
	if (ret)
		return ret;

@ -839,9 +839,4 @@ static inline int u8_cmp(u8 l, u8 r)

#include <linux/uuid.h>

static inline int uuid_le_cmp(const uuid_le u1, const uuid_le u2)
{
	return memcmp(&u1, &u2, sizeof(uuid_le));
}

#endif /* _BCACHEFS_UTIL_H */
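Editor's note: with the local uuid_le_cmp() helper removed above, comparisons go through the kernel's <linux/uuid.h> API. Note the inverted sense: uuid_le_cmp() was memcmp-style (zero meaning equal), while uuid_equal() takes pointers and returns true on equality, which is why each converted condition in the superblock code gains a `!`. A small sketch of the equivalence (uuids_mismatch is a hypothetical wrapper for illustration):

#include <linux/uuid.h>

/* memcmp-style check (removed):  if (uuid_le_cmp(a, b))    -> mismatch
 * boolean-style check (new):     if (!uuid_equal(&a, &b))  -> mismatch */
static bool uuids_mismatch(const uuid_t *a, const uuid_t *b)
{
	return !uuid_equal(a, b);
}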
@ -618,8 +618,8 @@ static const struct xattr_handler bch_xattr_bcachefs_effective_handler = {
const struct xattr_handler *bch2_xattr_handlers[] = {
	&bch_xattr_user_handler,
#ifdef CONFIG_BCACHEFS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
	&nop_posix_acl_access,
	&nop_posix_acl_default,
#endif
	&bch_xattr_trusted_handler,
	&bch_xattr_security_handler,
@ -633,9 +633,9 @@ const struct xattr_handler *bch2_xattr_handlers[] = {
static const struct xattr_handler *bch_xattr_handler_map[] = {
	[KEY_TYPE_XATTR_INDEX_USER]		= &bch_xattr_user_handler,
	[KEY_TYPE_XATTR_INDEX_POSIX_ACL_ACCESS] =
		&posix_acl_access_xattr_handler,
		&nop_posix_acl_access,
	[KEY_TYPE_XATTR_INDEX_POSIX_ACL_DEFAULT] =
		&posix_acl_default_xattr_handler,
		&nop_posix_acl_default,
	[KEY_TYPE_XATTR_INDEX_TRUSTED]		= &bch_xattr_trusted_handler,
	[KEY_TYPE_XATTR_INDEX_SECURITY]		= &bch_xattr_security_handler,
};
@ -168,10 +168,10 @@ struct bio *bio_split(struct bio *bio, int sectors,
void bio_free_pages(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec bvec;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter)
		__free_page(bvec.bv_page);
		__free_page(bvec->bv_page);
}

void bio_advance(struct bio *bio, unsigned bytes)
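Editor's note: as the bio_free_pages() conversion shows, the reworked all-segments iterator yields a struct bio_vec * rather than a by-value bio_vec, so loop bodies dereference the pointer. A minimal sketch of another caller in the same style (bio_put_pages is hypothetical):

/* Hypothetical caller using the pointer-yielding iterator: */
static void bio_put_pages(struct bio *bio)
{
	struct bvec_iter_all iter;
	struct bio_vec *bvec;

	bio_for_each_segment_all(bvec, bio, iter)
		put_page(bvec->bv_page);
}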
@ -3,12 +3,12 @@
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>

const struct xattr_handler posix_acl_access_xattr_handler = {
const struct xattr_handler nop_posix_acl_access = {
	.name	= XATTR_NAME_POSIX_ACL_ACCESS,
	.flags	= ACL_TYPE_ACCESS,
};

const struct xattr_handler posix_acl_default_xattr_handler = {
const struct xattr_handler nop_posix_acl_default = {
	.name	= XATTR_NAME_POSIX_ACL_DEFAULT,
	.flags	= ACL_TYPE_DEFAULT,
};