Update bcachefs sources to 1712318522 bcachefs: allow journal replay on ro mount

This commit is contained in:
Kent Overstreet 2019-03-29 14:36:06 -04:00
parent 133dfeb648
commit be02db130b
9 changed files with 54 additions and 53 deletions

View File

@ -1 +1 @@
d868a87c678935c89df9bca63d708d616529b0d2 1712318522fdaa533f8622f4c7da05e44a4828b0

View File

@ -65,9 +65,9 @@ int cmd_fsck(int argc, char *argv[])
if (IS_ERR(c)) if (IS_ERR(c))
die("error opening %s: %s", argv[0], strerror(-PTR_ERR(c))); die("error opening %s: %s", argv[0], strerror(-PTR_ERR(c)));
if (test_bit(BCH_FS_FSCK_FIXED_ERRORS, &c->flags)) if (test_bit(BCH_FS_ERRORS_FIXED, &c->flags))
ret = 2; ret = 2;
if (test_bit(BCH_FS_FSCK_UNFIXED_ERRORS, &c->flags)) if (test_bit(BCH_FS_ERROR, &c->flags))
ret = 4; ret = 4;
bch2_fs_stop(c); bch2_fs_stop(c);

View File

@ -470,14 +470,6 @@ struct bch_dev {
struct io_count __percpu *io_done; struct io_count __percpu *io_done;
}; };
/*
* Flag bits for what phase of startup/shutdown the cache set is at, how we're
* shutting down, etc.:
*
* BCH_FS_UNREGISTERING means we're not just shutting down, we're detaching
* all the backing devices first (their cached data gets invalidated, and they
* won't automatically reattach).
*/
enum { enum {
/* startup: */ /* startup: */
BCH_FS_ALLOC_READ_DONE, BCH_FS_ALLOC_READ_DONE,
@ -494,11 +486,10 @@ enum {
/* errors: */ /* errors: */
BCH_FS_ERROR, BCH_FS_ERROR,
BCH_FS_ERRORS_FIXED,
/* misc: */ /* misc: */
BCH_FS_BDEV_MOUNTED, BCH_FS_BDEV_MOUNTED,
BCH_FS_FSCK_FIXED_ERRORS,
BCH_FS_FSCK_UNFIXED_ERRORS,
BCH_FS_FIXED_GENS, BCH_FS_FIXED_GENS,
BCH_FS_REBUILD_REPLICAS, BCH_FS_REBUILD_REPLICAS,
BCH_FS_HOLD_BTREE_WRITES, BCH_FS_HOLD_BTREE_WRITES,

View File

@ -1236,7 +1236,9 @@ LE64_BITMASK(BCH_SB_USRQUOTA, struct bch_sb, flags[0], 57, 58);
LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59); LE64_BITMASK(BCH_SB_GRPQUOTA, struct bch_sb, flags[0], 58, 59);
LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60); LE64_BITMASK(BCH_SB_PRJQUOTA, struct bch_sb, flags[0], 59, 60);
/* 60-64 unused */ LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
/* 61-64 unused */
LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4); LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
LE64_BITMASK(BCH_SB_COMPRESSION_TYPE, struct bch_sb, flags[1], 4, 8); LE64_BITMASK(BCH_SB_COMPRESSION_TYPE, struct bch_sb, flags[1], 4, 8);

View File

@ -71,12 +71,9 @@ enum fsck_err_ret bch2_fsck_err(struct bch_fs *c, unsigned flags,
vprintk(fmt, args); vprintk(fmt, args);
va_end(args); va_end(args);
if (c->opts.errors == BCH_ON_ERROR_CONTINUE && return bch2_inconsistent_error(c)
flags & FSCK_CAN_FIX) ? FSCK_ERR_EXIT
return FSCK_ERR_FIX; : FSCK_ERR_FIX;
bch2_inconsistent_error(c);
return FSCK_ERR_EXIT;
} }
mutex_lock(&c->fsck_error_lock); mutex_lock(&c->fsck_error_lock);
@ -109,11 +106,7 @@ print:
if (c->opts.fix_errors == FSCK_OPT_EXIT) { if (c->opts.fix_errors == FSCK_OPT_EXIT) {
bch_err(c, "%s, exiting", buf); bch_err(c, "%s, exiting", buf);
mutex_unlock(&c->fsck_error_lock); } else if (flags & FSCK_CAN_FIX) {
return FSCK_ERR_EXIT;
}
if (flags & FSCK_CAN_FIX) {
if (c->opts.fix_errors == FSCK_OPT_ASK) { if (c->opts.fix_errors == FSCK_OPT_ASK) {
printk(KERN_ERR "%s: fix?", buf); printk(KERN_ERR "%s: fix?", buf);
fix = ask_yn(); fix = ask_yn();
@ -141,13 +134,16 @@ print:
mutex_unlock(&c->fsck_error_lock); mutex_unlock(&c->fsck_error_lock);
set_bit(fix if (fix) {
? BCH_FS_FSCK_FIXED_ERRORS set_bit(BCH_FS_ERRORS_FIXED, &c->flags);
: BCH_FS_FSCK_UNFIXED_ERRORS, &c->flags); return FSCK_ERR_FIX;
} else {
return fix ? FSCK_ERR_FIX set_bit(BCH_FS_ERROR, &c->flags);
: flags & FSCK_CAN_IGNORE ? FSCK_ERR_IGNORE return c->opts.fix_errors == FSCK_OPT_EXIT ||
: FSCK_ERR_EXIT; !(flags & FSCK_CAN_IGNORE)
? FSCK_ERR_EXIT
: FSCK_ERR_IGNORE;
}
} }
void bch2_flush_fsck_errs(struct bch_fs *c) void bch2_flush_fsck_errs(struct bch_fs *c)

View File

@ -90,7 +90,9 @@ static int reattach_inode(struct bch_fs *c,
bch2_inode_pack(&packed, lostfound_inode); bch2_inode_pack(&packed, lostfound_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, BTREE_INSERT_NOFAIL); NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
if (ret) { if (ret) {
bch_err(c, "error %i reattaching inode %llu while updating lost+found", bch_err(c, "error %i reattaching inode %llu while updating lost+found",
ret, inum); ret, inum);
@ -100,7 +102,8 @@ static int reattach_inode(struct bch_fs *c,
ret = bch2_dirent_create(c, lostfound_inode->bi_inum, ret = bch2_dirent_create(c, lostfound_inode->bi_inum,
&lostfound_hash_info, &lostfound_hash_info,
DT_DIR, &name, inum, NULL, DT_DIR, &name, inum, NULL,
BTREE_INSERT_NOFAIL); BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
if (ret) { if (ret) {
bch_err(c, "error %i reattaching inode %llu while creating new dirent", bch_err(c, "error %i reattaching inode %llu while creating new dirent",
ret, inum); ret, inum);
@ -482,7 +485,8 @@ static int check_extents(struct bch_fs *c)
ret = bch2_btree_insert(c, BTREE_ID_INODES, ret = bch2_btree_insert(c, BTREE_ID_INODES,
&p.inode.k_i, NULL, NULL, &p.inode.k_i, NULL, NULL,
BTREE_INSERT_NOFAIL); BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
if (ret) { if (ret) {
bch_err(c, "error in fs gc: error %i " bch_err(c, "error in fs gc: error %i "
"updating inode", ret); "updating inode", ret);
@ -750,7 +754,9 @@ create_root:
bch2_inode_pack(&packed, root_inode); bch2_inode_pack(&packed, root_inode);
return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, BTREE_INSERT_NOFAIL); NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
} }
/* Get lost+found, create if it doesn't exist: */ /* Get lost+found, create if it doesn't exist: */
@ -794,7 +800,9 @@ create_lostfound:
bch2_inode_pack(&packed, root_inode); bch2_inode_pack(&packed, root_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i, ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
NULL, NULL, BTREE_INSERT_NOFAIL); NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
if (ret) if (ret)
return ret; return ret;
@ -808,7 +816,8 @@ create_lostfound:
ret = bch2_dirent_create(c, BCACHEFS_ROOT_INO, &root_hash_info, DT_DIR, ret = bch2_dirent_create(c, BCACHEFS_ROOT_INO, &root_hash_info, DT_DIR,
&lostfound, lostfound_inode->bi_inum, NULL, &lostfound, lostfound_inode->bi_inum, NULL,
BTREE_INSERT_NOFAIL); BTREE_INSERT_NOFAIL|
BTREE_INSERT_LAZY_RW);
if (ret) if (ret)
return ret; return ret;

View File

@ -364,8 +364,11 @@ int bch2_fs_recovery(struct bch_fs *c)
c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current); c->disk_sb.sb->version = le16_to_cpu(bcachefs_metadata_version_current);
} }
if (!test_bit(BCH_FS_FSCK_UNFIXED_ERRORS, &c->flags)) if (c->opts.fsck &&
!test_bit(BCH_FS_ERROR, &c->flags)) {
c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK; c->disk_sb.sb->features[0] |= 1ULL << BCH_FEATURE_ATOMIC_NLINK;
SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 0);
}
mutex_unlock(&c->sb_lock); mutex_unlock(&c->sb_lock);
if (enabled_qtypes(c)) { if (enabled_qtypes(c)) {

View File

@ -706,6 +706,9 @@ int bch2_write_super(struct bch_fs *c)
le64_add_cpu(&c->disk_sb.sb->seq, 1); le64_add_cpu(&c->disk_sb.sb->seq, 1);
if (test_bit(BCH_FS_ERROR, &c->flags))
SET_BCH_SB_HAS_ERRORS(c->disk_sb.sb, 1);
for_each_online_member(ca, c, i) for_each_online_member(ca, c, i)
bch2_sb_from_fs(c, ca); bch2_sb_from_fs(c, ca);
@ -718,8 +721,7 @@ int bch2_write_super(struct bch_fs *c)
} }
} }
if (c->opts.nochanges || if (c->opts.nochanges)
test_bit(BCH_FS_ERROR, &c->flags))
goto out; goto out;
for_each_online_member(ca, c, i) { for_each_online_member(ca, c, i) {

View File

@ -206,7 +206,7 @@ static void __bch2_fs_read_only(struct bch_fs *c)
{ {
struct bch_dev *ca; struct bch_dev *ca;
bool wrote; bool wrote;
unsigned i; unsigned i, clean_passes = 0;
int ret; int ret;
bch2_rebalance_stop(c); bch2_rebalance_stop(c);
@ -226,18 +226,18 @@ static void __bch2_fs_read_only(struct bch_fs *c)
goto allocator_not_running; goto allocator_not_running;
do { do {
ret = bch2_alloc_write(c, false, &wrote);
if (ret) {
bch2_fs_inconsistent(c, "error writing out alloc info %i", ret);
break;
}
ret = bch2_stripes_write(c, &wrote); ret = bch2_stripes_write(c, &wrote);
if (ret) { if (ret) {
bch2_fs_inconsistent(c, "error writing out stripes"); bch2_fs_inconsistent(c, "error writing out stripes");
break; break;
} }
ret = bch2_alloc_write(c, false, &wrote);
if (ret) {
bch2_fs_inconsistent(c, "error writing out alloc info %i", ret);
break;
}
for_each_member_device(ca, c, i) for_each_member_device(ca, c, i)
bch2_dev_allocator_quiesce(c, ca); bch2_dev_allocator_quiesce(c, ca);
@ -252,7 +252,9 @@ static void __bch2_fs_read_only(struct bch_fs *c)
*/ */
closure_wait_event(&c->btree_interior_update_wait, closure_wait_event(&c->btree_interior_update_wait,
!bch2_btree_interior_updates_nr_pending(c)); !bch2_btree_interior_updates_nr_pending(c));
} while (wrote);
clean_passes = wrote ? 0 : clean_passes + 1;
} while (clean_passes < 2);
allocator_not_running: allocator_not_running:
for_each_member_device(ca, c, i) for_each_member_device(ca, c, i)
bch2_dev_allocator_stop(ca); bch2_dev_allocator_stop(ca);
@ -461,9 +463,6 @@ int bch2_fs_read_write_early(struct bch_fs *c)
{ {
lockdep_assert_held(&c->state_lock); lockdep_assert_held(&c->state_lock);
if (c->opts.read_only)
return -EROFS;
return __bch2_fs_read_write(c, true); return __bch2_fs_read_write(c, true);
} }
@ -873,7 +872,6 @@ err:
} }
BUG_ON(!err); BUG_ON(!err);
set_bit(BCH_FS_ERROR, &c->flags);
goto out; goto out;
} }