/*
 * Code for moving data off a device.
 */

#include "bcachefs.h"
|
2017-01-08 12:13:18 +03:00
|
|
|
#include "btree_update.h"
|
|
|
|
#include "buckets.h"
|
|
|
|
#include "extents.h"
|
|
|
|
#include "io.h"
|
|
|
|
#include "journal.h"
|
|
|
|
#include "keylist.h"
|
|
|
|
#include "migrate.h"
|
|
|
|
#include "move.h"
|
2017-03-09 20:27:30 +03:00
|
|
|
#include "super-io.h"
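/*
 * Predicate for bch2_move_data(): true if the extent has a pointer to the
 * device we're migrating off of:
 */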
static bool migrate_pred(void *arg, struct bkey_s_c_extent e)
{
	struct bch_dev *ca = arg;
	const struct bch_extent_ptr *ptr;

	extent_for_each_ptr(e, ptr)
		if (ptr->dev == ca->dev_idx)
			return true;

	return false;
}

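/* Maximum number of passes over the extents btree before we give up: */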
#define MAX_DATA_OFF_ITER	10

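/*
 * Move all user data off @ca, then re-mark which extent replicas entries are
 * still in use so stale ones can be dropped:
 */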
static int bch2_dev_usrdata_migrate(struct bch_fs *c, struct bch_dev *ca,
				    int flags)
{
	struct btree_iter iter;
	struct bkey_s_c k;
	u64 keys_moved, sectors_moved;
	unsigned pass = 0;
	int ret = 0;

	BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);

	if (!(bch2_dev_has_data(c, ca) & (1 << BCH_DATA_USER)))
		return 0;

	/*
	 * In theory, only one pass should be necessary as we've
	 * quiesced all writes before calling this.
	 *
	 * However, in practice, more than one pass may be necessary:
	 * - Some move fails due to an error. We can find this out
	 *   from the moving_context.
	 * - Some key swap failed because some of the pointers in the
	 *   key in the tree changed due to caching behavior, btree gc
	 *   pruning stale pointers, or tiering (if the device being
	 *   removed is in tier 0). A smarter bkey_cmpxchg would
	 *   handle these cases.
	 *
	 * Thus this scans the tree one more time than strictly necessary,
	 * but that can be viewed as a verification pass.
	 */
	do {
		ret = bch2_move_data(c, NULL,
				     SECTORS_IN_FLIGHT_PER_DEVICE,
				     NULL,
				     writepoint_hashed((unsigned long) current),
				     0,
				     ca->dev_idx,
				     migrate_pred, ca,
				     &keys_moved,
				     &sectors_moved);
		if (ret) {
			bch_err(c, "error migrating data: %i", ret);
			return ret;
		}
	} while (keys_moved && pass++ < MAX_DATA_OFF_ITER);

	if (keys_moved) {
		bch_err(c, "unable to migrate all data in %d iterations",
			MAX_DATA_OFF_ITER);
		return -1;
	}

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);
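
	/*
	 * Walk all extents to re-mark which replicas entries are still in
	 * use, so that replicas_gc_end() can drop the rest:
	 */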
	for_each_btree_key(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
			   BTREE_ITER_PREFETCH, k) {
		if (!bkey_extent_is_data(k.k))
			continue;

		ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
					    BCH_DATA_USER);
		if (ret) {
			bch_err(c, "error migrating data %i from check_mark_super()",
				ret);
			break;
		}
	}

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);
	return ret;
}

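/*
 * Rewrite every btree node in @id that has a pointer to @ca; since @ca is no
 * longer open for allocations, the replacement nodes land elsewhere:
 */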
static int bch2_move_btree_off(struct bch_fs *c, struct bch_dev *ca,
			       enum btree_id id)
{
	struct btree_iter iter;
	struct btree *b;
	int ret;

	BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);

	for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
		struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);

		if (!bch2_extent_has_device(e, ca->dev_idx))
			continue;

		ret = bch2_btree_node_rewrite(c, &iter, b->data->keys.seq, 0);
		if (ret) {
			bch2_btree_iter_unlock(&iter);
			return ret;
		}

		bch2_btree_iter_set_locks_want(&iter, 0);
	}
	ret = bch2_btree_iter_unlock(&iter);
	if (ret)
		return ret; /* btree IO error */

	if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
		for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);

			BUG_ON(bch2_extent_has_device(e, ca->dev_idx));
		}
		bch2_btree_iter_unlock(&iter);
	}

	return 0;
}

/*
 * This moves only the metadata off, leaving the data (if any) in place.
 * The data is moved off by bch2_dev_usrdata_migrate(), if desired, which
 * is called first.
 *
 * Before calling this, allocation of buckets to the device must have
 * been disabled, as otherwise we'll continue to write metadata to the
 * device when new buckets are picked for metadata writes.
 * In addition, the copying gc and allocator threads for the device
 * must have been stopped. The allocator thread is the only thread
 * that writes prio/gen information.
 *
 * Metadata consists of:
 * - Btree nodes
 * - Prio/gen information
 * - Journal entries
 * - Superblock
 *
 * This has to move the btree nodes and the journal only:
 * - prio/gen information is not written once the allocator thread is
 *   stopped. Also, as the prio/gen information is per-device it is not
 *   moved.
 * - the superblock will be written by the caller once after everything
 *   is stopped.
 *
 * Note that currently there is no way to stop btree node and journal
 * metadata writes to a device without moving the metadata, because
 * once a bucket is open for a btree node, unless a replacement btree
 * node is allocated (and the tree updated), the bucket will continue
 * to be written with updates. Similarly for the journal (it gets
 * written until filled).
 *
 * This routine leaves the data (if any) in place. Whether the data
 * should be moved off is a decision independent of whether the
 * metadata should be moved off and stopped:
 *
 * - For device removal, both data and metadata are moved off, in
 *   that order.
 *
 * - However, for turning a device read-only without removing it, only
 *   metadata is moved off since that's the only way to prevent it
 *   from being written. Data is left on the device, but no new data
 *   is written.
 */
static int bch2_dev_metadata_migrate(struct bch_fs *c, struct bch_dev *ca,
				     int flags)
{
	unsigned i;
	int ret = 0;

	BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);

	if (!(bch2_dev_has_data(c, ca) &
	      ((1 << BCH_DATA_JOURNAL)|
	       (1 << BCH_DATA_BTREE))))
		return 0;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);

	for (i = 0; i < BTREE_ID_NR; i++) {
		ret = bch2_move_btree_off(c, ca, i);
		if (ret)
			goto err;
	}
err:
	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);
	return ret;
}

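/* Move all user data, then all metadata, off @ca: */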
int bch2_dev_data_migrate(struct bch_fs *c, struct bch_dev *ca, int flags)
{
	return bch2_dev_usrdata_migrate(c, ca, flags) ?:
		bch2_dev_metadata_migrate(c, ca, flags);
}

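/*
 * Drop @dev_idx's pointer from @e; fails with -EINVAL if doing so would
 * leave the extent with no good pointers (data lost) or fewer than the
 * configured number of replicas (degraded), unless the corresponding force
 * flag was passed:
 */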
static int drop_dev_ptrs(struct bch_fs *c, struct bkey_s_extent e,
			 unsigned dev_idx, int flags, bool metadata)
{
	unsigned replicas = metadata ? c->opts.metadata_replicas : c->opts.data_replicas;
	unsigned lost = metadata ? BCH_FORCE_IF_METADATA_LOST : BCH_FORCE_IF_DATA_LOST;
	unsigned degraded = metadata ? BCH_FORCE_IF_METADATA_DEGRADED : BCH_FORCE_IF_DATA_DEGRADED;
	unsigned nr_good;

	bch2_extent_drop_device(e, dev_idx);

	nr_good = bch2_extent_nr_good_ptrs(c, e.c);
	if ((!nr_good && !(flags & lost)) ||
	    (nr_good < replicas && !(flags & degraded)))
		return -EINVAL;

	return 0;
}

/*
 * This doesn't actually move any data -- it marks the keys as bad
 * if they contain a pointer to a device that is forcibly removed
 * and don't have other valid pointers. If there are valid pointers,
 * the necessary pointers to the removed device are replaced with
 * bad pointers instead.
 *
 * This is only called if bch2_dev_usrdata_migrate() above failed, meaning
 * that we've already tried to move the data MAX_DATA_OFF_ITER times and
 * are not likely to succeed if we try again.
 */
static int bch2_dev_usrdata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
	struct bkey_s_c k;
	struct bkey_s_extent e;
	BKEY_PADDED(key) tmp;
	struct btree_iter iter;
	int ret = 0;

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_USER);

	bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS,
			     POS_MIN, BTREE_ITER_PREFETCH);

	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(ret = btree_iter_err(k))) {
		if (!bkey_extent_is_data(k.k))
			goto advance;

		if (!bch2_extent_has_device(bkey_s_c_to_extent(k), dev_idx))
			goto advance;

		bkey_reassemble(&tmp.key, k);
		e = bkey_i_to_s_extent(&tmp.key);

		ret = drop_dev_ptrs(c, e, dev_idx, flags, false);
		if (ret)
			break;

		/*
		 * If the new extent no longer has any pointers,
		 * bch2_extent_normalize() will do the appropriate thing with
		 * it (turning it into a KEY_TYPE_ERROR key, or just a discard
		 * if it was a cached extent):
		 */
		bch2_extent_normalize(c, e.s);

		if (bkey_extent_is_data(e.k) &&
		    (ret = bch2_check_mark_super(c, e.c, BCH_DATA_USER)))
			break;

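		/*
		 * Position the iterator at the start of the extent we
		 * reassembled, so the insert below replaces exactly that key:
		 */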
		iter.pos = bkey_start_pos(&tmp.key.k);

		ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
					   BTREE_INSERT_ATOMIC|
					   BTREE_INSERT_NOFAIL,
					   BTREE_INSERT_ENTRY(&iter, &tmp.key));

		/*
		 * don't want to leave ret == -EINTR, since if we raced and
		 * something else overwrote the key we could spuriously return
		 * -EINTR below:
		 */
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;

		continue;
advance:
		if (bkey_extent_is_data(k.k)) {
			ret = bch2_check_mark_super(c, bkey_s_c_to_extent(k),
						    BCH_DATA_USER);
			if (ret)
				break;
		}
		bch2_btree_iter_advance_pos(&iter);
	}

	bch2_btree_iter_unlock(&iter);

	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
}

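/*
 * Drop @dev_idx's pointers from all btree node keys without moving any data,
 * updating each affected node's key in place:
 */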
static int bch2_dev_metadata_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
	struct btree_iter iter;
	struct closure cl;
	struct btree *b;
	unsigned id;
	int ret;

	/* don't handle this yet: */
	if (flags & BCH_FORCE_IF_METADATA_LOST)
		return -EINVAL;

	closure_init_stack(&cl);

	mutex_lock(&c->replicas_gc_lock);
	bch2_replicas_gc_start(c, 1 << BCH_DATA_BTREE);

	for (id = 0; id < BTREE_ID_NR; id++) {
		for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
			__BKEY_PADDED(k, BKEY_BTREE_PTR_VAL_U64s_MAX) tmp;
			struct bkey_i_extent *new_key;
retry:
			if (!bch2_extent_has_device(bkey_i_to_s_c_extent(&b->key),
						    dev_idx)) {
				bch2_btree_iter_set_locks_want(&iter, 0);

				ret = bch2_check_mark_super(c, bkey_i_to_s_c_extent(&b->key),
							    BCH_DATA_BTREE);
				if (ret)
					goto err;
			} else {
				bkey_copy(&tmp.k, &b->key);
				new_key = bkey_i_to_extent(&tmp.k);

				ret = drop_dev_ptrs(c, extent_i_to_s(new_key),
						    dev_idx, flags, true);
				if (ret)
					goto err;

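				/*
				 * Updating a btree node's key requires intent
				 * locks up to the root; if we can't get them,
				 * retraverse the iterator and retry:
				 */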
				if (!bch2_btree_iter_set_locks_want(&iter, U8_MAX)) {
					b = bch2_btree_iter_peek_node(&iter);
					goto retry;
				}

				ret = bch2_btree_node_update_key(c, &iter, b, new_key);
				if (ret == -EINTR) {
					b = bch2_btree_iter_peek_node(&iter);
					goto retry;
				}
				if (ret)
					goto err;
			}
		}
		bch2_btree_iter_unlock(&iter);

		/* btree root */
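		/*
		 * XXX: presumably taking and then releasing btree_root_lock
		 * just waits out any btree root switch in flight; the root
		 * keys themselves aren't rewritten here.
		 */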
		mutex_lock(&c->btree_root_lock);
		mutex_unlock(&c->btree_root_lock);
	}

	ret = 0;
out:
	bch2_replicas_gc_end(c, ret);
	mutex_unlock(&c->replicas_gc_lock);

	return ret;
err:
	bch2_btree_iter_unlock(&iter);
	goto out;
}

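/* Force-drop all user data, then all btree node pointers, to @dev_idx: */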
int bch2_dev_data_drop(struct bch_fs *c, unsigned dev_idx, int flags)
{
	return bch2_dev_usrdata_drop(c, dev_idx, flags) ?:
		bch2_dev_metadata_drop(c, dev_idx, flags);
}