2017-01-08 12:13:18 +03:00
|
|
|
/*
|
|
|
|
* Code for moving data off a device.
|
|
|
|
*/
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
#include "bcachefs.h"
|
2017-01-08 12:13:18 +03:00
|
|
|
#include "btree_update.h"
|
|
|
|
#include "buckets.h"
|
|
|
|
#include "extents.h"
|
|
|
|
#include "io.h"
|
|
|
|
#include "journal.h"
|
|
|
|
#include "keylist.h"
|
|
|
|
#include "migrate.h"
|
|
|
|
#include "move.h"
|
2017-03-09 20:27:30 +03:00
|
|
|
#include "super-io.h"
|
2017-01-08 12:13:18 +03:00
|
|
|
|
2017-03-11 00:40:01 +03:00
|
|
|
static int issue_migration_move(struct bch_dev *ca,
|
2017-01-08 12:13:18 +03:00
|
|
|
struct moving_context *ctxt,
|
|
|
|
struct bkey_s_c k)
|
|
|
|
{
|
2017-03-11 00:40:01 +03:00
|
|
|
struct bch_fs *c = ca->fs;
|
2017-01-08 12:13:18 +03:00
|
|
|
struct disk_reservation res;
|
|
|
|
const struct bch_extent_ptr *ptr;
|
|
|
|
int ret;
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
if (bch2_disk_reservation_get(c, &res, k.k->size, 0))
|
2017-01-08 12:13:18 +03:00
|
|
|
return -ENOSPC;
|
|
|
|
|
|
|
|
extent_for_each_ptr(bkey_s_c_to_extent(k), ptr)
|
2016-10-04 06:22:17 +03:00
|
|
|
if (ptr->dev == ca->dev_idx)
|
2017-01-08 12:13:18 +03:00
|
|
|
goto found;
|
|
|
|
|
|
|
|
BUG();
|
|
|
|
found:
|
|
|
|
/* XXX: we need to be doing something with the disk reservation */
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
ret = bch2_data_move(c, ctxt, &c->migration_write_point, k, ptr);
|
2017-01-08 12:13:18 +03:00
|
|
|
if (ret)
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_disk_reservation_put(c, &res);
|
2017-01-08 12:13:18 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define MAX_DATA_OFF_ITER 10
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This moves only the data off, leaving the meta-data (if any) in place.
|
|
|
|
* It walks the key space, and for any key with a valid pointer to the
|
|
|
|
* relevant device, it copies it elsewhere, updating the key to point to
|
|
|
|
* the copy.
|
|
|
|
* The meta-data is moved off by bch_move_meta_data_off_device.
|
|
|
|
*
|
|
|
|
* Note: If the number of data replicas desired is > 1, ideally, any
|
|
|
|
* new copies would not be made in the same device that already have a
|
|
|
|
* copy (if there are enough devices).
|
|
|
|
* This is _not_ currently implemented. The multiple replicas can
|
|
|
|
* land in the same device even if there are others available.
|
|
|
|
*/
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
int bch2_move_data_off_device(struct bch_dev *ca)
{
	struct moving_context ctxt;
	struct bch_fs *c = ca->fs;
	struct bch_sb_field_members *mi;
	unsigned pass = 0;
	u64 seen_key_count;	/* moves issued during the current pass */
	int ret = 0;

	/* Caller must have taken the device out of read-write mode first: */
	BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);

	/* Nothing to do if the superblock says the device holds no data: */
	if (!ca->mi.has_data)
		return 0;

	bch2_move_ctxt_init(&ctxt, NULL, SECTORS_IN_FLIGHT_PER_DEVICE);
	ctxt.avoid = ca;	/* don't place new copies on the device being drained */

	/*
	 * In theory, only one pass should be necessary as we've
	 * quiesced all writes before calling this.
	 *
	 * However, in practice, more than one pass may be necessary:
	 * - Some move fails due to an error. We can can find this out
	 *   from the moving_context.
	 * - Some key swap failed because some of the pointers in the
	 *   key in the tree changed due to caching behavior, btree gc
	 *   pruning stale pointers, or tiering (if the device being
	 *   removed is in tier 0). A smarter bkey_cmpxchg would
	 *   handle these cases.
	 *
	 * Thus this scans the tree one more time than strictly necessary,
	 * but that can be viewed as a verification pass.
	 */

	do {
		struct btree_iter iter;
		struct bkey_s_c k;

		seen_key_count = 0;
		atomic_set(&ctxt.error_count, 0);
		atomic_set(&ctxt.error_flags, 0);

		bch2_btree_iter_init(&iter, c, BTREE_ID_EXTENTS, POS_MIN,
				     BTREE_ITER_PREFETCH);

		/*
		 * bch2_move_ctxt_wait() throttles us while too much move IO
		 * is in flight; the loop also ends on iterator exhaustion or
		 * a btree read error.
		 */
		while (!bch2_move_ctxt_wait(&ctxt) &&
		       (k = bch2_btree_iter_peek(&iter)).k &&
		       !(ret = btree_iter_err(k))) {
			/* Skip keys with no pointer into this device: */
			if (!bkey_extent_is_data(k.k) ||
			    !bch2_extent_has_device(bkey_s_c_to_extent(k),
						    ca->dev_idx))
				goto next;

			ret = issue_migration_move(ca, &ctxt, k);
			if (ret == -ENOMEM) {
				bch2_btree_iter_unlock(&iter);

				/*
				 * memory allocation failure, wait for some IO
				 * to finish
				 */
				bch2_move_ctxt_wait_for_io(&ctxt);
				/* iterator not advanced: retries the same key */
				continue;
			}
			if (ret == -ENOSPC)
				break;	/* propagated to caller below */
			BUG_ON(ret);

			seen_key_count++;
next:
			bch2_btree_iter_advance_pos(&iter);
			bch2_btree_iter_cond_resched(&iter);

		}
		bch2_btree_iter_unlock(&iter);
		/*
		 * NOTE(review): ctxt is torn down at the end of every pass but
		 * initialized only once, before the loop — presumably
		 * bch2_move_ctxt_exit() just drains in-flight IO and leaves
		 * the context reusable for the next pass; confirm.
		 */
		bch2_move_ctxt_exit(&ctxt);

		if (ret)
			return ret;
	} while (seen_key_count && pass++ < MAX_DATA_OFF_ITER);

	/* Still issuing moves after the final pass: give up. */
	if (seen_key_count) {
		pr_err("Unable to migrate all data in %d iterations.",
		       MAX_DATA_OFF_ITER);
		return -1;
	}

	/* All data moved: clear the device's has_data flag in the superblock. */
	mutex_lock(&c->sb_lock);
	mi = bch2_sb_get_members(c->disk_sb);
	SET_BCH_MEMBER_HAS_DATA(&mi->members[ca->dev_idx], false);

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);

	return 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This walks the btree, and for any node on the relevant device it moves the
|
|
|
|
* node elsewhere.
|
|
|
|
*/
|
2017-03-20 02:56:34 +03:00
|
|
|
static int bch2_move_btree_off(struct bch_dev *ca, enum btree_id id)
|
2017-01-08 12:13:18 +03:00
|
|
|
{
|
2017-03-11 00:40:01 +03:00
|
|
|
struct bch_fs *c = ca->fs;
|
2017-01-08 12:13:18 +03:00
|
|
|
struct btree_iter iter;
|
|
|
|
struct closure cl;
|
|
|
|
struct btree *b;
|
|
|
|
int ret;
|
|
|
|
|
2017-03-11 00:40:01 +03:00
|
|
|
BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
|
2017-01-08 12:13:18 +03:00
|
|
|
|
|
|
|
closure_init_stack(&cl);
|
|
|
|
|
2017-04-24 08:56:57 +03:00
|
|
|
for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
|
2017-01-08 12:13:18 +03:00
|
|
|
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
|
|
|
|
retry:
|
2017-03-20 02:56:34 +03:00
|
|
|
if (!bch2_extent_has_device(e, ca->dev_idx))
|
2017-01-08 12:13:18 +03:00
|
|
|
continue;
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
ret = bch2_btree_node_rewrite(&iter, b, &cl);
|
2017-01-08 12:13:18 +03:00
|
|
|
if (ret == -EINTR || ret == -ENOSPC) {
|
|
|
|
/*
|
|
|
|
* Drop locks to upgrade locks or wait on
|
|
|
|
* reserve: after retaking, recheck in case we
|
|
|
|
* raced.
|
|
|
|
*/
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_btree_iter_unlock(&iter);
|
2017-01-08 12:13:18 +03:00
|
|
|
closure_sync(&cl);
|
2017-03-20 02:56:34 +03:00
|
|
|
b = bch2_btree_iter_peek_node(&iter);
|
2017-01-08 12:13:18 +03:00
|
|
|
goto retry;
|
|
|
|
}
|
|
|
|
if (ret) {
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_btree_iter_unlock(&iter);
|
2017-01-08 12:13:18 +03:00
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_btree_iter_set_locks_want(&iter, 0);
|
2017-01-08 12:13:18 +03:00
|
|
|
}
|
2017-03-20 02:56:34 +03:00
|
|
|
ret = bch2_btree_iter_unlock(&iter);
|
2017-01-08 12:13:18 +03:00
|
|
|
if (ret)
|
|
|
|
return ret; /* btree IO error */
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
if (IS_ENABLED(CONFIG_BCACHEFS_DEBUG)) {
|
2017-04-24 08:56:57 +03:00
|
|
|
for_each_btree_node(&iter, c, id, POS_MIN, BTREE_ITER_PREFETCH, b) {
|
2017-01-08 12:13:18 +03:00
|
|
|
struct bkey_s_c_extent e = bkey_i_to_s_c_extent(&b->key);
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
BUG_ON(bch2_extent_has_device(e, ca->dev_idx));
|
2017-01-08 12:13:18 +03:00
|
|
|
}
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_btree_iter_unlock(&iter);
|
2017-01-08 12:13:18 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This moves only the meta-data off, leaving the data (if any) in place.
|
|
|
|
* The data is moved off by bch_move_data_off_device, if desired, and
|
|
|
|
* called first.
|
|
|
|
*
|
|
|
|
* Before calling this, allocation of buckets to the device must have
|
|
|
|
* been disabled, as else we'll continue to write meta-data to the device
|
|
|
|
* when new buckets are picked for meta-data writes.
|
|
|
|
* In addition, the copying gc and allocator threads for the device
|
|
|
|
* must have been stopped. The allocator thread is the only thread
|
|
|
|
* that writes prio/gen information.
|
|
|
|
*
|
|
|
|
* Meta-data consists of:
|
|
|
|
* - Btree nodes
|
|
|
|
* - Prio/gen information
|
|
|
|
* - Journal entries
|
|
|
|
* - Superblock
|
|
|
|
*
|
|
|
|
* This has to move the btree nodes and the journal only:
|
|
|
|
* - prio/gen information is not written once the allocator thread is stopped.
|
|
|
|
* also, as the prio/gen information is per-device it is not moved.
|
|
|
|
* - the superblock will be written by the caller once after everything
|
|
|
|
* is stopped.
|
|
|
|
*
|
|
|
|
* Note that currently there is no way to stop btree node and journal
|
|
|
|
* meta-data writes to a device without moving the meta-data because
|
|
|
|
* once a bucket is open for a btree node, unless a replacement btree
|
|
|
|
* node is allocated (and the tree updated), the bucket will continue
|
|
|
|
* to be written with updates. Similarly for the journal (it gets
|
|
|
|
* written until filled).
|
|
|
|
*
|
|
|
|
* This routine leaves the data (if any) in place. Whether the data
|
|
|
|
* should be moved off is a decision independent of whether the meta
|
|
|
|
* data should be moved off and stopped:
|
|
|
|
*
|
|
|
|
* - For device removal, both data and meta-data are moved off, in
|
|
|
|
* that order.
|
|
|
|
*
|
|
|
|
* - However, for turning a device read-only without removing it, only
|
|
|
|
* meta-data is moved off since that's the only way to prevent it
|
|
|
|
* from being written. Data is left in the device, but no new data
|
|
|
|
* is written.
|
|
|
|
*/
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
int bch2_move_metadata_off_device(struct bch_dev *ca)
|
2017-01-08 12:13:18 +03:00
|
|
|
{
|
2017-03-11 00:40:01 +03:00
|
|
|
struct bch_fs *c = ca->fs;
|
2017-03-09 20:27:30 +03:00
|
|
|
struct bch_sb_field_members *mi;
|
2017-01-08 12:13:18 +03:00
|
|
|
unsigned i;
|
|
|
|
int ret;
|
|
|
|
|
2017-03-11 00:40:01 +03:00
|
|
|
BUG_ON(ca->mi.state == BCH_MEMBER_STATE_RW);
|
2017-03-09 20:27:30 +03:00
|
|
|
|
|
|
|
if (!ca->mi.has_metadata)
|
|
|
|
return 0;
|
|
|
|
|
2017-01-08 12:13:18 +03:00
|
|
|
/* 1st, Move the btree nodes off the device */
|
|
|
|
|
|
|
|
for (i = 0; i < BTREE_ID_NR; i++) {
|
2017-03-20 02:56:34 +03:00
|
|
|
ret = bch2_move_btree_off(ca, i);
|
2017-01-08 12:13:18 +03:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* There are no prios/gens to move -- they are already in the device. */
|
|
|
|
|
|
|
|
/* 2nd. Move the journal off the device */
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
ret = bch2_journal_move(ca);
|
2017-01-08 12:13:18 +03:00
|
|
|
if (ret)
|
|
|
|
return ret;
|
|
|
|
|
2017-03-09 20:27:30 +03:00
|
|
|
mutex_lock(&c->sb_lock);
|
2017-03-20 02:56:34 +03:00
|
|
|
mi = bch2_sb_get_members(c->disk_sb);
|
2017-03-09 20:27:30 +03:00
|
|
|
SET_BCH_MEMBER_HAS_METADATA(&mi->members[ca->dev_idx], false);
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_write_super(c);
|
2017-03-09 20:27:30 +03:00
|
|
|
mutex_unlock(&c->sb_lock);
|
|
|
|
|
2017-01-08 12:13:18 +03:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Flagging data bad when forcibly removing a device after failing to
|
|
|
|
* migrate the data off the device.
|
|
|
|
*/
|
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
static int bch2_flag_key_bad(struct btree_iter *iter,
|
2017-03-11 00:40:01 +03:00
|
|
|
struct bch_dev *ca,
|
2017-01-08 12:13:18 +03:00
|
|
|
struct bkey_s_c_extent orig)
|
|
|
|
{
|
|
|
|
BKEY_PADDED(key) tmp;
|
|
|
|
struct bkey_s_extent e;
|
|
|
|
struct bch_extent_ptr *ptr;
|
2017-03-11 00:40:01 +03:00
|
|
|
struct bch_fs *c = ca->fs;
|
2017-01-08 12:13:18 +03:00
|
|
|
|
|
|
|
bkey_reassemble(&tmp.key, orig.s_c);
|
|
|
|
e = bkey_i_to_s_extent(&tmp.key);
|
|
|
|
|
|
|
|
extent_for_each_ptr_backwards(e, ptr)
|
2016-10-04 06:22:17 +03:00
|
|
|
if (ptr->dev == ca->dev_idx)
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_extent_drop_ptr(e, ptr);
|
2017-01-08 12:13:18 +03:00
|
|
|
|
|
|
|
/*
|
2017-03-20 02:56:34 +03:00
|
|
|
* If the new extent no longer has any pointers, bch2_extent_normalize()
|
2017-01-08 12:13:18 +03:00
|
|
|
* will do the appropriate thing with it (turning it into a
|
|
|
|
* KEY_TYPE_ERROR key, or just a discard if it was a cached extent)
|
|
|
|
*/
|
2017-03-20 02:56:34 +03:00
|
|
|
bch2_extent_normalize(c, e.s);
|
2017-01-08 12:13:18 +03:00
|
|
|
|
2017-03-20 02:56:34 +03:00
|
|
|
return bch2_btree_insert_at(c, NULL, NULL, NULL,
|
2017-01-08 12:13:18 +03:00
|
|
|
BTREE_INSERT_ATOMIC,
|
|
|
|
BTREE_INSERT_ENTRY(iter, &tmp.key));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* This doesn't actually move any data -- it marks the keys as bad
|
|
|
|
* if they contain a pointer to a device that is forcibly removed
|
|
|
|
* and don't have other valid pointers. If there are valid pointers,
|
|
|
|
* the necessary pointers to the removed device are replaced with
|
|
|
|
* bad pointers instead.
|
2017-03-09 20:27:30 +03:00
|
|
|
*
|
2017-01-08 12:13:18 +03:00
|
|
|
* This is only called if bch_move_data_off_device above failed, meaning
|
|
|
|
* that we've already tried to move the data MAX_DATA_OFF_ITER times and
|
|
|
|
* are not likely to succeed if we try again.
|
|
|
|
*/
|
2017-03-20 02:56:34 +03:00
|
|
|
int bch2_flag_data_bad(struct bch_dev *ca)
{
	int ret = 0;
	struct bkey_s_c k;
	struct bkey_s_c_extent e;
	struct btree_iter iter;

	bch2_btree_iter_init(&iter, ca->fs, BTREE_ID_EXTENTS,
			     POS_MIN, BTREE_ITER_PREFETCH);

	/* Walk every extent; stop at end of keyspace or on btree error. */
	while ((k = bch2_btree_iter_peek(&iter)).k &&
	       !(ret = btree_iter_err(k))) {
		/* Non-data keys carry no device pointers to flag: */
		if (!bkey_extent_is_data(k.k))
			goto advance;

		e = bkey_s_c_to_extent(k);
		if (!bch2_extent_has_device(e, ca->dev_idx))
			goto advance;

		ret = bch2_flag_key_bad(&iter, ca, e);

		/*
		 * don't want to leave ret == -EINTR, since if we raced and
		 * something else overwrote the key we could spuriously return
		 * -EINTR below:
		 */
		if (ret == -EINTR)
			ret = 0;
		if (ret)
			break;

		/*
		 * If the replica we're dropping was dirty and there is an
		 * additional cached replica, the cached replica will now be
		 * considered dirty - upon inserting the new version of the key,
		 * the bucket accounting will be updated to reflect the fact
		 * that the cached data is now dirty and everything works out as
		 * if by magic without us having to do anything.
		 *
		 * The one thing we need to be concerned with here is there's a
		 * race between when we drop any stale pointers from the key
		 * we're about to insert, and when the key actually gets
		 * inserted and the cached data is marked as dirty - we could
		 * end up trying to insert a key with a pointer that should be
		 * dirty, but points to stale data.
		 *
		 * If that happens the insert code just bails out and doesn't do
		 * the insert - however, it doesn't return an error. Hence we
		 * need to always recheck the current key before advancing to
		 * the next:
		 */
		continue;
advance:
		bch2_btree_iter_advance_pos(&iter);
	}

	bch2_btree_iter_unlock(&iter);

	return ret;
}
|