author	Ilya Dryomov <idryomov@gmail.com>	2012-01-16 22:04:49 +0200
committer	Ilya Dryomov <idryomov@gmail.com>	2012-01-16 22:04:49 +0200
commit	837d5b6e46d1a4af5b6cc8f2fe83cb5de79a2961 (patch)
tree	4ef87d05240e90480749c345274a83094caf66f0 /fs/btrfs/volumes.c
parent	9555c6c180600b40f6e86bd4dc53bf47e06ed663 (diff)
Btrfs: allow for pausing restriper
Implement an ioctl for pausing the restriper. This pauses the relocation, but balance is still considered to be "in progress": the balance item is not deleted, other volume operations cannot be started, etc. If paused in the middle of a profile-changing operation, we continue making allocations with the target profile.

Add a hook to close_ctree() to pause the restriper and free its data structures on unmount. (It is safe to unmount while the restriper is in the "paused" state; we will resume with the same parameters on the next mount.)

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
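For illustration, here is a minimal userspace sketch of driving the pause path added by this patch. It assumes the ioctl interface introduced by the companion ioctl patch in this series; the names BTRFS_IOC_BALANCE_CTL and BTRFS_BALANCE_CTL_PAUSE are taken from that patch and are assumptions here, not part of this diff. The kernel side of the call ends up in btrfs_pause_balance() below.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/btrfs.h>	/* or the ioctl.h copy shipped with btrfs-progs */

    /* Pause a running balance on the filesystem mounted at 'mnt'. */
    static int pause_balance(const char *mnt)
    {
    	int fd = open(mnt, O_RDONLY);	/* any fd on the filesystem will do */
    	int ret;

    	if (fd < 0)
    		return -1;
    	/* Assumed ioctl from the companion patch; dispatches to btrfs_pause_balance(). */
    	ret = ioctl(fd, BTRFS_IOC_BALANCE_CTL, BTRFS_BALANCE_CTL_PAUSE);
    	close(fd);
    	return ret;
    }

If no balance is currently running (or it is already paused), the ioctl fails and errno is set to ENOTCONN, matching the -ENOTCONN returns in btrfs_pause_balance() below.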
Diffstat (limited to 'fs/btrfs/volumes.c')
-rw-r--r--	fs/btrfs/volumes.c	51
1 file changed, 49 insertions(+), 2 deletions(-)
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e0160607e6e..d32660ce753 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -2492,6 +2492,11 @@ static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 	key.type = BTRFS_CHUNK_ITEM_KEY;

 	while (1) {
+		if (atomic_read(&fs_info->balance_pause_req)) {
+			ret = -ECANCELED;
+			goto error;
+		}
+
 		ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
 		if (ret < 0)
 			goto error;
@@ -2553,6 +2558,11 @@ error:
 	return ret;
 }

+static inline int balance_need_close(struct btrfs_fs_info *fs_info)
+{
+	return atomic_read(&fs_info->balance_pause_req) == 0;
+}
+
 static void __cancel_balance(struct btrfs_fs_info *fs_info)
 {
 	int ret;
@@ -2575,7 +2585,8 @@ int btrfs_balance(struct btrfs_balance_control *bctl,
 	u64 allowed;
 	int ret;

-	if (btrfs_fs_closing(fs_info)) {
+	if (btrfs_fs_closing(fs_info) ||
+	    atomic_read(&fs_info->balance_pause_req)) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -2680,18 +2691,25 @@ do_balance:
 		spin_unlock(&fs_info->balance_lock);
 	}

+	atomic_inc(&fs_info->balance_running);
 	mutex_unlock(&fs_info->balance_mutex);

 	ret = __btrfs_balance(fs_info);

 	mutex_lock(&fs_info->balance_mutex);
+	atomic_dec(&fs_info->balance_running);

 	if (bargs) {
 		memset(bargs, 0, sizeof(*bargs));
 		update_ioctl_balance_args(fs_info, bargs);
 	}

-	__cancel_balance(fs_info);
+	if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
+	    balance_need_close(fs_info)) {
+		__cancel_balance(fs_info);
+	}
+
+	wake_up(&fs_info->balance_wait_q);

 	return ret;
 out:
@@ -2785,6 +2803,35 @@ out:
 	return ret;
 }

+int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
+{
+	int ret = 0;
+
+	mutex_lock(&fs_info->balance_mutex);
+	if (!fs_info->balance_ctl) {
+		mutex_unlock(&fs_info->balance_mutex);
+		return -ENOTCONN;
+	}
+
+	if (atomic_read(&fs_info->balance_running)) {
+		atomic_inc(&fs_info->balance_pause_req);
+		mutex_unlock(&fs_info->balance_mutex);
+
+		wait_event(fs_info->balance_wait_q,
+			   atomic_read(&fs_info->balance_running) == 0);
+
+		mutex_lock(&fs_info->balance_mutex);
+		/* we are good with balance_ctl ripped off from under us */
+		BUG_ON(atomic_read(&fs_info->balance_running));
+		atomic_dec(&fs_info->balance_pause_req);
+	} else {
+		ret = -ENOTCONN;
+	}
+
+	mutex_unlock(&fs_info->balance_mutex);
+	return ret;
+}
+
 /*
  * shrinking a device means finding all of the device extents past
  * the new size, and then following the back refs to the chunks.