author		Jan Schmidt <list.btrfs@jan-o-sch.net>		2011-12-12 16:10:07 +0100
committer	Jan Schmidt <list.btrfs@jan-o-sch.net>		2012-01-04 16:12:48 +0100
commit		a168650c08300434e1456abe7b6451f1448230d3 (patch)
tree		addddb060b018eb569e97c11e1a1451feef11c7a /fs/btrfs/delayed-ref.c
parent		d1270cd91f308c9d22b2804720c36ccd32dbc35e (diff)
Btrfs: add waitqueue instead of doing busy waiting for more delayed refs
Now that we may be holding back delayed refs for a limited period, we
might end up having no runnable delayed refs. Without this commit, we'd
do busy waiting in that thread until another (runnable) ref arrives.
Instead, we detect this situation and use a waitqueue, such that we only
try to run more refs after
a) another runnable ref was added or
b) delayed refs are no longer held back

Signed-off-by: Jan Schmidt <list.btrfs@jan-o-sch.net>
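The waiting side of this handshake is not part of the diff below (it lives
in the delayed-ref processing path, not in fs/btrfs/delayed-ref.c). As a
minimal sketch of the pattern these wake_up() calls pair with: seq_wait
and num_entries are real fields of struct btrfs_delayed_ref_root, but the
helper below is hypothetical, not the commit's actual code, and the real
wakeup condition would also have to observe held-back refs being released.

	#include <linux/sched.h>
	#include <linux/spinlock.h>
	#include <linux/wait.h>
	#include "delayed-ref.h"

	/*
	 * Hypothetical sketch of the consumer side: instead of spinning
	 * until a runnable ref shows up, sleep on seq_wait and let the
	 * wake_up(&delayed_refs->seq_wait) calls added by this commit
	 * end the wait. num_refs is the entry count the caller sampled
	 * while still holding delayed_refs->lock.
	 */
	static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
				       unsigned long num_refs)
	{
		spin_unlock(&delayed_refs->lock);
		/* sleeps until another ref is added; wait_event() re-checks
		 * the condition after queueing, so a wakeup racing with us
		 * going to sleep is not lost */
		wait_event(delayed_refs->seq_wait,
			   delayed_refs->num_entries != num_refs);
		spin_lock(&delayed_refs->lock);
	}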
Diffstat (limited to 'fs/btrfs/delayed-ref.c')
-rw-r--r--	fs/btrfs/delayed-ref.c	8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index ee181989d44..66e4f29505a 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -664,6 +664,9 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
 					num_bytes, parent, ref_root, level, action,
 					for_cow);
 	BUG_ON(ret);
+	if (!need_ref_seq(for_cow, ref_root) &&
+	    waitqueue_active(&delayed_refs->seq_wait))
+		wake_up(&delayed_refs->seq_wait);
 	spin_unlock(&delayed_refs->lock);
 	return 0;
 }
@@ -712,6 +715,9 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
 					num_bytes, parent, ref_root, owner, offset,
 					action, for_cow);
 	BUG_ON(ret);
+	if (!need_ref_seq(for_cow, ref_root) &&
+	    waitqueue_active(&delayed_refs->seq_wait))
+		wake_up(&delayed_refs->seq_wait);
 	spin_unlock(&delayed_refs->lock);
 	return 0;
 }
@@ -739,6 +745,8 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
 				   extent_op->is_data);
 	BUG_ON(ret);
 
+	if (waitqueue_active(&delayed_refs->seq_wait))
+		wake_up(&delayed_refs->seq_wait);
 	spin_unlock(&delayed_refs->lock);
 	return 0;
 }
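A note on the design choice visible in all three hunks: waitqueue_active()
is a cheap check that lets these hot add-ref paths skip the wake_up() call
entirely when no task is sleeping on seq_wait, which is the common case.
In the tree-ref and data-ref paths the wakeup is additionally gated on
!need_ref_seq(): a ref that is being held back for sequence tracking
cannot be run yet, so there would be nothing new for a waiter to do.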