about summary refs log tree commit diff
path: root/fs
diff options
context:
space:
mode:
authorMike Kravetz <mike.kravetz@oracle.com>2015-10-22 09:03:20 +1100
committerStephen Rothwell <sfr@canb.auug.org.au>2015-10-22 09:03:20 +1100
commit4e0a78fea078af972276c2d3aeaceb2bac80e033 (patch)
treed474d7ec68e579828f003b1f92f7202a3a8475f1 /fs
parent0830d5afd4ab69d01cf5ceba9b9f2796564c4eb6 (diff)
mm/hugetlb: setup hugetlb_falloc during fallocate hole punch
When performing a fallocate hole punch, set up a hugetlb_falloc struct and make i_private point to it. i_private will point to this struct for the duration of the operation. At the end of the operation, wake up anyone who faulted on the hole and is on the waitq.

Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'fs')
-rw-r--r--fs/hugetlbfs/inode.c32
1 file changed, 29 insertions(+), 3 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 316adb968b65..719bbe0cf8b4 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -507,6 +507,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
struct hstate *h = hstate_inode(inode);
loff_t hpage_size = huge_page_size(h);
+ unsigned long hpage_shift = huge_page_shift(h);
loff_t hole_start, hole_end;
/*
@@ -518,8 +519,30 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (hole_end > hole_start) {
struct address_space *mapping = inode->i_mapping;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(hugetlb_falloc_waitq);
+ /*
+ * Page faults on the area to be hole punched must be stopped
+ * during the operation. Initialize struct and have
+ * inode->i_private point to it.
+ */
+ struct hugetlb_falloc hugetlb_falloc = {
+ .waitq = &hugetlb_falloc_waitq,
+ .start = hole_start >> hpage_shift,
+ .end = hole_end >> hpage_shift
+ };
mutex_lock(&inode->i_mutex);
+
+ /*
+ * inode->i_private will be checked in the page fault path.
+ * The locking assures that all writes to the structure are
+ * complete before assigning to i_private. A fault on another
+ * CPU will see the fully initialized structure.
+ */
+ spin_lock(&inode->i_lock);
+ inode->i_private = &hugetlb_falloc;
+ spin_unlock(&inode->i_lock);
+
i_mmap_lock_write(mapping);
if (!RB_EMPTY_ROOT(&mapping->i_mmap))
hugetlb_vmdelete_list(&mapping->i_mmap,
@@ -527,6 +550,12 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
hole_end >> PAGE_SHIFT);
i_mmap_unlock_write(mapping);
remove_inode_hugepages(inode, hole_start, hole_end);
+
+ spin_lock(&inode->i_lock);
+ inode->i_private = NULL;
+ wake_up_all(&hugetlb_falloc_waitq);
+ spin_unlock(&inode->i_lock);
+
mutex_unlock(&inode->i_mutex);
}
@@ -647,9 +676,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
i_size_write(inode, offset + len);
inode->i_ctime = CURRENT_TIME;
- spin_lock(&inode->i_lock);
- inode->i_private = NULL;
- spin_unlock(&inode->i_lock);
out:
mutex_unlock(&inode->i_mutex);
return error;