author		Matthew Wilcox <mawilcox@microsoft.com>		2018-04-10 22:39:01 -0700
committer	Darrick J. Wong <darrick.wong@oracle.com>	2018-04-10 22:39:01 -0700
commit		fbbb4509048cf4e41a1254978859b588e0c86eab (patch)
tree		6062070516fbb8cbcbca0b5117642957ec24a181 /fs
parent		4919d42ab69a4e4601f3cd20a9540f3835e0dd48 (diff)
Export __set_page_dirty
XFS currently contains a copy-and-paste of __set_page_dirty().  Export it from buffer.c instead.

Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Jeff Layton <jlayton@kernel.org>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
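For reference, here is a minimal sketch of what a filesystem ->set_page_dirty() implementation looks like once __set_page_dirty() is exported.  The helper's signature is taken from the buffer.c hunk below; example_set_page_dirty() is a hypothetical name, and the surrounding memcg and inode-dirtying steps simply mirror the XFS caller rather than any particular in-tree implementation.

/*
 * Illustrative only: a ->set_page_dirty() aop that relies on the
 * exported helper instead of open-coding the tree_lock section.
 */
static int example_set_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	int newly_dirty;

	lock_page_memcg(page);
	newly_dirty = !TestSetPageDirty(page);
	if (newly_dirty)
		__set_page_dirty(page, mapping, 1);	/* warn on !PageUptodate */
	unlock_page_memcg(page);

	if (newly_dirty)
		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
	return newly_dirty;
}

Note that the fs-only diffstat below does not show where the prototype is made visible to callers; presumably it lives in a shared header such as include/linux/buffer_head.h.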
Diffstat (limited to 'fs')
-rw-r--r--	fs/buffer.c		3
-rw-r--r--	fs/xfs/xfs_aops.c	15
2 files changed, 4 insertions, 14 deletions
diff --git a/fs/buffer.c b/fs/buffer.c
index 9a73924db22f..0b487cdb7124 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -594,7 +594,7 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
  *
  * The caller must hold lock_page_memcg().
  */
-static void __set_page_dirty(struct page *page, struct address_space *mapping,
+void __set_page_dirty(struct page *page, struct address_space *mapping,
 			     int warn)
 {
 	unsigned long flags;
@@ -608,6 +608,7 @@ static void __set_page_dirty(struct page *page, struct address_space *mapping,
 	}
 	spin_unlock_irqrestore(&mapping->tree_lock, flags);
 }
+EXPORT_SYMBOL_GPL(__set_page_dirty);
 
 /*
  * Add a page to the dirty page list.
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 19eadc807056..c67683ebfe68 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1472,19 +1472,8 @@ xfs_vm_set_page_dirty(
 	newly_dirty = !TestSetPageDirty(page);
 	spin_unlock(&mapping->private_lock);
 
-	if (newly_dirty) {
-		/* sigh - __set_page_dirty() is static, so copy it here, too */
-		unsigned long flags;
-
-		spin_lock_irqsave(&mapping->tree_lock, flags);
-		if (page->mapping) {	/* Race with truncate? */
-			WARN_ON_ONCE(!PageUptodate(page));
-			account_page_dirtied(page, mapping);
-			radix_tree_tag_set(&mapping->page_tree,
-					page_index(page), PAGECACHE_TAG_DIRTY);
-		}
-		spin_unlock_irqrestore(&mapping->tree_lock, flags);
-	}
+	if (newly_dirty)
+		__set_page_dirty(page, mapping, 1);
 	unlock_page_memcg(page);
 	if (newly_dirty)
 		__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);