author	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-05-23 10:24:39 +0200
committer	Martin Schwidefsky <schwidefsky@de.ibm.com>	2011-05-23 10:24:31 +0200
commit	2d42552d1c1659b014851cf449ad2fe458509128 (patch)
tree	b9ef22867ce52e23b5249a7ad38637eec40363b8 /mm/rmap.c
parent	c26001d4e9133fe45e47eee18cfd826219e71fb9 (diff)
[S390] merge page_test_dirty and page_clear_dirty
The page_clear_dirty primitive always sets the default storage key, which resets the access control bits and the fetch protection bit. That will surprise a KVM guest that sets non-zero access control bits or the fetch protection bit. Merge page_test_dirty and page_clear_dirty back into a single function and only clear the dirty bit from the storage key.

In addition, move the functions page_test_and_clear_dirty and page_test_and_clear_young to page.h where they belong. This requires changing the parameter from a struct page * to a page frame number.

Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
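The hunks below only switch the mm/rmap.c callers over to the merged, pfn-based helper; the s390 arch side is not part of this diff. As a rough sketch of the idea described above, assuming the s390 storage-key accessors page_get_storage_key()/page_set_storage_key() and a _PAGE_CHANGED mask for the change (dirty) bit, the merged helper could look something like this:

/*
 * Hypothetical sketch only -- the actual arch/s390 page.h change is not
 * shown in this mm/rmap.c diff.  Assumes page_get_storage_key() /
 * page_set_storage_key() and a _PAGE_CHANGED mask for the change bit.
 */
static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
{
	unsigned char skey = page_get_storage_key(pfn << PAGE_SHIFT);

	if (!(skey & _PAGE_CHANGED))
		return 0;
	/*
	 * Clear only the change bit; the access control bits and the
	 * fetch protection bit of the storage key are preserved, so a
	 * KVM guest that set them is no longer surprised.
	 */
	page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
	return 1;
}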
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	11
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 8da044a1db0..522e4a93cad 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -719,7 +719,7 @@ int page_referenced(struct page *page,
 			unlock_page(page);
 	}
 out:
-	if (page_test_and_clear_young(page))
+	if (page_test_and_clear_young(page_to_pfn(page)))
 		referenced++;
 
 	return referenced;
@@ -785,10 +785,8 @@ int page_mkclean(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		if (mapping) {
 			ret = page_mkclean_file(mapping, page);
-			if (page_test_dirty(page)) {
-				page_clear_dirty(page, 1);
+			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
 				ret = 1;
-			}
 		}
 	}
@@ -981,10 +979,9 @@ void page_remove_rmap(struct page *page)
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
 	 */
-	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
-		page_clear_dirty(page, 1);
+	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
-	}
 	/*
 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
 	 * and not charged by memcg for now.