author	Ying Han <yinghan@google.com>	2012-05-29 15:06:25 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-29 16:22:20 -0700
commit	096a7cf44712ab531101bb4689f75f7fcd9b9f18 (patch)
tree	319313b6fcb6cc45bd7bee6b3fda4c8416863371
parent	c3ac9a8ade65ccbfd145fbff895ae8d8d62d09b0 (diff)
mm: rename is_mlocked_vma() to mlocked_vma_newpage()
Andrew pointed out that is_mlocked_vma() is misnamed.  A function with a name like that would be expected to return a bool and have no side effects.

Since it is called on the fault path for a new page, rename it in this patch.

Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan@kernel.org>
[akpm@linux-foundation.org: s/mlock_vma_newpage/mlocked_vma_newpage/, per Minchan]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/internal.h	5
-rw-r--r--	mm/vmscan.c	2
2 files changed, 4 insertions, 3 deletions
diff --git a/mm/internal.h b/mm/internal.h
index aee4761cf9a..8b0fc8da802 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -164,7 +164,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
  * to determine if it's being mapped into a LOCKED vma.
  * If so, mark page as mlocked.
  */
-static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page)
+static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
+					struct page *page)
 {
 	VM_BUG_ON(PageLRU(page));
@@ -222,7 +223,7 @@ extern unsigned long vma_address(struct page *page,
 				 struct vm_area_struct *vma);
 #endif
 #else /* !CONFIG_MMU */
-static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p)
+static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
 {
 	return 0;
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8fffc65a84d..44f04364a30 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3321,7 +3321,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
 	if (mapping_unevictable(page_mapping(page)))
 		return 0;
 
-	if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page)))
+	if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
 		return 0;
 
 	return 1;
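
For context, a minimal standalone C sketch of the logic this rename touches. It is a userspace model, not kernel code: the struct layouts, the VM_LOCKED stand-in value, and the body of mlocked_vma_newpage() (inferred from the "mapped into a LOCKED vma" comment in the hunk above) are illustrative assumptions; only the call shape of mlocked_vma_newpage() and the test in page_evictable() mirror the hunks in this patch.

/*
 * Minimal userspace model of the logic touched by this patch -- not
 * kernel code.  Struct layouts, VM_LOCKED value and helper bodies are
 * illustrative stand-ins.
 */
#include <stdio.h>

#define VM_LOCKED 0x00002000UL			/* stand-in flag value */

struct vm_area_struct { unsigned long vm_flags; };
struct page { int mlocked; int mapping_unevictable; };

/*
 * Called on the fault path for a new page to decide whether it is being
 * mapped into a LOCKED vma (the real helper also marks the page mlocked).
 */
static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
				      struct page *page)
{
	(void)page;
	return (vma->vm_flags & VM_LOCKED) != 0;
}

/* Mirrors the post-patch check in mm/vmscan.c:page_evictable() */
static int page_evictable(struct page *page, struct vm_area_struct *vma)
{
	if (page->mapping_unevictable)
		return 0;
	if (page->mlocked || (vma && mlocked_vma_newpage(vma, page)))
		return 0;
	return 1;
}

int main(void)
{
	struct vm_area_struct locked_vma = { .vm_flags = VM_LOCKED };
	struct page page = { .mlocked = 0, .mapping_unevictable = 0 };

	printf("no vma:     %d\n", page_evictable(&page, NULL));        /* 1 */
	printf("locked vma: %d\n", page_evictable(&page, &locked_vma)); /* 0 */
	return 0;
}

The rename changes no behaviour: page_evictable() keeps the same short-circuit order and simply calls the helper under its clearer name.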