author     Joonsoo Kim <iamjoonsoo.kim@lge.com>             2013-09-11 14:21:40 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2013-09-11 15:57:34 -0700
commit     b136be5e0b6e8e3e4dcb6722b51bb35199b06810 (patch)
tree       ec898b9b4cca78e69b8cdf3fd1232c1d7f066e1d /mm/vmalloc.c
parent     6b70f7dff8f7ce2f4692afc7d4ef9f73f8c82434 (diff)
mm, vmalloc: use well-defined find_last_bit() func
Our intention here is to find the last bit within the region to flush. There is a well-defined function, find_last_bit(), for this purpose, and its performance may be slightly better than the current implementation, so switch to it.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
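For illustration, the idea behind the change can be sketched outside the kernel: take the flush range as [first set bit, last set bit + 1) instead of walking the bitmap cluster by cluster. The sketch below is a standalone userspace approximation, not kernel code; first_set(), last_set(), NBITS, PAGE_SHIFT_DEMO and the sample values are made-up stand-ins for find_first_bit(), find_last_bit(), VMAP_BBMAP_BITS and PAGE_SHIFT.

/*
 * Userspace sketch of the idea behind this patch: the flush range is simply
 * [first set bit, last set bit + 1) of the dirty map, rather than the result
 * of a find_next_zero_bit()/find_next_bit() walk.
 *
 * first_set()/last_set() are simplified stand-ins for the kernel's
 * find_first_bit()/find_last_bit(); like those helpers, they return the
 * bitmap size when no bit is set.
 */
#include <stdio.h>
#include <stdint.h>

#define NBITS            64
#define PAGE_SHIFT_DEMO  12   /* arbitrary, for illustration only */

/* Index of the first set bit, or NBITS if the map is empty. */
static unsigned int first_set(uint64_t map)
{
	unsigned int i;

	for (i = 0; i < NBITS; i++)
		if (map & (UINT64_C(1) << i))
			return i;
	return NBITS;
}

/* Index of the last set bit, or NBITS if the map is empty. */
static unsigned int last_set(uint64_t map)
{
	unsigned int i;

	for (i = NBITS; i-- > 0; )
		if (map & (UINT64_C(1) << i))
			return i;
	return NBITS;
}

int main(void)
{
	uint64_t dirty_map = UINT64_C(0xf0f0);           /* bits 4-7 and 12-15 set */
	uint64_t va_start  = UINT64_C(0xffffc90000000000); /* made-up block start */
	unsigned int i = first_set(dirty_map);

	if (i < NBITS) {
		unsigned int j = last_set(dirty_map) + 1; /* need exclusive index */
		uint64_t s = va_start + ((uint64_t)i << PAGE_SHIFT_DEMO);
		uint64_t e = va_start + ((uint64_t)j << PAGE_SHIFT_DEMO);

		printf("flush range: [%#jx, %#jx)\n", (uintmax_t)s, (uintmax_t)e);
	}
	return 0;
}

With bits 4-7 and 12-15 set, this prints one range covering both dirty clusters, which mirrors what the patched loop computes for each vmap block.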
Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index d23c4325872..93d3182c330 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1016,15 +1016,16 @@ void vm_unmap_aliases(void)
 
                 rcu_read_lock();
                 list_for_each_entry_rcu(vb, &vbq->free, free_list) {
-                        int i;
+                        int i, j;
 
                         spin_lock(&vb->lock);
                         i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
-                        while (i < VMAP_BBMAP_BITS) {
+                        if (i < VMAP_BBMAP_BITS) {
                                 unsigned long s, e;
-                                int j;
-                                j = find_next_zero_bit(vb->dirty_map,
-                                        VMAP_BBMAP_BITS, i);
+
+                                j = find_last_bit(vb->dirty_map,
+                                                VMAP_BBMAP_BITS);
+                                j = j + 1; /* need exclusive index */
 
                                 s = vb->va->va_start + (i << PAGE_SHIFT);
                                 e = vb->va->va_start + (j << PAGE_SHIFT);
@@ -1034,10 +1035,6 @@ void vm_unmap_aliases(void)
                                         start = s;
                                 if (e > end)
                                         end = e;
-
-                                i = j;
-                                i = find_next_bit(vb->dirty_map,
-                                                VMAP_BBMAP_BITS, i);
                         }
                         spin_unlock(&vb->lock);
                 }
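Read together, the two hunks leave the per-block body of vm_unmap_aliases() looking roughly like this. This is reassembled from the hunks above, not copied verbatim from the tree; the unchanged lines between the two hunks are elided, and the if (s < start) guard is inferred from the context line "start = s;".

		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
			int i, j;

			spin_lock(&vb->lock);
			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
			if (i < VMAP_BBMAP_BITS) {
				unsigned long s, e;

				j = find_last_bit(vb->dirty_map,
						VMAP_BBMAP_BITS);
				j = j + 1; /* need exclusive index */

				s = vb->va->va_start + (i << PAGE_SHIFT);
				e = vb->va->va_start + (j << PAGE_SHIFT);
				/* ... unchanged lines between the two hunks ... */
				if (s < start)
					start = s;
				if (e > end)
					end = e;
			}
			spin_unlock(&vb->lock);
		}

Compared with the old find_next_zero_bit()/find_next_bit() walk, the single [i, j) range computed here may also span clean pages lying between two dirty clusters; the patch trades that potentially wider range for a single pass over the bitmap.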