author		Joonsoo Kim <js1304@gmail.com>	2013-02-09 06:28:06 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2013-02-16 17:54:22 +0000
commit		101eeda38c0ab8a4f916176e325d9e036d981a24 (patch)
tree		874b1f0002c9153e6af28b1ec3dd6aa0e7737086 /arch/arm/mm/mmu.c
parent		ed8fd2186a4e4f3b98434093b56f9b793d48443e (diff)
ARM: 7646/1: mm: use static_vm for managing static mapped areas
A static mapped area is ARM-specific, so it is better not to use the generic vmalloc data structures, vmlist and vmlist_lock, to manage it; doing so adds needless overhead, and removing that overhead is worthwhile.

Now that the static_vm infrastructure has been introduced, we no longer need to iterate over all mapped areas; we iterate over the static mapped areas only, which reduces the overhead of finding a matching area. The architecture dependency on the vmalloc layer is also removed, which helps the maintainability of that layer.

Reviewed-by: Nicolas Pitre <nico@linaro.org>
Acked-by: Rob Herring <rob.herring@calxeda.com>
Tested-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
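For context: the static_vm infrastructure this patch switches to was introduced by the parent commit (ed8fd2186a4e). Below is a minimal sketch of the pieces the diff relies on; the struct layout and the two helpers, add_static_vm_early() and find_static_vm_vaddr(), follow the shape of that series as best recalled and are illustrative, not verbatim kernel source.

/* Sketch of the static_vm infrastructure (parent commit); illustrative. */
#include <linux/init.h>
#include <linux/list.h>
#include <linux/vmalloc.h>

/* A static mapping embeds the generic descriptor and links onto an
 * ARM-private list instead of relying on the global vmlist. */
struct static_vm {
	struct vm_struct vm;
	struct list_head list;
};

static LIST_HEAD(static_vmlist);

/* Register a static mapping during early boot, keeping static_vmlist
 * sorted by virtual address; boot is single-threaded, so no locking. */
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm = &svm->vm;
	void *vaddr;

	vm_area_add_early(vm);	/* the area stays visible to vmalloc */
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		if (curr_svm->vm.addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

/* Find the static mapping containing vaddr, or NULL. Only the short,
 * sorted static_vmlist is walked, not every vmalloc area. */
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;

	list_for_each_entry(svm, &static_vmlist, list) {
		struct vm_struct *vm = &svm->vm;

		if (vm->addr > vaddr)	/* sorted: we have passed it */
			break;
		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}
	return NULL;
}

With only static mappings on the list, the vmlist walks and VM_ARM_* flag checks in fill_pmd_gaps() and pci_reserve_io() below reduce to a short list iteration and a single lookup.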
Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r--	arch/arm/mm/mmu.c	34
1 file changed, 17 insertions, 17 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f0610243bd..a35b314d270 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
 
@@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -859,17 +864,12 @@ static void __init pci_reserve_io(void)
 {
 	struct vm_struct *vm;
 	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
-	}
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else