diff options
author | Volodymyr Babchuk <vlad.babchuk@gmail.com> | 2018-01-19 19:05:40 +0200 |
---|---|---|
committer | Jérôme Forissier <jerome.forissier@linaro.org> | 2018-07-04 17:51:52 +0200 |
commit | 6de30696854249a4a57a0c86401a0eccd6af3c8a (patch) | |
tree | 7a6b94b55def8c49e6d74461632a93d009775bd5 | |
parent | 7558c164b73dc436084fed81a2e75b5b809e0bc1 (diff) |
mmu: align va of memory regions to pa modulo PGDIR_SIZE
If pa % PGDIR_SIZE == va % PGDIR_SIZE, then we can effectively map
large small-page-aligned regions. Most of the region can be mapped
with super blocks and only ends will be mapped using small pages.
Signed-off-by: Volodymyr Babchuk <vlad.babchuk@gmail.com>
Reviewed-by: Jens Wiklander <jens.wiklander@linaro.org>
-rw-r--r-- | core/arch/arm/mm/core_mmu.c | 16 |
1 file changed, 16 insertions, 0 deletions
diff --git a/core/arch/arm/mm/core_mmu.c b/core/arch/arm/mm/core_mmu.c index 1eeedcdc..6bb0c030 100644 --- a/core/arch/arm/mm/core_mmu.c +++ b/core/arch/arm/mm/core_mmu.c @@ -913,6 +913,14 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems) map->attr = core_mmu_type_to_attr(map->type); va -= map->size; va = ROUNDDOWN(va, map->region_size); + /* + * Make sure that va is aligned with pa for + * efficient pgdir mapping. Basically pa & + * pgdir_mask should be == va & pgdir_mask + */ + if (map->size > 2 * CORE_MMU_PGDIR_SIZE) + va -= CORE_MMU_PGDIR_SIZE - + ((map->pa - va) & CORE_MMU_PGDIR_MASK); map->va = va; } } else { @@ -930,6 +938,14 @@ static void init_mem_map(struct tee_mmap_region *memory_map, size_t num_elems) #endif map->attr = core_mmu_type_to_attr(map->type); va = ROUNDUP(va, map->region_size); + /* + * Make sure that va is aligned with pa for + * efficient pgdir mapping. Basically pa & + * pgdir_mask should be == va & pgdir_mask + */ + if (map->size > 2 * CORE_MMU_PGDIR_SIZE) + va += (map->pa - va) & CORE_MMU_PGDIR_MASK; + map->va = va; va += map->size; } |