From 5009065d38c95455bd2d27c2838313e3dd0c5bc7 Mon Sep 17 00:00:00 2001
From: Ohad Ben-Cohen
Date: Thu, 10 Nov 2011 11:32:25 +0200
Subject: iommu/core: stop converting bytes to page order back and forth

Express sizes in bytes rather than in page order, to eliminate the
size -> order -> size conversions we have whenever the IOMMU API is
calling the low level drivers' map/unmap methods.

Adopt all existing drivers.

Signed-off-by: Ohad Ben-Cohen
Cc: David Brown
Cc: David Woodhouse
Cc: Joerg Roedel
Cc: Stepan Moskovchenko
Cc: KyongHo Cho
Cc: Hiroshi DOYU
Cc: Laurent Pinchart
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 11 ++++-------
 1 file changed, 4 insertions(+), 7 deletions(-)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index c0c7820d4c4..2a165010a1c 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3979,12 +3979,11 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
 
 static int intel_iommu_map(struct iommu_domain *domain,
 			   unsigned long iova, phys_addr_t hpa,
-			   int gfp_order, int iommu_prot)
+			   size_t size, int iommu_prot)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	u64 max_addr;
 	int prot = 0;
-	size_t size;
 	int ret;
 
 	if (iommu_prot & IOMMU_READ)
@@ -3994,7 +3993,6 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
 		prot |= DMA_PTE_SNP;
 
-	size = PAGE_SIZE << gfp_order;
 	max_addr = iova + size;
 	if (dmar_domain->max_addr < max_addr) {
 		u64 end;
@@ -4017,11 +4015,10 @@ static int intel_iommu_map(struct iommu_domain *domain,
 	return ret;
 }
 
-static int intel_iommu_unmap(struct iommu_domain *domain,
-			     unsigned long iova, int gfp_order)
+static size_t intel_iommu_unmap(struct iommu_domain *domain,
+				unsigned long iova, size_t size)
 {
 	struct dmar_domain *dmar_domain = domain->priv;
-	size_t size = PAGE_SIZE << gfp_order;
 	int order;
 
 	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
@@ -4030,7 +4027,7 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return order;
+	return PAGE_SIZE << order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
--
cgit v1.2.3
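For reference, a minimal sketch of the round trip this commit removes.
It is not part of the patch: "ops" stands in for any low level driver's
iommu_ops, and the surrounding call sites are hypothetical; only
get_order() and the PAGE_SIZE shift are real kernel idioms:

	/* Sketch only, not from the patch. */

	/* Before: the core derived a page order from the byte count... */
	int gfp_order = get_order(size);	/* bytes -> order */
	ret = ops->map(domain, iova, paddr, gfp_order, prot);
	/*
	 * ...and the driver immediately converted it back, as in the
	 * hunk above: size = PAGE_SIZE << gfp_order;
	 */

	/* After: the byte count flows through unchanged. */
	ret = ops->map(domain, iova, paddr, size, prot);

Passing the size through in bytes drops both conversions and keeps
size_t semantics end to end, on the map and unmap paths alike.

From 6d1c56a9db48977942602a50e88eeb61a3e625eb Mon Sep 17 00:00:00 2001
From: Ohad Ben-Cohen
Date: Thu, 10 Nov 2011 11:32:30 +0200
Subject: iommu/intel: announce supported page sizes

Let the IOMMU core know we support arbitrary page sizes (as long as
they're an order of 4KiB).

This way the IOMMU core will retain the existing behavior we're used
to; it will let us map regions that:
- have a size which is an order of 4KiB
- are naturally aligned

Note: Intel IOMMU hardware doesn't support arbitrary page sizes, but
the driver does (it splits arbitrary-sized mappings into the pages
supported by the hardware).

To keep everything simple for now, though, this patch effectively
tells the IOMMU core to keep giving this driver the same memory
regions it did before, so nothing changes as far as the driver is
concerned.

At this point, the page sizes announced remain static within the
IOMMU core. To correctly utilize the pgsize-splitting of the IOMMU
core with this driver, some core changes are still needed, because
Intel IOMMU page size capabilities may differ between different DMA
remapping devices.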
Signed-off-by: Ohad Ben-Cohen
Cc: David Woodhouse
Signed-off-by: Joerg Roedel
---
 drivers/iommu/intel-iommu.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

(limited to 'drivers/iommu/intel-iommu.c')

diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2a165010a1c..4c780efff16 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -78,6 +78,24 @@
 #define LEVEL_STRIDE		(9)
 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
 
+/*
+ * This bitmap is used to advertise the page sizes our hardware supports
+ * to the IOMMU core, which will then use this information to split
+ * physically contiguous memory regions it is mapping into page sizes
+ * that we support.
+ *
+ * Traditionally the IOMMU core just handed us the mappings directly,
+ * after making sure the size is an order of a 4KiB page and that the
+ * mapping has natural alignment.
+ *
+ * To retain this behavior, we currently advertise that we support
+ * all page sizes that are an order of 4KiB.
+ *
+ * If at some point we'd like to utilize the IOMMU core's new behavior,
+ * we could change this to advertise the real page sizes we support.
+ */
+#define INTEL_IOMMU_PGSIZES	(~0xFFFUL)
+
 static inline int agaw_to_level(int agaw)
 {
 	return agaw + 2;
@@ -4066,6 +4084,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.unmap		= intel_iommu_unmap,
 	.iova_to_phys	= intel_iommu_iova_to_phys,
 	.domain_has_cap	= intel_iommu_domain_has_cap,
+	.pgsize_bitmap	= INTEL_IOMMU_PGSIZES,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
--
cgit v1.2.3
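For reference, a rough sketch of how a core-side splitter can consume
pgsize_bitmap. This paraphrases the companion IOMMU core patch in this
series rather than anything in the Intel driver; the helper name
pick_pgsize is hypothetical, and error handling is omitted:

	/* Pick the largest page size usable for the next chunk. */
	static unsigned long pick_pgsize(unsigned long pgsize_bitmap,
					 unsigned long iova,
					 phys_addr_t paddr, size_t size)
	{
		unsigned long addr_merge = iova | paddr;
		/* largest page that still fits the remaining size */
		unsigned int pgsize_idx = __fls(size);
		unsigned long pgsize;

		/* honor the alignment of both iova and paddr */
		if (addr_merge)
			pgsize_idx = min_t(unsigned int, pgsize_idx,
					   __ffs(addr_merge));

		/* candidate sizes, filtered by the driver's bitmap */
		pgsize = ((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap;

		/* pick the biggest supported page */
		return 1UL << __fls(pgsize);
	}

With INTEL_IOMMU_PGSIZES set to ~0xFFFUL, every bit from 4KiB upward
is set, so this selection degenerates to the largest naturally aligned
power-of-two chunk: exactly the regions the driver was handed before,
which is why nothing changes for it yet.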