Diffstat (limited to 'drivers/iommu/dma-mapping-fast.c')
-rw-r--r-- drivers/iommu/dma-mapping-fast.c | 38 ++++++++++++++++++++++++++++++------------
1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 8c6364f03eac..0881d68f34d8 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -188,7 +188,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
iommu_tlbiall(mapping->domain);
mapping->have_stale_tlbs = false;
- av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base,
+ av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start,
+ mapping->base,
mapping->base + mapping->size - 1,
skip_sync);
}
@@ -367,7 +369,8 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
if (unlikely(iova == DMA_ERROR_CODE))
goto fail;
- pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova);
+ pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start, iova);
if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
goto fail_free_iova;
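
Every caller changed in the hunks below funnels through iopte_pmd_offset(), which indexes the flat PTE array from a base IOVA. A minimal sketch of the assumed shape of that helper (not quoted from this patch) shows why the base has to be the start of the range the array actually covers, i.e. the domain aperture, rather than mapping->base:

    /* Hedged sketch: the pmds array is assumed to hold one PTE per page
     * of the aperture, so indexing must use the same base the array was
     * laid out against. */
    static inline av8l_fast_iopte *iopte_pmd_offset(av8l_fast_iopte *pmds,
                                                    dma_addr_t base,
                                                    dma_addr_t iova)
    {
            return pmds + ((iova - base) >> FAST_PAGE_SHIFT);
    }
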
@@ -391,7 +394,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
unsigned long flags;
av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
- mapping->base, iova);
+ mapping->domain->geometry.aperture_start,
+ iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
int nptes = len >> FAST_PAGE_SHIFT;
@@ -414,7 +418,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
{
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
- mapping->base, iova);
+ mapping->domain->geometry.aperture_start,
+ iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -427,7 +432,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev,
{
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
- mapping->base, iova);
+ mapping->domain->geometry.aperture_start,
+ iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -555,8 +561,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
while (sg_miter_next(&miter)) {
int nptes = miter.length >> FAST_PAGE_SHIFT;
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base,
- iova_iter);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start,
+ iova_iter);
if (unlikely(av8l_fast_map_public(
ptep, page_to_phys(miter.page),
miter.length, prot))) {
@@ -584,7 +591,9 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
out_unmap:
/* need to take the lock again for page tables and iova */
spin_lock_irqsave(&mapping->lock, flags);
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start,
+ dma_addr);
av8l_fast_unmap_public(ptep, size);
fast_dmac_clean_range(mapping, ptep, ptep + count);
out_free_iova:
@@ -616,7 +625,8 @@ static void fast_smmu_free(struct device *dev, size_t size,
pages = area->pages;
dma_common_free_remap(vaddr, size, VM_USERMAP, false);
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->domain->geometry.aperture_start, dma_handle);
spin_lock_irqsave(&mapping->lock, flags);
av8l_fast_unmap_public(ptep, size);
fast_dmac_clean_range(mapping, ptep, ptep + count);
@@ -720,7 +730,7 @@ static const struct dma_map_ops fast_smmu_dma_ops = {
*
* Creates a mapping structure which holds information about used/unused IO
* address ranges, which is required to perform mapping with IOMMU aware
- * functions. The only VA range supported is [0, 4GB).
+ * functions. The only VA range supported is [0, 4GB].
*
* The client device need to be attached to the mapping with
* fast_smmu_attach_device function.
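
As a usage note, a client pairs the two calls roughly as below. This is a hedged sketch: the exact signatures (bus pointer, base and size types) are assumed from the surrounding code, not quoted from this patch:

    /* Hypothetical client, assuming the fast-SMMU API shapes above:
     * create a mapping covering the supported VA range, then attach. */
    struct dma_iommu_mapping *mapping;

    mapping = fast_smmu_create_mapping(&platform_bus_type, 0x0, SZ_1G * 4ULL);
    if (IS_ERR(mapping))
            return PTR_ERR(mapping);

    if (fast_smmu_attach_device(dev, mapping))
            return -EINVAL;    /* geometry or IOMMU attach failed */
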
@@ -774,6 +784,7 @@ int fast_smmu_attach_device(struct device *dev,
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
u64 size = (u64)mapping->bits << PAGE_SHIFT;
+ struct iommu_domain_geometry geometry;
if (mapping->base + size > (SZ_1G * 4ULL))
return -EINVAL;
@@ -788,8 +799,11 @@ int fast_smmu_attach_device(struct device *dev,
mapping->fast->domain = domain;
mapping->fast->dev = dev;
- domain->geometry.aperture_start = mapping->base;
- domain->geometry.aperture_end = mapping->base + size - 1;
+ geometry.aperture_start = mapping->base;
+ geometry.aperture_end = mapping->base + size - 1;
+ if (iommu_domain_set_attr(domain, DOMAIN_ATTR_GEOMETRY,
+ &geometry))
+ return -EINVAL;
if (iommu_attach_device(domain, dev))
return -EINVAL;
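
For reference, the geometry published through DOMAIN_ATTR_GEOMETRY above can be read back through the matching get-attribute call of this kernel generation, which is what makes it a better home for the aperture than the private mapping->base. A minimal hedged sketch, not part of this patch:

    /* Read the aperture back after attach; the PTE lookups above now
     * derive their base from it. */
    struct iommu_domain_geometry geo;

    if (!iommu_domain_get_attr(domain, DOMAIN_ATTR_GEOMETRY, &geo))
            dev_dbg(dev, "aperture: %pad..%pad\n",
                    &geo.aperture_start, &geo.aperture_end);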