diff options
Diffstat (limited to 'drivers/iommu/dma-mapping-fast.c')
| -rw-r--r-- | drivers/iommu/dma-mapping-fast.c | 25 |
1 file changed, 17 insertions, 8 deletions
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c index 6d90221486c9..2acb9242bcf8 100644 --- a/drivers/iommu/dma-mapping-fast.c +++ b/drivers/iommu/dma-mapping-fast.c @@ -151,7 +151,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping, iommu_tlbiall(mapping->domain); mapping->have_stale_tlbs = false; - av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync); + av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base, + mapping->base + mapping->size - 1, + skip_sync); } return (bit << FAST_PAGE_SHIFT) + mapping->base; @@ -328,7 +330,7 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page, if (unlikely(iova == DMA_ERROR_CODE)) goto fail; - pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova); if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot))) goto fail_free_iova; @@ -351,7 +353,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova, { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; unsigned long flags; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->base, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; size_t len = ALIGN(size + offset, FAST_PAGE_SIZE); int nptes = len >> FAST_PAGE_SHIFT; @@ -373,7 +376,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev, dma_addr_t iova, size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->base, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); @@ -385,7 +389,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev, dma_addr_t iova, 
size_t size, enum dma_data_direction dir) { struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast; - av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova); + av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, + mapping->base, iova); unsigned long offset = iova & ~FAST_PAGE_MASK; struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK)); @@ -513,7 +518,8 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, while (sg_miter_next(&miter)) { int nptes = miter.length >> FAST_PAGE_SHIFT; - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, + iova_iter); if (unlikely(av8l_fast_map_public( ptep, page_to_phys(miter.page), miter.length, prot))) { @@ -541,7 +547,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size, out_unmap: /* need to take the lock again for page tables and iova */ spin_lock_irqsave(&mapping->lock, flags); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr); av8l_fast_unmap_public(ptep, size); fast_dmac_clean_range(mapping, ptep, ptep + count); out_free_iova: @@ -573,7 +579,7 @@ static void fast_smmu_free(struct device *dev, size_t size, pages = area->pages; dma_common_free_remap(vaddr, size, VM_USERMAP, false); - ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle); + ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle); spin_lock_irqsave(&mapping->lock, flags); av8l_fast_unmap_public(ptep, size); fast_dmac_clean_range(mapping, ptep, ptep + count); @@ -745,6 +751,9 @@ int fast_smmu_attach_device(struct device *dev, mapping->fast->domain = domain; mapping->fast->dev = dev; + domain->geometry.aperture_start = mapping->base; + domain->geometry.aperture_end = mapping->base + size - 1; + if (iommu_attach_device(domain, dev)) return -EINVAL; |
