Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c         |   3
-rw-r--r--  drivers/iommu/arm-smmu.c          |   2
-rw-r--r--  drivers/iommu/dma-mapping-fast.c  |  25
-rw-r--r--  drivers/iommu/dmar.c              |   7
-rw-r--r--  drivers/iommu/intel-iommu.c       |  30
-rw-r--r--  drivers/iommu/intel-svm.c         |  28
-rw-r--r--  drivers/iommu/io-pgtable-fast.c   | 116
-rw-r--r--  drivers/iommu/io-pgtable.h        |   2
8 files changed, 153 insertions(+), 60 deletions(-)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0397985a2601..5975d76ce755 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1833,6 +1833,9 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom)
kfree(dom->aperture[i]);
}
+ if (dom->domain.id)
+ domain_id_free(dom->domain.id);
+
kfree(dom);
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ce1eb562be36..03a691723349 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1835,6 +1835,8 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
.oas = oas,
.tlb = &arm_smmu_gather_ops,
.iommu_dev = smmu->dev,
+ .iova_base = domain->geometry.aperture_start,
+ .iova_end = domain->geometry.aperture_end,
};
}
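
The two new fields hand the domain's IOVA aperture to the page-table code; the matching io_pgtable_cfg members are added in the io-pgtable.h hunk at the end of this diff. A standalone sketch (not kernel code) of why the allocator wants the aperture instead of assuming a fixed 4GB space; pgtbl_cfg and pmd_pages_needed are illustrative names, not kernel symbols:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the iova_base/iova_end pair in io_pgtable_cfg. */
struct pgtbl_cfg {
        uint64_t iova_base;     /* first IOVA the tables must cover */
        uint64_t iova_end;      /* last IOVA, inclusive */
};

#define PAGE_SHIFT 12           /* 4KiB pages, 512 PTEs per table page */

/* One PMD page maps 2^(12+9) = 2MiB, so count the 2MiB slots in the range. */
static uint64_t pmd_pages_needed(const struct pgtbl_cfg *cfg)
{
        uint64_t first = cfg->iova_base >> (PAGE_SHIFT + 9);
        uint64_t last  = cfg->iova_end  >> (PAGE_SHIFT + 9);

        return last - first + 1;
}

int main(void)
{
        /* A 1GiB aperture starting at 0x40000000. */
        struct pgtbl_cfg cfg = {
                .iova_base = 0x40000000ULL,
                .iova_end  = 0x40000000ULL + (1ULL << 30) - 1,
        };

        printf("PMD pages to prepopulate: %llu\n",
               (unsigned long long)pmd_pages_needed(&cfg));
        return 0;
}

For this 1GiB aperture the sketch prints 512, versus the 2048 PMD pages the old fixed-4GB prepopulation in io-pgtable-fast.c always allocated.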
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 6d90221486c9..2acb9242bcf8 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -151,7 +151,9 @@ static dma_addr_t __fast_smmu_alloc_iova(struct dma_fast_smmu_mapping *mapping,
iommu_tlbiall(mapping->domain);
mapping->have_stale_tlbs = false;
- av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, skip_sync);
+ av8l_fast_clear_stale_ptes(mapping->pgtbl_pmds, mapping->base,
+ mapping->base + mapping->size - 1,
+ skip_sync);
}
return (bit << FAST_PAGE_SHIFT) + mapping->base;
@@ -328,7 +330,7 @@ static dma_addr_t fast_smmu_map_page(struct device *dev, struct page *page,
if (unlikely(iova == DMA_ERROR_CODE))
goto fail;
- pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+ pmd = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, iova);
if (unlikely(av8l_fast_map_public(pmd, phys_to_map, len, prot)))
goto fail_free_iova;
@@ -351,7 +353,8 @@ static void fast_smmu_unmap_page(struct device *dev, dma_addr_t iova,
{
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
unsigned long flags;
- av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+ av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->base, iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
size_t len = ALIGN(size + offset, FAST_PAGE_SIZE);
int nptes = len >> FAST_PAGE_SHIFT;
@@ -373,7 +376,8 @@ static void fast_smmu_sync_single_for_cpu(struct device *dev,
dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
- av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+ av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->base, iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -385,7 +389,8 @@ static void fast_smmu_sync_single_for_device(struct device *dev,
dma_addr_t iova, size_t size, enum dma_data_direction dir)
{
struct dma_fast_smmu_mapping *mapping = dev->archdata.mapping->fast;
- av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds, iova);
+ av8l_fast_iopte *pmd = iopte_pmd_offset(mapping->pgtbl_pmds,
+ mapping->base, iova);
unsigned long offset = iova & ~FAST_PAGE_MASK;
struct page *page = phys_to_page((*pmd & FAST_PTE_ADDR_MASK));
@@ -513,7 +518,8 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
while (sg_miter_next(&miter)) {
int nptes = miter.length >> FAST_PAGE_SHIFT;
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, iova_iter);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base,
+ iova_iter);
if (unlikely(av8l_fast_map_public(
ptep, page_to_phys(miter.page),
miter.length, prot))) {
@@ -541,7 +547,7 @@ static void *fast_smmu_alloc(struct device *dev, size_t size,
out_unmap:
/* need to take the lock again for page tables and iova */
spin_lock_irqsave(&mapping->lock, flags);
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_addr);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_addr);
av8l_fast_unmap_public(ptep, size);
fast_dmac_clean_range(mapping, ptep, ptep + count);
out_free_iova:
@@ -573,7 +579,7 @@ static void fast_smmu_free(struct device *dev, size_t size,
pages = area->pages;
dma_common_free_remap(vaddr, size, VM_USERMAP, false);
- ptep = iopte_pmd_offset(mapping->pgtbl_pmds, dma_handle);
+ ptep = iopte_pmd_offset(mapping->pgtbl_pmds, mapping->base, dma_handle);
spin_lock_irqsave(&mapping->lock, flags);
av8l_fast_unmap_public(ptep, size);
fast_dmac_clean_range(mapping, ptep, ptep + count);
@@ -745,6 +751,9 @@ int fast_smmu_attach_device(struct device *dev,
mapping->fast->domain = domain;
mapping->fast->dev = dev;
+ domain->geometry.aperture_start = mapping->base;
+ domain->geometry.aperture_end = mapping->base + size - 1;
+
if (iommu_attach_device(domain, dev))
return -EINVAL;
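
Every iopte_pmd_offset() call now carries mapping->base. The helper itself is defined in a header outside this diff, but the added argument implies the PTE index is computed relative to the aperture start rather than from IOVA zero. A standalone sketch of that lookup; pte_for_iova is an illustrative stand-in, not the kernel helper:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t fast_pte;

#define FAST_PAGE_SHIFT 12

/*
 * pmds is a virtually contiguous array with one PTE per 4KiB page of the
 * aperture starting at base, so the index must be relative to base once
 * the aperture no longer begins at IOVA zero.
 */
static fast_pte *pte_for_iova(fast_pte *pmds, uint64_t base, uint64_t iova)
{
        return pmds + ((iova - base) >> FAST_PAGE_SHIFT);
}

int main(void)
{
        static fast_pte pmds[1024];             /* covers a 4MiB aperture */
        uint64_t base = 0x80000000ULL;
        uint64_t iova = base + 0x3000;          /* third page of the aperture */

        printf("index = %ld\n", (long)(pte_for_iova(pmds, base, iova) - pmds));
        return 0;
}

This prints index = 3; with the old absolute indexing, the same IOVA would land far outside a table sized only for the aperture.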
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3821c4786662..e913a930ac80 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -326,7 +326,9 @@ static int dmar_pci_bus_notifier(struct notifier_block *nb,
struct pci_dev *pdev = to_pci_dev(data);
struct dmar_pci_notify_info *info;
- /* Only care about add/remove events for physical functions */
+ /* Only care about add/remove events for physical functions.
+ * For VFs we actually do the lookup based on the corresponding
+ * PF in device_to_iommu() anyway. */
if (pdev->is_virtfn)
return NOTIFY_DONE;
if (action != BUS_NOTIFY_ADD_DEVICE &&
@@ -1858,10 +1860,11 @@ static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
/*
* All PCI devices managed by this unit should have been destroyed.
*/
- if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
+ if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) {
for_each_active_dev_scope(dmaru->devices,
dmaru->devices_cnt, i, dev)
return -EBUSY;
+ }
ret = dmar_ir_hotplug(dmaru, false);
if (ret == 0)
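
for_each_active_dev_scope() expands to a for loop, so the braceless form was already correct; the added braces just make it explicit that the return -EBUSY sits inside both the if and the loop (dmar_check_one_atsr() below gets the same treatment). A contrived standalone sketch of the pattern with an illustrative iterator macro, not the kernel one:

#include <stdio.h>

/* Illustrative stand-in for a for_each_*() iterator macro. */
#define for_each_item(arr, n, i, v) \
        for ((i) = 0; (i) < (n) && ((v) = (arr)[i], 1); (i)++)

static int check_busy(const int *devs, int cnt)
{
        int i, dev;

        /* Braces make the scope of the loop under the if explicit. */
        if (cnt) {
                for_each_item(devs, cnt, i, dev)
                        return -1;      /* -EBUSY in the kernel code */
        }
        return 0;
}

int main(void)
{
        int devs[] = { 1, 2, 3 };

        printf("%d\n", check_busy(devs, 3));    /* -1: scope still populated */
        printf("%d\n", check_busy(devs, 0));    /*  0: nothing left */
        return 0;
}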
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 24d81308a1a6..59e9abd3345e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -885,7 +885,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
return NULL;
if (dev_is_pci(dev)) {
+ struct pci_dev *pf_pdev;
+
pdev = to_pci_dev(dev);
+ /* VFs aren't listed in scope tables; we need to look up
+ * the PF instead to find the IOMMU. */
+ pf_pdev = pci_physfn(pdev);
+ dev = &pf_pdev->dev;
segment = pci_domain_nr(pdev->bus);
} else if (has_acpi_companion(dev))
dev = &ACPI_COMPANION(dev)->dev;
@@ -898,6 +904,13 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
for_each_active_dev_scope(drhd->devices,
drhd->devices_cnt, i, tmp) {
if (tmp == dev) {
+ /* For a VF use its original BDF# not that of the PF
+ * which we used for the IOMMU lookup. Strictly speaking
+ * we could do this for all PCI devices; we only need to
+ * get the BDF# from the scope table for ACPI matches. */
+ if (pdev->is_virtfn)
+ goto got_pdev;
+
*bus = drhd->devices[i].bus;
*devfn = drhd->devices[i].devfn;
goto out;
@@ -1672,6 +1685,7 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
if (!iommu->domains || !iommu->domain_ids)
return;
+again:
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_entry_safe(info, tmp, &device_domain_list, global) {
struct dmar_domain *domain;
@@ -1684,10 +1698,19 @@ static void disable_dmar_iommu(struct intel_iommu *iommu)
domain = info->domain;
- dmar_remove_one_dev_info(domain, info->dev);
+ __dmar_remove_one_dev_info(info);
- if (!domain_type_is_vm_or_si(domain))
+ if (!domain_type_is_vm_or_si(domain)) {
+ /*
+ * The domain_exit() function can't be called under
+ * device_domain_lock, as it takes this lock itself.
+ * So release the lock here and re-run the loop
+ * afterwards.
+ */
+ spin_unlock_irqrestore(&device_domain_lock, flags);
domain_exit(domain);
+ goto again;
+ }
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -4182,10 +4205,11 @@ int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
if (!atsru)
return 0;
- if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
+ if (!atsru->include_all && atsru->devices && atsru->devices_cnt) {
for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
i, dev)
return -EBUSY;
+ }
return 0;
}
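
The disable_dmar_iommu() hunk uses the classic drop-lock-and-restart pattern: the list is walked under device_domain_lock, but domain_exit() takes that lock itself, so the walk releases it, does the teardown, and restarts from the again label; progress is guaranteed because each pass removes at least one entry. A minimal user-space sketch of the same pattern, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static int entries[4] = { 1, 1, 1, 1 };   /* stand-ins for dev_info entries */

/* Takes the list lock itself, so it must be called with the lock dropped. */
static void heavy_cleanup(int idx)
{
        pthread_mutex_lock(&list_lock);
        entries[idx] = 0;
        pthread_mutex_unlock(&list_lock);
}

static void teardown(void)
{
        int i;
again:
        pthread_mutex_lock(&list_lock);
        for (i = 0; i < 4; i++) {
                if (!entries[i])
                        continue;
                /* Drop the lock, do the heavy work, then rescan from the top. */
                pthread_mutex_unlock(&list_lock);
                heavy_cleanup(i);
                goto again;
        }
        pthread_mutex_unlock(&list_lock);
}

int main(void)
{
        teardown();
        printf("%d %d %d %d\n", entries[0], entries[1], entries[2], entries[3]);
        return 0;
}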
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index d9939fa9b588..f929879ecae6 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -39,10 +39,18 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
struct page *pages;
int order;
- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
-
+ /* Start at 2 because it's defined as 2^(1+PSS) */
+ iommu->pasid_max = 2 << ecap_pss(iommu->ecap);
+
+ /* Eventually I'm promised we will get a multi-level PASID table
+ * and it won't have to be physically contiguous. Until then,
+ * limit the size because 8MiB contiguous allocations can be hard
+ * to come by. The limit of 0x20000, which is 1MiB for each of
+ * the PASID and PASID-state tables, is somewhat arbitrary. */
+ if (iommu->pasid_max > 0x20000)
+ iommu->pasid_max = 0x20000;
+
+ order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (!pages) {
pr_warn("IOMMU: %s: Failed to allocate PASID table\n",
@@ -53,6 +61,8 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
pr_info("%s: Allocated order %d PASID table.\n", iommu->name, order);
if (ecap_dis(iommu->ecap)) {
+ /* Just making it explicit... */
+ BUILD_BUG_ON(sizeof(struct pasid_entry) != sizeof(struct pasid_state_entry));
pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
if (pages)
iommu->pasid_state_table = page_address(pages);
@@ -68,11 +78,7 @@ int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu)
int intel_svm_free_pasid_tables(struct intel_iommu *iommu)
{
- int order;
-
- order = ecap_pss(iommu->ecap) + 7 - PAGE_SHIFT;
- if (order < 0)
- order = 0;
+ int order = get_order(sizeof(struct pasid_entry) * iommu->pasid_max);
if (iommu->pasid_table) {
free_pages((unsigned long)iommu->pasid_table, order);
@@ -371,8 +377,8 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
}
svm->iommu = iommu;
- if (pasid_max > 2 << ecap_pss(iommu->ecap))
- pasid_max = 2 << ecap_pss(iommu->ecap);
+ if (pasid_max > iommu->pasid_max)
+ pasid_max = iommu->pasid_max;
/* Do not use PASID 0 in caching mode (virtualised IOMMU) */
ret = idr_alloc(&iommu->pasid_idr, svm,
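
Putting numbers on the new sizing: ecap_pss() yields PSS, the table holds 2^(1+PSS) PASIDs, the count is capped at 0x20000 entries, and the allocation order follows from entry size times count. A standalone sketch of that arithmetic; the 8-byte entry size and the local get_order() are assumptions for illustration (the real definitions live in intel-svm.c and asm/page.h):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PASID_ENTRY_SZ  8       /* assumed: one 64-bit descriptor per PASID */

/* Minimal stand-in for the kernel's get_order(): log2 of pages, rounded up. */
static int get_order(unsigned long size)
{
        unsigned long pages = (size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
        int order = 0;

        while ((1UL << order) < pages)
                order++;
        return order;
}

int main(void)
{
        unsigned int pss = 19;                  /* example ecap_pss() value */
        unsigned int pasid_max = 2U << pss;     /* 2^(1+PSS) = 1M PASIDs */

        if (pasid_max > 0x20000)                /* cap from the hunk above */
                pasid_max = 0x20000;

        printf("pasid_max=%u order=%d\n", pasid_max,
               get_order((unsigned long)PASID_ENTRY_SZ * pasid_max));
        return 0;
}

0x20000 entries at 8 bytes each is 1MiB, i.e. an order-8 (256-page) allocation, matching the "1MiB for each of the PASID and PASID-state tables" comment; the uncapped 1M-entry table would have needed an order-11, 8MiB contiguous allocation.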
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 5d32a382e291..3582e206db68 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/io-pgtable-fast.h>
+#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <linux/vmalloc.h>
@@ -42,6 +43,9 @@ struct av8l_fast_io_pgtable {
av8l_fast_iopte *puds[4];
av8l_fast_iopte *pmds;
struct page **pages; /* page table memory */
+ int nr_pages;
+ dma_addr_t base;
+ dma_addr_t end;
};
/* Page table bits */
@@ -168,12 +172,14 @@ static void __av8l_check_for_stale_tlb(av8l_fast_iopte *ptep)
}
}
-void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, bool skip_sync)
+void av8l_fast_clear_stale_ptes(av8l_fast_iopte *pmds, u64 base,
+ u64 end, bool skip_sync)
{
int i;
av8l_fast_iopte *pmdp = pmds;
- for (i = 0; i < ((SZ_1G * 4UL) >> AV8L_FAST_PAGE_SHIFT); ++i) {
+ for (i = base >> AV8L_FAST_PAGE_SHIFT;
+ i <= (end >> AV8L_FAST_PAGE_SHIFT); ++i) {
if (!(*pmdp & AV8L_FAST_PTE_VALID)) {
*pmdp = 0;
if (!skip_sync)
@@ -224,7 +230,7 @@ static int av8l_fast_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
- av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
+ av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova);
unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
av8l_fast_map_public(ptep, paddr, size, prot);
@@ -255,7 +261,7 @@ static size_t av8l_fast_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t size)
{
struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
- av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, iova);
+ av8l_fast_iopte *ptep = iopte_pmd_offset(data->pmds, data->base, iova);
unsigned long nptes = size >> AV8L_FAST_PAGE_SHIFT;
__av8l_fast_unmap(ptep, size, false);
@@ -333,7 +339,7 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
}
/*
- * We need 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and
+ * We need max 1 page for the pgd, 4 pages for puds (1GB VA per pud page) and
* 2048 pages for pmds (each pud page contains 512 table entries, each
* pointing to a pmd).
*/
@@ -342,12 +348,38 @@ av8l_fast_alloc_pgtable_data(struct io_pgtable_cfg *cfg)
#define NUM_PMD_PAGES 2048
#define NUM_PGTBL_PAGES (NUM_PGD_PAGES + NUM_PUD_PAGES + NUM_PMD_PAGES)
+/* undefine arch specific definitions which depends on page table format */
+#undef pud_index
+#undef pud_mask
+#undef pud_next
+#undef pmd_index
+#undef pmd_mask
+#undef pmd_next
+
+#define pud_index(addr) (((addr) >> 30) & 0x3)
+#define pud_mask(addr) ((addr) & ~((1UL << 30) - 1))
+#define pud_next(addr, end) \
+({ unsigned long __boundary = pud_mask(addr + (1UL << 30));\
+ (__boundary - 1 < (end) - 1) ? __boundary : (end); \
+})
+
+#define pmd_index(addr) (((addr) >> 21) & 0x1ff)
+#define pmd_mask(addr) ((addr) & ~((1UL << 21) - 1))
+#define pmd_next(addr, end) \
+({ unsigned long __boundary = pmd_mask(addr + (1UL << 21));\
+ (__boundary - 1 < (end) - 1) ? __boundary : (end); \
+})
+
static int
av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
struct io_pgtable_cfg *cfg, void *cookie)
{
int i, j, pg = 0;
struct page **pages, *page;
+ dma_addr_t base = cfg->iova_base;
+ dma_addr_t end = cfg->iova_end;
+ dma_addr_t pud, pmd;
+ int pmd_pg_index;
pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, __GFP_NOWARN |
__GFP_NORETRY);
@@ -365,10 +397,11 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
data->pgd = page_address(page);
/*
- * We need 2048 entries at level 2 to map 4GB of VA space. A page
- * can hold 512 entries, so we need 4 pages.
+ * We need max 2048 entries at level 2 to map 4GB of VA space. A page
+ * can hold 512 entries, so we need max 4 pages.
*/
- for (i = 0; i < 4; ++i) {
+ for (i = pud_index(base), pud = base; pud < end;
+ ++i, pud = pud_next(pud, end)) {
av8l_fast_iopte pte, *ptep;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
@@ -383,18 +416,26 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
dmac_clean_range(data->pgd, data->pgd + 4);
/*
- * We have 4 puds, each of which can point to 512 pmds, so we'll
- * have 2048 pmds, each of which can hold 512 ptes, for a grand
+ * We have max 4 puds, each of which can point to 512 pmds, so we'll
+ * have max 2048 pmds, each of which can hold 512 ptes, for a grand
* total of 2048*512=1048576 PTEs.
*/
- for (i = 0; i < 4; ++i) {
- for (j = 0; j < 512; ++j) {
+ pmd_pg_index = pg;
+ for (i = pud_index(base), pud = base; pud < end;
+ ++i, pud = pud_next(pud, end)) {
+ for (j = pmd_index(pud), pmd = pud; pmd < pud_next(pud, end);
+ ++j, pmd = pmd_next(pmd, end)) {
av8l_fast_iopte pte, *pudp;
+ void *addr;
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page)
goto err_free_pages;
pages[pg++] = page;
+
+ addr = page_address(page);
+ dmac_clean_range(addr, addr + SZ_4K);
+
pte = page_to_phys(page) | AV8L_FAST_PTE_TYPE_TABLE;
pudp = data->puds[i] + j;
*pudp = pte;
@@ -402,21 +443,21 @@ av8l_fast_prepopulate_pgtables(struct av8l_fast_io_pgtable *data,
dmac_clean_range(data->puds[i], data->puds[i] + 512);
}
- if (WARN_ON(pg != NUM_PGTBL_PAGES))
- goto err_free_pages;
-
/*
* We map the pmds into a virtually contiguous space so that we
* don't have to traverse the first two levels of the page tables
* to find the appropriate pud. Instead, it will be a simple
* offset from the virtual base of the pmds.
*/
- data->pmds = vmap(&pages[NUM_PGD_PAGES + NUM_PUD_PAGES], NUM_PMD_PAGES,
+ data->pmds = vmap(&pages[pmd_pg_index], pg - pmd_pg_index,
VM_IOREMAP, PAGE_KERNEL);
if (!data->pmds)
goto err_free_pages;
data->pages = pages;
+ data->nr_pages = pg;
+ data->base = base;
+ data->end = end;
return 0;
err_free_pages:
@@ -516,7 +557,7 @@ static void av8l_fast_free_pgtable(struct io_pgtable *iop)
struct av8l_fast_io_pgtable *data = iof_pgtable_to_data(iop);
vunmap(data->pmds);
- for (i = 0; i < NUM_PGTBL_PAGES; ++i)
+ for (i = 0; i < data->nr_pages; ++i)
__free_page(data->pages[i]);
kvfree(data->pages);
kfree(data);
@@ -588,6 +629,7 @@ static int __init av8l_fast_positive_testing(void)
struct av8l_fast_io_pgtable *data;
av8l_fast_iopte *pmds;
u64 max = SZ_1G * 4ULL - 1;
+ u64 base = 0;
cfg = (struct io_pgtable_cfg) {
.quirks = 0,
@@ -595,6 +637,8 @@ static int __init av8l_fast_positive_testing(void)
.ias = 32,
.oas = 32,
.pgsize_bitmap = SZ_4K,
+ .iova_base = base,
+ .iova_end = max,
};
cfg_cookie = &cfg;
@@ -607,81 +651,81 @@ static int __init av8l_fast_positive_testing(void)
pmds = data->pmds;
/* map the entire 4GB VA space with 4K map calls */
- for (iova = 0; iova < max; iova += SZ_4K) {
+ for (iova = base; iova < max; iova += SZ_4K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
failed++;
continue;
}
}
- if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- max)))
+ if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+ base, max - base)))
failed++;
/* unmap it all */
- for (iova = 0; iova < max; iova += SZ_4K) {
+ for (iova = base; iova < max; iova += SZ_4K) {
if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
failed++;
}
/* sweep up TLB proving PTEs */
- av8l_fast_clear_stale_ptes(pmds, false);
+ av8l_fast_clear_stale_ptes(pmds, base, max, false);
/* map the entire 4GB VA space with 8K map calls */
- for (iova = 0; iova < max; iova += SZ_8K) {
+ for (iova = base; iova < max; iova += SZ_8K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
failed++;
continue;
}
}
- if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- max)))
+ if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+ base, max - base)))
failed++;
/* unmap it all with 8K unmap calls */
- for (iova = 0; iova < max; iova += SZ_8K) {
+ for (iova = base; iova < max; iova += SZ_8K) {
if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
failed++;
}
/* sweep up TLB proving PTEs */
- av8l_fast_clear_stale_ptes(pmds, false);
+ av8l_fast_clear_stale_ptes(pmds, base, max, false);
/* map the entire 4GB VA space with 16K map calls */
- for (iova = 0; iova < max; iova += SZ_16K) {
+ for (iova = base; iova < max; iova += SZ_16K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
failed++;
continue;
}
}
- if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- max)))
+ if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+ base, max - base)))
failed++;
/* unmap it all */
- for (iova = 0; iova < max; iova += SZ_16K) {
+ for (iova = base; iova < max; iova += SZ_16K) {
if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
failed++;
}
/* sweep up TLB proving PTEs */
- av8l_fast_clear_stale_ptes(pmds, false);
+ av8l_fast_clear_stale_ptes(pmds, base, max, false);
/* map the entire 4GB VA space with 64K map calls */
- for (iova = 0; iova < max; iova += SZ_64K) {
+ for (iova = base; iova < max; iova += SZ_64K) {
if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
failed++;
continue;
}
}
- if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
- max)))
+ if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, base,
+ base, max - base)))
failed++;
/* unmap it all at once */
- if (WARN_ON(ops->unmap(ops, 0, max) != max))
+ if (WARN_ON(ops->unmap(ops, base, max - base) != (max - base)))
failed++;
free_io_pgtable_ops(ops);
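
The local pud_*/pmd_* macros above step through [base, end) in 1GiB and 2MiB increments, clamping each step to end, which is what lets the prepopulation loops allocate only the table pages the aperture actually spans. A standalone sketch that reuses those macro definitions verbatim and counts the pages a 1GiB aperture would touch:

#include <stdio.h>

/* Same definitions as in the hunk above, local to this sketch. */
#define pud_index(addr) (((addr) >> 30) & 0x3)
#define pud_mask(addr)  ((addr) & ~((1UL << 30) - 1))
#define pud_next(addr, end)                                             \
({      unsigned long __boundary = pud_mask(addr + (1UL << 30));       \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);             \
})

#define pmd_index(addr) (((addr) >> 21) & 0x1ff)
#define pmd_mask(addr)  ((addr) & ~((1UL << 21) - 1))
#define pmd_next(addr, end)                                             \
({      unsigned long __boundary = pmd_mask(addr + (1UL << 21));       \
        (__boundary - 1 < (end) - 1) ? __boundary : (end);             \
})

int main(void)
{
        /* A 1GiB aperture at 1GiB: only one of the four pud slots is used. */
        unsigned long base = 1UL << 30, end = 2UL << 30;
        unsigned long pud, pmd;
        int i, j, puds = 0, pmds = 0;

        for (i = pud_index(base), pud = base; pud < end;
             ++i, pud = pud_next(pud, end)) {
                puds++;
                for (j = pmd_index(pud), pmd = pud; pmd < pud_next(pud, end);
                     ++j, pmd = pmd_next(pmd, end))
                        pmds++;
        }

        printf("puds=%d pmds=%d\n", puds, pmds);
        return 0;
}

This prints puds=1 pmds=512, versus the 4 PUD and 2048 PMD pages the old code allocated unconditionally; the statement-expression macros are a GCC extension, as in the kernel source.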
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index f4533040806f..e6939c2212d4 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -69,6 +69,8 @@ struct io_pgtable_cfg {
unsigned int oas;
const struct iommu_gather_ops *tlb;
struct device *iommu_dev;
+ dma_addr_t iova_base;
+ dma_addr_t iova_end;
/* Low-level data specific to the table format */
union {