summaryrefslogtreecommitdiff
path: root/drivers/iommu/iommu.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/iommu/iommu.c')
-rw-r--r--drivers/iommu/iommu.c152
1 files changed, 135 insertions, 17 deletions
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 515bb8b80952..8ab502d80270 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -31,8 +31,11 @@
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/bitops.h>
+#include <linux/debugfs.h>
#include <trace/events/iommu.h>
+#include "iommu-debug.h"
+
static struct kset *iommu_group_kset;
static struct ida iommu_group_ida;
static struct mutex iommu_group_mutex;
@@ -1023,6 +1026,8 @@ EXPORT_SYMBOL_GPL(bus_set_iommu);
bool iommu_present(struct bus_type *bus)
{
+ /* Tolerate a NULL bus so callers may probe without checking first. */
+ if (!bus)
+ return false;
return bus->iommu_ops != NULL;
}
EXPORT_SYMBOL_GPL(iommu_present);
@@ -1059,6 +1064,45 @@ void iommu_set_fault_handler(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
+/**
+ * iommu_trigger_fault() - trigger an IOMMU fault
+ * @domain: iommu domain
+ * @flags: driver-specific flags forwarded verbatim to the driver's
+ *         ->trigger_fault() op
+ *
+ * Triggers a fault on the device to which this domain is attached.
+ * Silently does nothing if the driver does not implement
+ * ->trigger_fault().
+ *
+ * This function should only be used for debugging purposes, for obvious
+ * reasons.
+ */
+void iommu_trigger_fault(struct iommu_domain *domain, unsigned long flags)
+{
+ if (domain->ops->trigger_fault)
+ domain->ops->trigger_fault(domain, flags);
+}
+
+/**
+ * iommu_reg_read() - read an IOMMU register
+ * @domain: iommu domain whose hardware register to read
+ * @offset: register offset, in driver-specific units
+ *
+ * Reads the IOMMU register at the given offset.
+ *
+ * Return: the register value, or 0 when the driver does not implement
+ * ->reg_read(). Note 0 is indistinguishable from a register that
+ * genuinely reads as zero.
+ */
+unsigned long iommu_reg_read(struct iommu_domain *domain, unsigned long offset)
+{
+ if (domain->ops->reg_read)
+ return domain->ops->reg_read(domain, offset);
+ return 0;
+}
+
+/**
+ * iommu_reg_write() - write an IOMMU register
+ * @domain: iommu domain whose hardware register to write
+ * @offset: register offset, in driver-specific units
+ * @val: value to write
+ *
+ * Writes the given value to the IOMMU register at the given offset.
+ * Silently does nothing if the driver does not implement ->reg_write().
+ */
+void iommu_reg_write(struct iommu_domain *domain, unsigned long offset,
+ unsigned long val)
+{
+ if (domain->ops->reg_write)
+ domain->ops->reg_write(domain, offset, val);
+}
+
static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type)
{
@@ -1074,6 +1118,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
domain->ops = bus->iommu_ops;
domain->type = type;
+ iommu_debug_domain_add(domain);
+
return domain;
}
@@ -1085,6 +1131,7 @@ EXPORT_SYMBOL_GPL(iommu_domain_alloc);
void iommu_domain_free(struct iommu_domain *domain)
{
+ /* Drop debugfs tracking before the driver frees the domain memory. */
+ iommu_debug_domain_remove(domain);
domain->ops->domain_free(domain);
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
@@ -1097,8 +1144,10 @@ static int __iommu_attach_device(struct iommu_domain *domain,
return -ENODEV;
ret = domain->ops->attach_dev(domain, dev);
- if (!ret)
+ if (!ret) {
trace_attach_device_to_domain(dev);
+ iommu_debug_attach_device(domain, dev);
+ }
return ret;
}
@@ -1137,6 +1186,7 @@ static void __iommu_detach_device(struct iommu_domain *domain,
if (unlikely(domain->ops->detach_dev == NULL))
return;
+ iommu_debug_detach_device(domain, dev);
domain->ops->detach_dev(domain, dev);
trace_detach_device_from_domain(dev);
}
@@ -1277,8 +1327,24 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
}
EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
-static size_t iommu_pgsize(struct iommu_domain *domain,
- unsigned long addr_merge, size_t size)
+/**
+ * iommu_iova_to_phys_hard() - translate an iova to a physical address
+ * @domain: iommu domain
+ * @iova: IO virtual address to translate
+ *
+ * NOTE(review): presumably the "_hard" variant asks the driver to do the
+ * translation in hardware rather than by walking software page tables --
+ * confirm against the driver's ->iova_to_phys_hard() implementation.
+ *
+ * Return: the physical address, or 0 when the driver does not implement
+ * ->iova_to_phys_hard().
+ */
+phys_addr_t iommu_iova_to_phys_hard(struct iommu_domain *domain,
+ dma_addr_t iova)
+{
+ if (unlikely(domain->ops->iova_to_phys_hard == NULL))
+ return 0;
+
+ return domain->ops->iova_to_phys_hard(domain, iova);
+}
+
+/*
+ * Return the supported page-size bitmap for @domain, preferring the
+ * driver's dynamic per-domain ->get_pgsize_bitmap() op and falling back
+ * to the static per-ops pgsize_bitmap field.
+ */
+static unsigned long iommu_get_pgsize_bitmap(struct iommu_domain *domain)
+{
+ if (domain->ops->get_pgsize_bitmap)
+ return domain->ops->get_pgsize_bitmap(domain);
+ return domain->ops->pgsize_bitmap;
+}
+
+size_t iommu_pgsize(unsigned long pgsize_bitmap,
+ unsigned long addr_merge, size_t size)
{
unsigned int pgsize_idx;
size_t pgsize;
@@ -1297,10 +1363,14 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
pgsize = (1UL << (pgsize_idx + 1)) - 1;
/* throw away page sizes not supported by the hardware */
- pgsize &= domain->ops->pgsize_bitmap;
+ pgsize &= pgsize_bitmap;
/* make sure we're still sane */
- BUG_ON(!pgsize);
+ if (!pgsize) {
+ pr_err("invalid pgsize/addr/size! 0x%lx 0x%lx 0x%zx\n",
+ pgsize_bitmap, addr_merge, size);
+ BUG();
+ }
/* pick the biggest page */
pgsize_idx = __fls(pgsize);
@@ -1312,20 +1382,25 @@ static size_t iommu_pgsize(struct iommu_domain *domain,
int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot)
{
- unsigned long orig_iova = iova;
+ unsigned long orig_iova = iova, pgsize_bitmap;
unsigned int min_pagesz;
size_t orig_size = size;
int ret = 0;
+ trace_map_start(iova, paddr, size);
if (unlikely(domain->ops->map == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ (domain->ops->pgsize_bitmap == 0UL &&
+ !domain->ops->get_pgsize_bitmap))) {
+ trace_map_end(iova, paddr, size);
return -ENODEV;
+ }
if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
return -EINVAL;
+ pgsize_bitmap = iommu_get_pgsize_bitmap(domain);
/* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(pgsize_bitmap);
/*
* both the virtual address and the physical one, as well as
@@ -1335,13 +1410,14 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
iova, &paddr, size, min_pagesz);
+ trace_map_end(iova, paddr, size);
return -EINVAL;
}
pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
while (size) {
- size_t pgsize = iommu_pgsize(domain, iova | paddr, size);
+ size_t pgsize = iommu_pgsize(pgsize_bitmap, iova | paddr, size);
pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
iova, &paddr, pgsize);
@@ -1361,6 +1437,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
else
trace_map(orig_iova, paddr, orig_size);
+ trace_map_end(iova, paddr, size);
return ret;
}
EXPORT_SYMBOL_GPL(iommu_map);
@@ -1371,15 +1448,21 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
unsigned int min_pagesz;
unsigned long orig_iova = iova;
+ trace_unmap_start(iova, 0, size);
if (unlikely(domain->ops->unmap == NULL ||
- domain->ops->pgsize_bitmap == 0UL))
+ (domain->ops->pgsize_bitmap == 0UL &&
+ !domain->ops->get_pgsize_bitmap))) {
+ trace_unmap_end(iova, 0, size);
return -ENODEV;
+ }
- if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) {
+ trace_unmap_end(iova, 0, size);
return -EINVAL;
+ }
/* find out the minimum page size supported */
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ min_pagesz = 1 << __ffs(iommu_get_pgsize_bitmap(domain));
/*
* The virtual address, as well as the size of the mapping, must be
@@ -1389,6 +1472,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
if (!IS_ALIGNED(iova | size, min_pagesz)) {
pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
iova, size, min_pagesz);
+ trace_unmap_end(iova, 0, size);
return -EINVAL;
}
@@ -1399,9 +1483,9 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
* or we hit an area that isn't mapped.
*/
while (unmapped < size) {
- size_t pgsize = iommu_pgsize(domain, iova, size - unmapped);
+ size_t left = size - unmapped;
- unmapped_page = domain->ops->unmap(domain, iova, pgsize);
+ unmapped_page = domain->ops->unmap(domain, iova, left);
if (!unmapped_page)
break;
@@ -1413,6 +1497,7 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
}
trace_unmap(orig_iova, size, unmapped);
+ trace_unmap_end(orig_iova, 0, size);
return unmapped;
}
EXPORT_SYMBOL_GPL(iommu_unmap);
@@ -1424,11 +1509,14 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
size_t mapped = 0;
unsigned int i, min_pagesz;
int ret;
+ unsigned long pgsize_bitmap;
- if (unlikely(domain->ops->pgsize_bitmap == 0UL))
+ if (unlikely(domain->ops->pgsize_bitmap == 0UL &&
+ !domain->ops->get_pgsize_bitmap))
return 0;
- min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
+ pgsize_bitmap = iommu_get_pgsize_bitmap(domain);
+ min_pagesz = 1 << __ffs(pgsize_bitmap);
for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
@@ -1460,6 +1548,20 @@ out_err:
}
EXPORT_SYMBOL_GPL(default_iommu_map_sg);
+/*
+ * DEPRECATED: stub kept only so legacy callers still link; it always
+ * fails with -ENODEV. Use iommu_map() / iommu_map_sg() instead.
+ */
+int iommu_map_range(struct iommu_domain *domain, unsigned int iova,
+ struct scatterlist *sg, unsigned int len, int opt)
+{
+ return -ENODEV;
+}
+
+/*
+ * DEPRECATED: stub kept only so legacy callers still link; it always
+ * fails with -ENODEV. Use iommu_unmap() instead.
+ */
+int iommu_unmap_range(struct iommu_domain *domain, unsigned int iova,
+ unsigned int len)
+{
+ return -ENODEV;
+}
+
int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot)
{
@@ -1480,6 +1582,8 @@ void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr)
}
EXPORT_SYMBOL_GPL(iommu_domain_window_disable);
+struct dentry *iommu_debugfs_top;
+
static int __init iommu_init(void)
{
iommu_group_kset = kset_create_and_add("iommu_groups",
@@ -1489,6 +1593,12 @@ static int __init iommu_init(void)
BUG_ON(!iommu_group_kset);
+ iommu_debugfs_top = debugfs_create_dir("iommu", NULL);
+ if (!iommu_debugfs_top) {
+ pr_err("Couldn't create iommu debugfs directory\n");
+ return -ENODEV;
+ }
+
return 0;
}
core_initcall(iommu_init);
@@ -1509,7 +1619,7 @@ int iommu_domain_get_attr(struct iommu_domain *domain,
break;
case DOMAIN_ATTR_PAGING:
paging = data;
- *paging = (domain->ops->pgsize_bitmap != 0UL);
+ *paging = (iommu_get_pgsize_bitmap(domain) != 0UL);
break;
case DOMAIN_ATTR_WINDOWS:
count = data;
@@ -1558,6 +1668,14 @@ int iommu_domain_set_attr(struct iommu_domain *domain,
}
EXPORT_SYMBOL_GPL(iommu_domain_set_attr);
+/*
+ * iommu_dma_supported() - ask the driver whether @mask is a usable DMA
+ * mask for @dev on @domain.
+ *
+ * Return: the driver's ->dma_supported() result, or 0 (not supported)
+ * when the driver does not implement the op.
+ */
+int iommu_dma_supported(struct iommu_domain *domain, struct device *dev,
+ u64 mask)
+{
+ if (domain->ops->dma_supported)
+ return domain->ops->dma_supported(domain, dev, mask);
+ return 0;
+}
+
void iommu_get_dm_regions(struct device *dev, struct list_head *list)
{
const struct iommu_ops *ops = dev->bus->iommu_ops;