Subject: [PATCH v6 03/12] iommu: Add attach/detach_dev_pasid domain ops
Attaching an IOMMU domain to a PASID of a device is a generic operation
for modern IOMMU drivers that support PASID-granular DMA address
translation. Currently visible usage scenarios include (but are not
limited to):

- SVA (Shared Virtual Address)
- kernel DMA with PASID
- hardware-assisted mediated devices

This adds a pair of common domain ops for this purpose and provides
helpers to attach/detach a domain to/from a {device, PASID}. Some buses,
like PCI, route packets without considering the PASID value. Thus a DMA
target address with a PASID might be treated as P2P if the address falls
into the MMIO BAR of another device in the group. To keep things simple,
these interfaces only apply to devices belonging to singleton groups
whose singleton status is immutable in the fabric, i.e. not affected by
hotplug.
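
For illustration only (not part of this patch), here is a minimal sketch
of how a caller that already owns a domain, a device and an allocated
PASID might use the new helpers; the function name and the error handling
policy below are assumptions:

static int example_enable_pasid_dma(struct iommu_domain *domain,
				    struct device *dev, ioasid_t pasid)
{
	int ret;

	/* Fails unless dev sits in an immutable singleton group. */
	ret = iommu_attach_device_pasid(domain, dev, pasid);
	if (ret)
		return ret;

	/* ... issue DMA tagged with pasid, translated through domain ... */

	iommu_detach_device_pasid(domain, dev, pasid);
	return 0;
}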

Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
---
include/linux/iommu.h | 21 +++++++++++++
drivers/iommu/iommu.c | 71 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 92 insertions(+)

diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index b8ffaf2cb1d0..ab36244d4e94 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -263,6 +263,8 @@ struct iommu_ops {
* struct iommu_domain_ops - domain specific operations
* @attach_dev: attach an iommu domain to a device
* @detach_dev: detach an iommu domain from a device
+ * @attach_dev_pasid: attach an iommu domain to a pasid of device
+ * @detach_dev_pasid: detach an iommu domain from a pasid of device
* @map: map a physically contiguous memory region to an iommu domain
* @map_pages: map a physically contiguous set of pages of the same size to
* an iommu domain.
@@ -283,6 +285,10 @@ struct iommu_ops {
 struct iommu_domain_ops {
 	int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
 	void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
+	int (*attach_dev_pasid)(struct iommu_domain *domain,
+				struct device *dev, ioasid_t pasid);
+	void (*detach_dev_pasid)(struct iommu_domain *domain,
+				 struct device *dev, ioasid_t pasid);
 
 	int (*map)(struct iommu_domain *domain, unsigned long iova,
 		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
@@ -678,6 +684,10 @@ int iommu_group_claim_dma_owner(struct iommu_group *group, void *owner);
 void iommu_group_release_dma_owner(struct iommu_group *group);
 bool iommu_group_dma_owner_claimed(struct iommu_group *group);
 
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+			      struct device *dev, ioasid_t pasid);
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+			       struct device *dev, ioasid_t pasid);
#else /* CONFIG_IOMMU_API */

struct iommu_ops {};
@@ -1051,6 +1061,17 @@ static inline bool iommu_group_dma_owner_claimed(struct iommu_group *group)
 {
 	return false;
 }
+
+static inline int iommu_attach_device_pasid(struct iommu_domain *domain,
+					    struct device *dev, ioasid_t pasid)
+{
+	return -ENODEV;
+}
+
+static inline void iommu_detach_device_pasid(struct iommu_domain *domain,
+					     struct device *dev, ioasid_t pasid)
+{
+}
#endif /* CONFIG_IOMMU_API */

/**
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 29906bc16371..16e8db2d86fc 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -38,6 +38,7 @@ struct iommu_group {
 	struct kobject kobj;
 	struct kobject *devices_kobj;
 	struct list_head devices;
+	struct xarray pasid_array;
 	struct mutex mutex;
 	void *iommu_data;
 	void (*iommu_data_release)(void *iommu_data);
@@ -630,6 +631,7 @@ struct iommu_group *iommu_group_alloc(void)
 	mutex_init(&group->mutex);
 	INIT_LIST_HEAD(&group->devices);
 	INIT_LIST_HEAD(&group->entry);
+	xa_init(&group->pasid_array);
 
 	ret = ida_simple_get(&iommu_group_ida, 0, 0, GFP_KERNEL);
 	if (ret < 0) {
@@ -3190,3 +3192,72 @@ bool iommu_group_dma_owner_claimed(struct iommu_group *group)
 	return user;
 }
 EXPORT_SYMBOL_GPL(iommu_group_dma_owner_claimed);
+
+static bool device_group_immutable_singleton(struct device *dev)
+{
+	struct iommu_group *group = iommu_group_get(dev);
+	int count;
+
+	if (!group)
+		return false;
+
+	mutex_lock(&group->mutex);
+	count = iommu_group_device_count(group);
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	if (count != 1)
+		return false;
+
+	/*
+	 * The PCI device could be considered to be fully isolated if all
+	 * devices on the path from the device to the host-PCI bridge are
+	 * protected from peer-to-peer DMA by ACS.
+	 */
+	if (dev_is_pci(dev))
+		return pci_acs_path_enabled(to_pci_dev(dev), NULL,
+					    REQ_ACS_FLAGS);
+
+	return true;
+}
+
+int iommu_attach_device_pasid(struct iommu_domain *domain,
+			      struct device *dev, ioasid_t pasid)
+{
+	struct iommu_group *group;
+	int ret = -EINVAL;
+	void *curr;
+
+	if (!domain->ops->attach_dev_pasid)
+		return -EOPNOTSUPP;
+
+	if (!device_group_immutable_singleton(dev))
+		return -EINVAL;
+
+	group = iommu_group_get(dev);
+	mutex_lock(&group->mutex);
+	curr = xa_cmpxchg(&group->pasid_array, pasid, NULL, domain, GFP_KERNEL);
+	if (curr)
+		goto out_unlock;
+	ret = domain->ops->attach_dev_pasid(domain, dev, pasid);
+	if (ret)
+		xa_erase(&group->pasid_array, pasid);
+out_unlock:
+	mutex_unlock(&group->mutex);
+	iommu_group_put(group);
+
+	return ret;
+}
+
+void iommu_detach_device_pasid(struct iommu_domain *domain,
+			       struct device *dev, ioasid_t pasid)
+{
+	struct iommu_group *group = iommu_group_get(dev);
+
+	mutex_lock(&group->mutex);
+	domain->ops->detach_dev_pasid(domain, dev, pasid);
+	xa_erase(&group->pasid_array, pasid);
+	mutex_unlock(&group->mutex);
+
+	iommu_group_put(group);
+}
--
2.25.1
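
Not part of the patch above: a minimal sketch of how an IOMMU driver
might wire the new callbacks into its iommu_domain_ops. The driver
function names and the steps described in the comments are hypothetical:

static int example_attach_dev_pasid(struct iommu_domain *domain,
				    struct device *dev, ioasid_t pasid)
{
	/*
	 * Hypothetical: install the domain's translation into the PASID
	 * table entry of dev for this pasid, then enable the entry.
	 */
	return 0;
}

static void example_detach_dev_pasid(struct iommu_domain *domain,
				     struct device *dev, ioasid_t pasid)
{
	/*
	 * Hypothetical: clear the PASID table entry for this pasid and
	 * flush any IOTLB/PASID-cache entries that may reference it.
	 */
}

static const struct iommu_domain_ops example_domain_ops = {
	.attach_dev_pasid = example_attach_dev_pasid,
	.detach_dev_pasid = example_detach_dev_pasid,
	/* .attach_dev, .detach_dev, .map, .unmap, ... as before */
};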