Subject: [PATCH RFC 12/15] dmaengine: idxd: add device support functions in prep for mdev
From: Dave Jiang <dave.jiang@intel.com>
Date: 21 Apr 2020
Add device support helper functions that will be used by the VFIO mediated
device driver, in preparation for adding VFIO mdev support.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
---
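
A note for reviewers, below the fold: the sketch that follows shows how the
upcoming mdev code is expected to use the new WQ helpers when quiescing a
virtual WQ. The vidxd_quiesce_wq() name and callsite are hypothetical and
not part of this series; the locking mirrors what the helpers themselves
assert via lockdep_assert_held().

/* Illustrative only: a hypothetical mdev-side caller of the new helpers. */
static int vidxd_quiesce_wq(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_wq_drain(wq);		/* let in-flight descriptors complete */
	if (rc < 0)
		rc = idxd_wq_abort(wq);	/* fall back to aborting the WQ */
	spin_unlock_irqrestore(&idxd->dev_lock, flags);

	return rc;
}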
drivers/dma/idxd/device.c | 130 +++++++++++++++++++++++++++++++++++++++++++++
drivers/dma/idxd/idxd.h | 7 ++
drivers/dma/idxd/init.c | 19 +++++++
3 files changed, 156 insertions(+)
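
Also for reference: idxd_wq_abort() and idxd_wq_drain() below build their
command operand the same way, with bits 15:0 carrying a one-hot mask that
selects wq->id % 16 and bits 31:16 selecting which bank of 16 WQs the mask
applies to. A minimal sketch of that encoding, factored out purely for
illustration (the patch itself open-codes it):

/* Illustration of the WQ command operand layout used in the hunks below. */
static inline u32 idxd_wq_cmd_operand(int wq_id)
{
	/* bits 15:0: one-hot WQ select within a bank of 16 */
	/* bits 31:16: bank index (wq_id / 16) */
	return BIT(wq_id % 16) | ((wq_id / 16) << 16);
}

Likewise, idxd_wq_update_pasid() and idxd_wq_update_priv() rewrite only the
third 32-bit word of the per-WQ WQCFG; the 32-byte per-WQ stride and the
8-byte offset of that word are taken straight from the code below.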

diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c
index a46b6558984c..830aa5859646 100644
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -319,6 +319,40 @@ void idxd_wq_unmap_portal(struct idxd_wq *wq)
 	devm_iounmap(dev, wq->portal);
 }
 
+int idxd_wq_abort(struct idxd_wq *wq)
+{
+	int rc;
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	u32 operand, status;
+
+	lockdep_assert_held(&idxd->dev_lock);
+
+	dev_dbg(dev, "Abort WQ %d\n", wq->id);
+	if (wq->state != IDXD_WQ_ENABLED) {
+		dev_dbg(dev, "WQ %d not active\n", wq->id);
+		return -ENXIO;
+	}
+
+	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_ABORT_WQ, operand);
+	rc = idxd_cmd_send(idxd, IDXD_CMD_ABORT_WQ, operand);
+	if (rc < 0)
+		return rc;
+
+	rc = idxd_cmd_wait(idxd, &status, IDXD_DRAIN_TIMEOUT);
+	if (rc < 0)
+		return rc;
+
+	if (status != IDXD_CMDSTS_SUCCESS) {
+		dev_dbg(dev, "WQ abort failed: %#x\n", status);
+		return -ENXIO;
+	}
+
+	dev_dbg(dev, "WQ %d aborted\n", wq->id);
+	return 0;
+}
+
 int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid)
 {
 	struct idxd_device *idxd = wq->idxd;
@@ -372,6 +406,66 @@ int idxd_wq_disable_pasid(struct idxd_wq *wq)
 	return 0;
 }
 
+void idxd_wq_update_pasid(struct idxd_wq *wq, int pasid)
+{
+	struct idxd_device *idxd = wq->idxd;
+	int offset;
+
+	lockdep_assert_held(&idxd->dev_lock);
+
+	/* PASID fields are 8 bytes into the WQCFG register */
+	offset = idxd->wqcfg_offset + wq->id * 32 + 8;
+	wq->wqcfg.pasid = pasid;
+	iowrite32(wq->wqcfg.bits[2], idxd->reg_base + offset);
+}
+
+void idxd_wq_update_priv(struct idxd_wq *wq, int priv)
+{
+	struct idxd_device *idxd = wq->idxd;
+	int offset;
+
+	lockdep_assert_held(&idxd->dev_lock);
+
+	/* priv field is 8 bytes into the WQCFG register */
+	offset = idxd->wqcfg_offset + wq->id * 32 + 8;
+	wq->wqcfg.priv = !!priv;
+	iowrite32(wq->wqcfg.bits[2], idxd->reg_base + offset);
+}
+
+int idxd_wq_drain(struct idxd_wq *wq)
+{
+	int rc;
+	struct idxd_device *idxd = wq->idxd;
+	struct device *dev = &idxd->pdev->dev;
+	u32 operand, status;
+
+	lockdep_assert_held(&idxd->dev_lock);
+
+	dev_dbg(dev, "Drain WQ %d\n", wq->id);
+	if (wq->state != IDXD_WQ_ENABLED) {
+		dev_dbg(dev, "WQ %d not active\n", wq->id);
+		return -ENXIO;
+	}
+
+	operand = BIT(wq->id % 16) | ((wq->id / 16) << 16);
+	dev_dbg(dev, "cmd: %u operand: %#x\n", IDXD_CMD_DRAIN_WQ, operand);
+	rc = idxd_cmd_send(idxd, IDXD_CMD_DRAIN_WQ, operand);
+	if (rc < 0)
+		return rc;
+
+	rc = idxd_cmd_wait(idxd, &status, IDXD_DRAIN_TIMEOUT);
+	if (rc < 0)
+		return rc;
+
+	if (status != IDXD_CMDSTS_SUCCESS) {
+		dev_dbg(dev, "WQ drain failed: %#x\n", status);
+		return -ENXIO;
+	}
+
+	dev_dbg(dev, "WQ %d drained\n", wq->id);
+	return 0;
+}
+
 /* Device control bits */
 static inline bool idxd_is_enabled(struct idxd_device *idxd)
 {
@@ -542,6 +636,42 @@ int idxd_device_drain_pasid(struct idxd_device *idxd, int pasid)
 	return 0;
 }
 
+int idxd_device_request_int_handle(struct idxd_device *idxd, int idx,
+				   int *handle)
+{
+	int rc;
+	struct device *dev = &idxd->pdev->dev;
+	u32 operand, status;
+
+	lockdep_assert_held(&idxd->dev_lock);
+
+	if (!idxd->hw.gen_cap.int_handle_req)
+		return -EOPNOTSUPP;
+
+	dev_dbg(dev, "get int handle, idx %d\n", idx);
+
+	operand = idx & 0xffff;
+	dev_dbg(dev, "cmd: %u operand: %#x\n",
+		IDXD_CMD_REQUEST_INT_HANDLE, operand);
+	rc = idxd_cmd_send(idxd, IDXD_CMD_REQUEST_INT_HANDLE, operand);
+	if (rc < 0)
+		return rc;
+
+	rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT);
+	if (rc < 0)
+		return rc;
+
+	if (status != IDXD_CMDSTS_SUCCESS) {
+		dev_dbg(dev, "request int handle failed: %#x\n", status);
+		return -ENXIO;
+	}
+
+	*handle = (status >> 8) & 0xffff;
+
+	dev_dbg(dev, "int handle acquired: %u\n", *handle);
+	return 0;
+}
+
 /* Device configuration bits */
 static void idxd_group_config_write(struct idxd_group *group)
 {
diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h
index 3a942e9c5980..9b56a4c7f3fc 100644
--- a/drivers/dma/idxd/idxd.h
+++ b/drivers/dma/idxd/idxd.h
@@ -199,6 +199,7 @@ struct idxd_device {

 	atomic_t num_allocated_ims;
 	struct sbitmap ims_sbmap;
+	int *int_handles;
 };
 
 /* IDXD software descriptor */
@@ -303,6 +304,8 @@ int idxd_device_ro_config(struct idxd_device *idxd);
 void idxd_device_wqs_clear_state(struct idxd_device *idxd);
 int idxd_device_drain_pasid(struct idxd_device *idxd, int pasid);
 void idxd_device_load_config(struct idxd_device *idxd);
+int idxd_device_request_int_handle(struct idxd_device *idxd,
+				   int idx, int *handle);
 
 /* work queue control */
 int idxd_wq_alloc_resources(struct idxd_wq *wq);
@@ -313,6 +316,10 @@ int idxd_wq_map_portal(struct idxd_wq *wq);
 void idxd_wq_unmap_portal(struct idxd_wq *wq);
 int idxd_wq_set_pasid(struct idxd_wq *wq, int pasid);
 int idxd_wq_disable_pasid(struct idxd_wq *wq);
+int idxd_wq_abort(struct idxd_wq *wq);
+void idxd_wq_update_pasid(struct idxd_wq *wq, int pasid);
+void idxd_wq_update_priv(struct idxd_wq *wq, int priv);
+int idxd_wq_drain(struct idxd_wq *wq);
 
 /* submission */
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc,
diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c
index 15b3ef73cac3..babe6e614087 100644
--- a/drivers/dma/idxd/init.c
+++ b/drivers/dma/idxd/init.c
@@ -56,6 +56,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 	int i, msixcnt;
 	int rc = 0;
 	union msix_perm mperm;
+	unsigned long flags;
 
 	msixcnt = pci_msix_vec_count(pdev);
 	if (msixcnt < 0) {
@@ -130,6 +131,17 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
 		}
 		dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n",
 			i, msix->vector);
+
+		if (idxd->hw.gen_cap.int_handle_req) {
+			spin_lock_irqsave(&idxd->dev_lock, flags);
+			rc = idxd_device_request_int_handle(idxd, i,
+							    &idxd->int_handles[i]);
+			spin_unlock_irqrestore(&idxd->dev_lock, flags);
+			if (rc < 0)
+				goto err_no_irq;
+			dev_dbg(dev, "int handle requested: %u\n",
+				idxd->int_handles[i]);
+		}
 	}
 
 	idxd_unmask_error_interrupts(idxd);
@@ -168,6 +180,13 @@ static int idxd_setup_internals(struct idxd_device *idxd)
 	struct device *dev = &idxd->pdev->dev;
 	int i;
 
+	if (idxd->hw.gen_cap.int_handle_req) {
+		idxd->int_handles = devm_kcalloc(dev, idxd->max_wqs,
+						 sizeof(int), GFP_KERNEL);
+		if (!idxd->int_handles)
+			return -ENOMEM;
+	}
+
 	idxd->groups = devm_kcalloc(dev, idxd->max_groups,
 				    sizeof(struct idxd_group), GFP_KERNEL);
 	if (!idxd->groups)
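
One closing note on the interrupt handle flow added in init.c above: when
GENCAP reports int_handle_req, the device returns the handle in the command
status register. The helper below restates, as a standalone sketch for
illustration only, the decode that idxd_device_request_int_handle()
performs inline:

/* Sketch: how the handle is unpacked from the command status above. */
static inline int idxd_int_handle_from_status(u32 status)
{
	/* bits 7:0 hold the command status; bits 23:8 hold the handle */
	return (status >> 8) & 0xffff;
}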