From: Jorgen Hansen <jhansen@vmware.com>
Subject: [PATCH v2 2/8] VMCI: dma dg: add MMIO access to registers
Date: 3 Feb 2022
Detect support for MMIO access by examining the length of the region
requested in BAR1. If it is 256KB, the VMCI device supports MMIO access
to registers.

If MMIO access is supported, map the area of the region used for
MMIO access (64KB size at offset 128KB).
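
A condensed sketch of this probe-time logic (illustrative only; the
helper name vmci_map_mmio is not part of the patch, and the VMCI_*
constants come from the vmw_vmci_defs.h hunk below):

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *vmci_map_mmio(struct pci_dev *pdev)
{
	void __iomem *mmio_base = NULL;

	/* A 256KB BAR1 indicates a device with MMIO register access. */
	if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) {
		/* Map only the 64KB register window at offset 128KB. */
		mmio_base = pci_iomap_range(pdev, 1,
					    VMCI_MMIO_ACCESS_OFFSET,
					    VMCI_MMIO_ACCESS_SIZE);
	}

	/* NULL means the driver falls back to port IO through BAR0. */
	return mmio_base;
}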

Add wrapper functions for 32 bit register accesses that dispatch to
either MMIO or IO ports based on the device configuration.
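
Call sites then go through the wrappers instead of touching iobase
directly, for example (before/after excerpt mirroring the interrupt
handler hunk below):

	/* Before: always port IO through BAR0 */
	icr = ioread32(dev->iobase + VMCI_ICR_ADDR);

	/* After: MMIO when mmio_base is mapped, port IO otherwise */
	icr = vmci_read_reg(dev, VMCI_ICR_ADDR);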

Sending and receiving datagrams through iowrite8_rep/ioread8_rep is
left unchanged for now, and will be addressed in a later change.

Reviewed-by: Vishnu Dasa <vdasa@vmware.com>
Signed-off-by: Jorgen Hansen <jhansen@vmware.com>
---
drivers/misc/vmw_vmci/vmci_guest.c | 68 ++++++++++++++++++++++--------
include/linux/vmw_vmci_defs.h | 12 ++++++
2 files changed, 62 insertions(+), 18 deletions(-)

diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 1018dc77269d..38ee7ed32ab9 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -45,6 +45,7 @@ static u32 vm_context_id = VMCI_INVALID_ID;
struct vmci_guest_device {
struct device *dev; /* PCI device we are attached to */
void __iomem *iobase;
+ void __iomem *mmio_base;

bool exclusive_vectors;

@@ -89,6 +90,21 @@ u32 vmci_get_vm_context_id(void)
return vm_context_id;
}

+static unsigned int vmci_read_reg(struct vmci_guest_device *dev, u32 reg)
+{
+ if (dev->mmio_base != NULL)
+ return readl(dev->mmio_base + reg);
+ return ioread32(dev->iobase + reg);
+}
+
+static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
+{
+ if (dev->mmio_base != NULL)
+ writel(val, dev->mmio_base + reg);
+ else
+ iowrite32(val, dev->iobase + reg);
+}
+
/*
* VM to hypervisor call mechanism. We use the standard VMware naming
* convention since shared code is calling this function as well.
@@ -116,7 +132,7 @@ int vmci_send_datagram(struct vmci_datagram *dg)
if (vmci_dev_g) {
iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
dg, VMCI_DG_SIZE(dg));
- result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
+ result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
} else {
result = VMCI_ERROR_UNAVAILABLE;
}
@@ -384,7 +400,7 @@ static irqreturn_t vmci_interrupt(int irq, void *_dev)
unsigned int icr;

/* Acknowledge interrupt and determine what needs doing. */
- icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
+ icr = vmci_read_reg(dev, VMCI_ICR_ADDR);
if (icr == 0 || icr == ~0)
return IRQ_NONE;

@@ -429,7 +445,8 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct vmci_guest_device *vmci_dev;
- void __iomem *iobase;
+ void __iomem *iobase = NULL;
+ void __iomem *mmio_base = NULL;
unsigned int capabilities;
unsigned int caps_in_use;
unsigned long cmd;
@@ -445,16 +462,32 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
return error;
}

- error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
- if (error) {
- dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
- return error;
+ /*
+ * The VMCI device with mmio access to registers requests 256KB
+ * for BAR1. If present, driver will use new VMCI device
+ * functionality for register access and datagram send/recv.
+ */
+
+ if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) {
+ dev_info(&pdev->dev, "MMIO register access is available\n");
+ mmio_base = pci_iomap_range(pdev, 1, VMCI_MMIO_ACCESS_OFFSET,
+ VMCI_MMIO_ACCESS_SIZE);
+ /* If the map fails, we fall back to IOIO access. */
+ if (!mmio_base)
+ dev_warn(&pdev->dev, "Failed to map MMIO register access\n");
}

- iobase = pcim_iomap_table(pdev)[0];
+ if (!mmio_base) {
+ error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
+ if (error) {
+ dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
+ return error;
+ }
+ iobase = pcim_iomap_table(pdev)[0];
+ }

- dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
- (unsigned long)iobase, pdev->irq);
+ dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, %#lx, irq %u\n",
+ (unsigned long)iobase, (unsigned long)mmio_base, pdev->irq);

vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
if (!vmci_dev) {
@@ -466,6 +499,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
vmci_dev->dev = &pdev->dev;
vmci_dev->exclusive_vectors = false;
vmci_dev->iobase = iobase;
+ vmci_dev->mmio_base = mmio_base;

tasklet_init(&vmci_dev->datagram_tasklet,
vmci_dispatch_dgs, (unsigned long)vmci_dev);
@@ -490,7 +524,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
*
* Right now, we need datagrams. There are no fallbacks.
*/
- capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
+ capabilities = vmci_read_reg(vmci_dev, VMCI_CAPS_ADDR);
if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
dev_err(&pdev->dev, "Device does not support datagrams\n");
error = -ENXIO;
@@ -534,7 +568,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);

/* Let the host know which capabilities we intend to use. */
- iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR);
+ vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR);

/* Set up global device so that we can start sending datagrams */
spin_lock_irq(&vmci_dev_spinlock);
@@ -630,11 +664,10 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
cmd = VMCI_IMR_DATAGRAM;
if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
cmd |= VMCI_IMR_NOTIFICATION;
- iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);
+ vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR);

/* Enable interrupts. */
- iowrite32(VMCI_CONTROL_INT_ENABLE,
- vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vmci_write_reg(vmci_dev, VMCI_CONTROL_INT_ENABLE, VMCI_CONTROL_ADDR);

pci_set_drvdata(pdev, vmci_dev);

@@ -657,8 +690,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,

err_remove_bitmap:
if (vmci_dev->notification_bitmap) {
- iowrite32(VMCI_CONTROL_RESET,
- vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
dma_free_coherent(&pdev->dev, PAGE_SIZE,
vmci_dev->notification_bitmap,
vmci_dev->notification_base);
@@ -700,7 +732,7 @@ static void vmci_guest_remove_device(struct pci_dev *pdev)
spin_unlock_irq(&vmci_dev_spinlock);

dev_dbg(&pdev->dev, "Resetting vmci device\n");
- iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);
+ vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);

/*
* Free IRQ and then disable MSI/MSI-X as appropriate. For
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index 9911ecfc18ba..8fc00e2685cf 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -82,6 +82,18 @@ enum {
*/
#define VMCI_MAX_PINNED_QP_MEMORY ((size_t)(32 * 1024))

+/*
+ * The version of the VMCI device that supports MMIO access to registers
+ * requests 256KB for BAR1 whereas the version of VMCI that supports
+ * MSI/MSI-X only requests 8KB. The layout of the larger 256KB region is:
+ * - the first 128KB are used for MSI/MSI-X.
+ * - the following 64KB are used for MMIO register access.
+ * - the remaining 64KB are unused.
+ */
+#define VMCI_WITH_MMIO_ACCESS_BAR_SIZE ((size_t)(256 * 1024))
+#define VMCI_MMIO_ACCESS_OFFSET ((size_t)(128 * 1024))
+#define VMCI_MMIO_ACCESS_SIZE ((size_t)(64 * 1024))
+
/*
* We have a fixed set of resource IDs available in the VMX.
* This allows us to have a very simple implementation since we statically
--
2.25.1