From: Daehwan Jung <dh10.jung@samsung.com>
Subject: [PATCH v4 5/5] usb: host: add xhci-exynos driver
Date: 26 Apr 2022
This driver is for the Samsung Exynos xHCI host controller. It is built
on top of the xhci-plat driver and extends it through xhci hooks and
overrides.

It supports USB Audio offload to a co-processor. Only the DCBAA, Device
Context, Transfer Ring, Event Ring, and ERST are involved: they are
allocated at fixed addresses through the xhci hooks, so the co-processor
can then use them directly without going through the xhci driver.

Signed-off-by: Daehwan Jung <dh10.jung@samsung.com>
---
drivers/usb/host/Kconfig | 8 +
drivers/usb/host/Makefile | 1 +
drivers/usb/host/xhci-exynos.c | 567 +++++++++++++++++++++++++++++++++
drivers/usb/host/xhci-exynos.h | 50 +++
4 files changed, 626 insertions(+)
create mode 100644 drivers/usb/host/xhci-exynos.c
create mode 100644 drivers/usb/host/xhci-exynos.h
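
Note on the hook mechanism (illustration only, not part of this patch):
the offload path relies on the xhci vendor-ops infrastructure added
earlier in this series, which is not shown in this mail. Below is a
minimal sketch of how such a hook could be dispatched on the xhci core
side, assuming an ops table shaped like the one this driver registers;
the function name, the xhci_vendor_get_ops() accessor and the call site
are assumptions for illustration, not the actual implementation.

/*
 * Sketch only: dispatch DCBAA allocation to the vendor glue if it
 * provides a hook, otherwise fall back to the usual coherent DMA
 * allocation done by the xhci core.
 */
static int xhci_alloc_dcbaa_hooked(struct xhci_hcd *xhci, gfp_t flags)
{
	struct xhci_vendor_ops *ops = xhci_vendor_get_ops(xhci); /* assumed accessor */
	struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
	dma_addr_t dma;

	/* Vendor glue (here: xhci-exynos) places the DCBAA in URAM. */
	if (ops && ops->alloc_dcbaa) {
		xhci->dcbaa = ops->alloc_dcbaa(xhci, flags);
		return xhci->dcbaa ? 0 : -ENOMEM;
	}

	/* Default path: coherent DMA memory, as xhci_mem_init() does today. */
	xhci->dcbaa = dma_alloc_coherent(dev, sizeof(*xhci->dcbaa), &dma, flags);
	if (!xhci->dcbaa)
		return -ENOMEM;
	xhci->dcbaa->dma = dma;
	return 0;
}

With a dispatch like this in the core, the ops table registered through
xhci_plat_register_vendor_ops() below is all the glue driver needs to
redirect the DCBAA, Device Context and ring allocations into URAM.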

diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 682b3d2da623..ccafcd9b4212 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -104,6 +104,14 @@ config USB_XHCI_TEGRA
Say 'Y' to enable the support for the xHCI host controller
found in NVIDIA Tegra124 and later SoCs.

+config USB_XHCI_EXYNOS
+ tristate "xHCI support for Samsung Exynos SoC Series"
+ depends on USB_XHCI_PLATFORM
+ depends on ARCH_EXYNOS || COMPILE_TEST
+ help
+ Say 'Y' to enable the support for the xHCI host controller
+ found in Samsung Exynos SoCs.
+
endif # USB_XHCI_HCD

config USB_EHCI_BRCMSTB
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 2948983618fb..300f22b6eb1b 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -86,3 +86,4 @@ obj-$(CONFIG_USB_HCD_SSB) += ssb-hcd.o
obj-$(CONFIG_USB_FOTG210_HCD) += fotg210-hcd.o
obj-$(CONFIG_USB_MAX3421_HCD) += max3421-hcd.o
obj-$(CONFIG_USB_XEN_HCD) += xen-hcd.o
+obj-$(CONFIG_USB_XHCI_EXYNOS) += xhci-exynos.o
diff --git a/drivers/usb/host/xhci-exynos.c b/drivers/usb/host/xhci-exynos.c
new file mode 100644
index 000000000000..4f22f620d7e0
--- /dev/null
+++ b/drivers/usb/host/xhci-exynos.c
@@ -0,0 +1,567 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * xhci-exynos.c - xHCI host controller driver platform Bus Glue for Exynos.
+ *
+ * Copyright (C) 2022 Samsung Electronics Incorporated - http://www.samsung.com
+ * Author: Daehwan Jung <dh10.jung@samsung.com>
+ *
+ * A lot of code borrowed from the Linux xHCI driver.
+ */
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "xhci.h"
+#include "xhci-plat.h"
+#include "xhci-exynos.h"
+
+static const struct dev_pm_ops xhci_exynos_pm_ops;
+
+static struct xhci_vendor_ops ops;
+static void xhci_exynos_parse_endpoint(struct xhci_hcd *xhci, struct usb_device *udev,
+ struct usb_endpoint_descriptor *desc, struct xhci_container_ctx *ctx);
+static void xhci_exynos_free_event_ring(struct xhci_hcd *xhci);
+static struct xhci_ring *xhci_ring_alloc_uram(struct xhci_hcd *xhci,
+ unsigned int num_segs, unsigned int cycle_state,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags,
+ u32 endpoint_type);
+static void xhci_exynos_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+static int xhci_exynos_alloc_event_ring(struct xhci_hcd *xhci, gfp_t flags);
+
+static int xhci_exynos_register_vendor_ops(struct xhci_vendor_ops *vendor_ops)
+{
+ return xhci_plat_register_vendor_ops(&ops);
+}
+
+static void xhci_exynos_quirks(struct device *dev, struct xhci_hcd *xhci)
+{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+
+ xhci->quirks |= XHCI_PLAT | xhci_exynos->quirks;
+}
+
+/* called during probe() after chip reset completes */
+static int xhci_exynos_setup(struct usb_hcd *hcd)
+{
+ int ret;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ ret = xhci_gen_setup(hcd, xhci_exynos_quirks);
+ xhci_exynos_alloc_event_ring(xhci, GFP_KERNEL);
+
+ return ret;
+}
+
+static void xhci_exynos_usb_offload_enable_event_ring(struct xhci_hcd *xhci)
+{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+
+ u32 temp;
+ u64 temp_64;
+
+ temp_64 = xhci_read_64(xhci, &xhci_exynos->ir_set_audio->erst_dequeue);
+ temp_64 &= ~ERST_PTR_MASK;
+ xhci_info(xhci, "ERST2 deq = 64'h%0lx", (unsigned long) temp_64);
+
+ xhci_info(xhci, "// [USB Audio] Set the interrupt modulation register");
+ temp = readl(&xhci_exynos->ir_set_audio->irq_control);
+ temp &= ~ER_IRQ_INTERVAL_MASK;
+ temp |= (u32)160; /* interrupt moderation interval, in 250 ns units (160 => 40 us) */
+ writel(temp, &xhci_exynos->ir_set_audio->irq_control);
+
+ temp = readl(&xhci_exynos->ir_set_audio->irq_pending);
+ xhci_info(xhci, "// [USB Audio] Enabling event ring interrupter %p by writing 0x%x to irq_pending",
+ xhci_exynos->ir_set_audio, (unsigned int) ER_IRQ_ENABLE(temp));
+ writel(ER_IRQ_ENABLE(temp), &xhci_exynos->ir_set_audio->irq_pending);
+}
+
+static int xhci_exynos_start(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci;
+ int ret;
+
+ ret = xhci_run(hcd);
+
+ xhci = hcd_to_xhci(hcd);
+ xhci_exynos_usb_offload_enable_event_ring(xhci);
+
+ return ret;
+}
+
+static int xhci_exynos_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
+ struct usb_host_endpoint *ep)
+{
+ int ret;
+ struct xhci_hcd *xhci;
+ struct xhci_virt_device *virt_dev;
+
+ ret = xhci_add_endpoint(hcd, udev, ep);
+
+ if (!ret && udev->slot_id) {
+ xhci = hcd_to_xhci(hcd);
+ virt_dev = xhci->devs[udev->slot_id];
+ xhci_exynos_parse_endpoint(xhci, udev, &ep->desc, virt_dev->out_ctx);
+ }
+
+ return ret;
+}
+
+static int xhci_exynos_address_device(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd *xhci;
+ int ret;
+
+ ret = xhci_address_device(hcd, udev);
+ xhci = hcd_to_xhci(hcd);
+
+ /* TODO: store and pass hw info to the co-processor here */
+
+ return ret;
+}
+
+static int xhci_exynos_wake_lock(struct xhci_hcd_exynos *xhci_exynos,
+ int is_main_hcd, int is_lock)
+{
+ struct usb_hcd *hcd = xhci_exynos->hcd;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct wakeup_source *main_wakelock, *shared_wakelock;
+
+ main_wakelock = xhci_exynos->main_wakelock;
+ shared_wakelock = xhci_exynos->shared_wakelock;
+
+ if (xhci->xhc_state & XHCI_STATE_REMOVING)
+ return -ESHUTDOWN;
+
+ if (is_lock) {
+ if (is_main_hcd)
+ __pm_stay_awake(main_wakelock);
+ else
+ __pm_stay_awake(shared_wakelock);
+ } else {
+ if (is_main_hcd)
+ __pm_relax(main_wakelock);
+ else
+ __pm_relax(shared_wakelock);
+ }
+
+ return 0;
+}
+
+static int xhci_exynos_bus_suspend(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+
+ int ret, main_hcd;
+
+ if (hcd == xhci->main_hcd)
+ main_hcd = 1;
+ else
+ main_hcd = 0;
+
+ ret = xhci_bus_suspend(hcd);
+ xhci_exynos_wake_lock(xhci_exynos, main_hcd, 0);
+
+ return ret;
+}
+
+static int xhci_exynos_bus_resume(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ int ret, main_hcd;
+
+ if (hcd == xhci->main_hcd)
+ main_hcd = 1;
+ else
+ main_hcd = 0;
+
+ ret = xhci_bus_resume(hcd);
+ xhci_exynos_wake_lock(xhci_exynos, main_hcd, 1);
+
+ return ret;
+}
+
+const struct xhci_driver_overrides xhci_exynos_overrides = {
+ .reset = xhci_exynos_setup,
+ .start = xhci_exynos_start,
+ .add_endpoint = xhci_exynos_add_endpoint,
+ .address_device = xhci_exynos_address_device,
+ .bus_suspend = xhci_exynos_bus_suspend,
+ .bus_resume = xhci_exynos_bus_resume,
+};
+
+static int xhci_exynos_vendor_init(struct xhci_hcd *xhci, struct device *dev)
+{
+ struct usb_hcd *hcd;
+ struct xhci_hcd_exynos *xhci_exynos;
+ struct xhci_plat_priv *priv;
+ struct wakeup_source *main_wakelock, *shared_wakelock;
+ struct platform_device *pdev;
+
+ pdev = to_platform_device(dev);
+
+ xhci_plat_override_driver(&xhci_exynos_overrides);
+ dev->driver->pm = &xhci_exynos_pm_ops;
+
+ main_wakelock = wakeup_source_register(dev, dev_name(dev));
+ __pm_stay_awake(main_wakelock);
+
+ /* Initialize the shared wakelock for the SS HCD */
+ shared_wakelock = wakeup_source_register(dev, dev_name(dev));
+ __pm_stay_awake(shared_wakelock);
+
+ hcd = xhci->main_hcd;
+
+ priv = hcd_to_xhci_priv(hcd);
+ xhci_exynos = priv->vendor_priv;
+ xhci_exynos->dev = dev;
+ xhci_exynos->main_wakelock = main_wakelock;
+ xhci_exynos->shared_wakelock = shared_wakelock;
+
+ return 0;
+}
+
+static void xhci_exynos_vendor_cleanup(struct xhci_hcd *xhci)
+{
+ xhci_exynos_free_event_ring(xhci);
+}
+
+static bool xhci_exynos_is_usb_offload_enabled(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev, unsigned int ep_index)
+{
+ /* TODO */
+ return true;
+}
+
+static struct xhci_device_context_array *xhci_exynos_alloc_dcbaa(
+ struct xhci_hcd *xhci, gfp_t flags)
+{
+ int i;
+
+ xhci->dcbaa = ioremap(EXYNOS_URAM_DCBAA_ADDR,
+ sizeof(*xhci->dcbaa));
+ if (!xhci->dcbaa)
+ return NULL;
+ /* Clear DCBAA */
+ for (i = 0; i < MAX_HC_SLOTS; i++)
+ xhci->dcbaa->dev_context_ptrs[i] = 0x0;
+
+ xhci->dcbaa->dma = EXYNOS_URAM_DCBAA_ADDR;
+
+ return xhci->dcbaa;
+}
+
+static void xhci_exynos_free_dcbaa(struct xhci_hcd *xhci)
+{
+ iounmap(xhci->dcbaa);
+ xhci->dcbaa = NULL;
+}
+
+static struct xhci_ring *xhci_exynos_alloc_transfer_ring(struct xhci_hcd *xhci, u32 endpoint_type,
+ enum xhci_ring_type ring_type, unsigned int max_packet, gfp_t mem_flags)
+{
+ return xhci_ring_alloc_uram(xhci, 1, 1, ring_type, max_packet, mem_flags, endpoint_type);
+}
+
+static void xhci_exynos_free_transfer_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int ep_index)
+{
+ xhci_exynos_ring_free(xhci, ring);
+}
+
+static void xhci_exynos_alloc_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx,
+ int type, gfp_t flags)
+{
+ /* Only the first Device Context uses URAM */
+ int i;
+
+ ctx->bytes = ioremap(EXYNOS_URAM_DEVICE_CTX_ADDR, EXYNOS_URAM_CTX_SIZE);
+ if (!ctx->bytes)
+ return;
+
+ for (i = 0; i < EXYNOS_URAM_CTX_SIZE; i++)
+ ctx->bytes[i] = 0;
+
+ ctx->dma = EXYNOS_URAM_DEVICE_CTX_ADDR;
+}
+
+static void xhci_exynos_free_container_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
+{
+ /* Skip dma_pool_free() when the context was allocated from URAM */
+ if (ctx->dma != EXYNOS_URAM_DEVICE_CTX_ADDR)
+ dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma);
+}
+
+static int xhci_exynos_sync_dev_ctx(struct xhci_hcd *xhci, unsigned int slot_id)
+{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+ struct xhci_virt_device *virt_dev;
+ struct xhci_slot_ctx *slot_ctx;
+
+ int i;
+ int last_ep;
+ int last_ep_ctx = 31;
+
+ virt_dev = xhci->devs[slot_id];
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
+
+ last_ep = LAST_CTX_TO_EP_NUM(le32_to_cpu(slot_ctx->dev_info));
+
+ if (last_ep < 31)
+ last_ep_ctx = last_ep + 1;
+
+ for (i = 0; i < last_ep_ctx; ++i) {
+ unsigned int epaddr = xhci_get_endpoint_address(i);
+ struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i);
+
+ if (epaddr == xhci_exynos->in_ep)
+ xhci_exynos->in_deq = ep_ctx->deq;
+ else if (epaddr == xhci_exynos->out_ep)
+ xhci_exynos->out_deq = ep_ctx->deq;
+ }
+
+ return 0;
+}
+static struct xhci_vendor_ops ops = {
+ .vendor_init = xhci_exynos_vendor_init,
+ .vendor_cleanup = xhci_exynos_vendor_cleanup,
+ .is_usb_offload_enabled = xhci_exynos_is_usb_offload_enabled,
+ .alloc_dcbaa = xhci_exynos_alloc_dcbaa,
+ .free_dcbaa = xhci_exynos_free_dcbaa,
+ .alloc_transfer_ring = xhci_exynos_alloc_transfer_ring,
+ .free_transfer_ring = xhci_exynos_free_transfer_ring,
+ .alloc_container_ctx = xhci_exynos_alloc_container_ctx,
+ .free_container_ctx = xhci_exynos_free_container_ctx,
+ .sync_dev_ctx = xhci_exynos_sync_dev_ctx,
+};
+
+
+static void xhci_exynos_parse_endpoint(struct xhci_hcd *xhci, struct usb_device *udev,
+ struct usb_endpoint_descriptor *desc, struct xhci_container_ctx *ctx)
+{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+ struct usb_endpoint_descriptor *d = desc;
+ unsigned int ep_index;
+ struct xhci_ep_ctx *ep_ctx;
+
+ ep_index = xhci_get_endpoint_index(d);
+ ep_ctx = xhci_get_ep_ctx(xhci, ctx, ep_index);
+
+ if ((d->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_ISOC) {
+ if (d->bEndpointAddress & USB_ENDPOINT_DIR_MASK)
+ xhci_exynos->in_ep = d->bEndpointAddress;
+ else
+ xhci_exynos->out_ep = d->bEndpointAddress;
+ }
+}
+
+static void xhci_exynos_segment_free_skip(struct xhci_hcd *xhci, struct xhci_segment *seg)
+{
+ kfree(seg->bounce_buf);
+ kfree(seg);
+}
+
+static void xhci_exynos_free_segments_for_ring(struct xhci_hcd *xhci,
+ struct xhci_segment *first)
+{
+ struct xhci_segment *seg;
+
+ seg = first->next;
+
+ while (seg != first) {
+ struct xhci_segment *next = seg->next;
+
+ xhci_exynos_segment_free_skip(xhci, seg);
+ seg = next;
+ }
+ xhci_exynos_segment_free_skip(xhci, first);
+}
+
+static void xhci_exynos_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring)
+{
+ if (!ring)
+ return;
+
+ if (ring->first_seg) {
+ if (ring->type == TYPE_STREAM)
+ xhci_remove_stream_mapping(ring);
+
+ xhci_exynos_free_segments_for_ring(xhci, ring->first_seg);
+ }
+
+ kfree(ring);
+}
+
+static void xhci_exynos_free_event_ring(struct xhci_hcd *xhci)
+{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+
+ xhci_exynos_ring_free(xhci, xhci_exynos->event_ring_audio);
+}
+
+static void xhci_exynos_set_hc_event_deq_audio(struct xhci_hcd *xhci)
+{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+
+ u64 temp;
+ dma_addr_t deq;
+
+ deq = xhci_trb_virt_to_dma(xhci_exynos->event_ring_audio->deq_seg,
+ xhci_exynos->event_ring_audio->dequeue);
+ if (deq == 0 && !in_interrupt())
+ xhci_warn(xhci, "WARN something wrong with SW event ring "
+ "dequeue ptr.\n");
+ /* Update HC event ring dequeue pointer */
+ temp = xhci_read_64(xhci, &xhci_exynos->ir_set_audio->erst_dequeue);
+ temp &= ERST_PTR_MASK;
+ /* Don't clear the EHB bit (which is RW1C) because
+ * there might be more events to service.
+ */
+ temp &= ~ERST_EHB;
+ xhci_info(xhci, "//[%s] Write event ring dequeue pointer = 0x%llx, "
+ "preserving EHB bit", __func__,
+ ((u64) deq & (u64) ~ERST_PTR_MASK) | temp);
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ &xhci_exynos->ir_set_audio->erst_dequeue);
+
+}
+
+
+
+static int xhci_exynos_alloc_event_ring(struct xhci_hcd *xhci, gfp_t flags)
+{
+ struct xhci_plat_priv *priv = xhci_to_priv(xhci);
+ struct xhci_hcd_exynos *xhci_exynos = priv->vendor_priv;
+
+ if (xhci_check_trb_in_td_math(xhci) < 0)
+ goto fail;
+
+ xhci_exynos->event_ring_audio = xhci_ring_alloc(xhci, ERST_NUM_SEGS, 1,
+ TYPE_EVENT, 0, flags);
+ /* Set the event ring dequeue address */
+ xhci_exynos_set_hc_event_deq_audio(xhci);
+
+ return 0;
+fail:
+ return -1;
+}
+
+static int xhci_alloc_segments_for_ring_uram(struct xhci_hcd *xhci,
+ struct xhci_segment **first, struct xhci_segment **last,
+ unsigned int num_segs, unsigned int cycle_state,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags,
+ u32 endpoint_type)
+{
+ struct xhci_segment *prev;
+ bool chain_links = false;
+
+ while (num_segs > 0) {
+ struct xhci_segment *next = NULL;
+
+ if (!next) {
+ prev = *first;
+ while (prev) {
+ next = prev->next;
+ xhci_segment_free(xhci, prev);
+ prev = next;
+ }
+ return -ENOMEM;
+ }
+ xhci_link_segments(prev, next, type, chain_links);
+
+ prev = next;
+ num_segs--;
+ }
+ xhci_link_segments(prev, *first, type, chain_links);
+ *last = prev;
+
+ return 0;
+}
+
+static struct xhci_ring *xhci_ring_alloc_uram(struct xhci_hcd *xhci,
+ unsigned int num_segs, unsigned int cycle_state,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags,
+ u32 endpoint_type)
+{
+ struct xhci_ring *ring;
+ int ret;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ ring = kzalloc_node(sizeof(*ring), flags, dev_to_node(dev));
+ if (!ring)
+ return NULL;
+
+ ring->num_segs = num_segs;
+ ring->bounce_buf_len = max_packet;
+ INIT_LIST_HEAD(&ring->td_list);
+ ring->type = type;
+ if (num_segs == 0)
+ return ring;
+
+ ret = xhci_alloc_segments_for_ring_uram(xhci, &ring->first_seg,
+ &ring->last_seg, num_segs, cycle_state, type,
+ max_packet, flags, endpoint_type);
+ if (ret)
+ goto fail;
+
+ /* Only event ring does not use link TRB */
+ if (type != TYPE_EVENT) {
+ /* See section 4.9.2.1 and 6.4.4.1 */
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
+ }
+ xhci_initialize_ring_info(ring, cycle_state);
+
+ return ring;
+
+fail:
+ kfree(ring);
+ return NULL;
+}
+
+static int xhci_exynos_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ /* TODO: AP sleep scenario */
+
+ return xhci_suspend(xhci, device_may_wakeup(dev));
+}
+
+static int xhci_exynos_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ int ret;
+
+ /* TODO: AP resume scenario */
+
+ ret = xhci_resume(xhci, 0);
+ if (ret)
+ return ret;
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops xhci_exynos_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(xhci_exynos_suspend, xhci_exynos_resume)
+};
+
+MODULE_DESCRIPTION("xHCI Exynos Host Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/usb/host/xhci-exynos.h b/drivers/usb/host/xhci-exynos.h
new file mode 100644
index 000000000000..4428f8f3a5ff
--- /dev/null
+++ b/drivers/usb/host/xhci-exynos.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * xhci-exynos.h - xHCI host controller driver platform Bus Glue for Exynos.
+ *
+ * Copyright (C) 2022 Samsung Electronics Incorporated - http://www.samsung.com
+ * Author: Daehwan Jung <dh10.jung@samsung.com>
+ *
+ * A lot of code borrowed from the Linux xHCI driver.
+ */
+
+#include "xhci.h"
+
+/* EXYNOS uram memory map */
+#define EXYNOS_URAM_ABOX_EVT_RING_ADDR 0x02a00000
+#define EXYNOS_URAM_ISOC_OUT_RING_ADDR 0x02a01000
+#define EXYNOS_URAM_ISOC_IN_RING_ADDR 0x02a02000
+#define EXYNOS_URAM_DEVICE_CTX_ADDR 0x02a03000
+#define EXYNOS_URAM_DCBAA_ADDR 0x02a03880
+#define EXYNOS_URAM_ABOX_ERST_SEG_ADDR 0x02a03C80
+#define EXYNOS_URAM_CTX_SIZE 2112
+
+struct xhci_hcd_exynos {
+ struct xhci_intr_reg __iomem *ir_set_audio;
+
+ struct xhci_ring *event_ring_audio;
+ struct xhci_erst erst_audio;
+
+ struct device *dev;
+ struct usb_hcd *hcd;
+ struct usb_hcd *shared_hcd;
+
+ struct wakeup_source *main_wakelock; /* Wakelock for HS HCD */
+ struct wakeup_source *shared_wakelock; /* Wakelock for SS HCD */
+
+ u32 in_ep;
+ u32 out_ep;
+ u32 in_deq;
+ u32 out_deq;
+
+ unsigned long long quirks;
+};
+
+int xhci_check_trb_in_td_math(struct xhci_hcd *xhci);
+void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg);
+void xhci_link_segments(struct xhci_segment *prev,
+ struct xhci_segment *next,
+ enum xhci_ring_type type, bool chain_links);
+void xhci_initialize_ring_info(struct xhci_ring *ring,
+ unsigned int cycle_state);
+void xhci_remove_stream_mapping(struct xhci_ring *ring);
--
2.31.1