Subject: Re: [PATCH V4 RESEND] dmaengine: Loongson1: Add Loongson1 dmaengine driver
On 21-05-21, 07:02, Keguang Zhang wrote:

> +config LOONGSON1_DMA
> +	tristate "Loongson1 DMA support"
> +	depends on MACH_LOONGSON32

Why does it have to depend on that? The DMA driver itself is generic..
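If the only reason is the platform headers, something along these lines
would at least keep compile coverage (untested sketch, assuming the
driver builds on other architectures):

config LOONGSON1_DMA
	tristate "Loongson1 DMA support"
	depends on MACH_LOONGSON32 || COMPILE_TEST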

> +static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
> +{
> +	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
> +
> +	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
> +					  dchan->device->dev,
> +					  sizeof(struct ls1x_dma_lli),
> +					  __alignof__(struct ls1x_dma_lli), 0);
> +	if (!chan->desc_pool) {
> +		dev_err(chan2dev(dchan),
> +			"failed to alloc DMA descriptor pool!\n");

This can be dropped; the allocator will already warn you about
allocation failures, so the if body reduces to just the return
(see the sketch after this hunk).

> +		return -ENOMEM;
> +	}
> +
> +	return 0;
> +}
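
i.e. something like this (untested sketch, only the print dropped,
everything else as in the patch):

static int ls1x_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);

	/* pool for the hardware linked-list items, as in the patch */
	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  dchan->device->dev,
					  sizeof(struct ls1x_dma_lli),
					  __alignof__(struct ls1x_dma_lli), 0);
	if (!chan->desc_pool)
		return -ENOMEM;

	return 0;
}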
> +
> +static void ls1x_dma_free_desc(struct virt_dma_desc *vdesc)
> +{
> +	struct ls1x_dma_desc *desc = to_ls1x_dma_desc(vdesc);
> +
> +	if (desc->nr_descs) {
> +		unsigned int i = desc->nr_descs;
> +		struct ls1x_dma_hwdesc *hwdesc;
> +
> +		do {
> +			hwdesc = &desc->hwdesc[--i];
> +			dma_pool_free(desc->chan->desc_pool, hwdesc->lli,
> +				      hwdesc->phys);
> +		} while (i);
> +	}
> +
> +	kfree(desc);
> +}
> +
> +static struct ls1x_dma_desc *ls1x_dma_alloc_desc(struct ls1x_dma_chan *chan,
> +						 int sg_len)

this fits on a single line now :)

> +{
> +	struct ls1x_dma_desc *desc;
> +	struct dma_chan *dchan = &chan->vchan.chan;
> +
> +	desc = kzalloc(struct_size(desc, hwdesc, sg_len), GFP_NOWAIT);
> +	if (!desc)
> +		dev_err(chan2dev(dchan), "failed to alloc DMA descriptor!\n");

this can be dropped too..

> +static struct dma_async_tx_descriptor *
> +ls1x_dma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
> +		       unsigned int sg_len,
> +		       enum dma_transfer_direction direction,
> +		       unsigned long flags, void *context)
> +{
> +	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
> +	struct dma_slave_config *cfg = &chan->cfg;
> +	struct ls1x_dma_desc *desc;
> +	struct scatterlist *sg;
> +	unsigned int dev_addr, bus_width, cmd, i;
> +
> +	if (!is_slave_direction(direction)) {
> +		dev_err(chan2dev(dchan), "invalid DMA direction!\n");
> +		return NULL;
> +	}
> +
> +	dev_dbg(chan2dev(dchan), "sg_len=%d, dir=%s, flags=0x%lx\n", sg_len,
> +		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
> +		flags);
> +
> +	switch (direction) {
> +	case DMA_MEM_TO_DEV:
> +		dev_addr = cfg->dst_addr;
> +		bus_width = cfg->dst_addr_width;
> +		cmd = LS1X_DMA_RAM2DEV | LS1X_DMA_INT;
> +		break;
> +	case DMA_DEV_TO_MEM:
> +		dev_addr = cfg->src_addr;
> +		bus_width = cfg->src_addr_width;
> +		cmd = LS1X_DMA_INT;
> +		break;
> +	default:
> +		dev_err(chan2dev(dchan),
> +			"unsupported DMA transfer mode! %d\n", direction);
> +		return NULL;

Will this ever be executed? is_slave_direction() above already limits
direction to DMA_MEM_TO_DEV or DMA_DEV_TO_MEM.

> +static int ls1x_dma_slave_config(struct dma_chan *dchan,
> +				 struct dma_slave_config *config)
> +{
> +	struct ls1x_dma_chan *chan = to_ls1x_dma_chan(dchan);
> +
> +	if (!dchan)
> +		return -EINVAL;

Shouldn't dchan be checked before you dereference it to get chan?
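i.e. do the container_of() only after the check, something like
(untested sketch, rest of the function left as in the patch):

static int ls1x_dma_slave_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct ls1x_dma_chan *chan;

	if (!dchan)
		return -EINVAL;

	chan = to_ls1x_dma_chan(dchan);

	/* ... rest of the function unchanged ... */
}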

> +static void ls1x_dma_trigger(struct ls1x_dma_chan *chan)
> +{
> +	struct dma_chan *dchan = &chan->vchan.chan;
> +	struct ls1x_dma_desc *desc;
> +	struct virt_dma_desc *vdesc;
> +	unsigned int val;
> +
> +	vdesc = vchan_next_desc(&chan->vchan);
> +	if (!vdesc) {
> +		dev_warn(chan2dev(dchan), "No pending descriptor\n");

Hmm, I would not log that... this is called from
ls1x_dma_issue_pending(), which a client driver can call at any time;
the previous completion may already have pushed the pending descriptor
to the hardware, so finding an empty queue here can happen quite
frequently.
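A quiet return is enough, something like:

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		/* queue empty, nothing to start */
		return;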

> +static irqreturn_t ls1x_dma_irq_handler(int irq, void *data)
> +{
> +	struct ls1x_dma_chan *chan = data;
> +	struct dma_chan *dchan = &chan->vchan.chan;
> +
> +	dev_dbg(chan2dev(dchan), "DMA IRQ %d on channel %d\n", irq, chan->id);
> +	if (!chan->desc) {
> +		dev_warn(chan2dev(dchan),
> +			 "DMA IRQ with no active descriptor on channel %d\n",
> +			 chan->id);

single line pls

> +		return IRQ_NONE;
> +	}
> +
> +	spin_lock(&chan->vchan.lock);
> +
> +	if (chan->desc->type == DMA_CYCLIC) {
> +		vchan_cyclic_callback(&chan->desc->vdesc);
> +	} else {
> +		list_del(&chan->desc->vdesc.node);
> +		vchan_cookie_complete(&chan->desc->vdesc);
> +		chan->desc = NULL;
> +	}

Not submitting the next transaction here defeats the purpose of DMA;
we should push transactions to the hardware as fast as possible..
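Something along these lines in the completion branch (untested sketch;
assumes ls1x_dma_trigger() may be called with vchan.lock held, as it
presumably is from issue_pending(), and copes with an empty queue):

	} else {
		list_del(&chan->desc->vdesc.node);
		vchan_cookie_complete(&chan->desc->vdesc);
		chan->desc = NULL;
		/* start the next queued descriptor right away */
		if (vchan_next_desc(&chan->vchan))
			ls1x_dma_trigger(chan);
	}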

> +static struct platform_driver ls1x_dma_driver = {
> +	.probe = ls1x_dma_probe,
> +	.remove = ls1x_dma_remove,
> +	.driver = {
> +		.name = "ls1x-dma",
> +	},

No device tree?
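At minimum an of_device_id table wired into the driver, plus a binding
document to go with it. Rough sketch (the compatible string here is
just a placeholder, pick whatever the binding ends up using):

static const struct of_device_id ls1x_dma_of_match[] = {
	{ .compatible = "loongson,ls1x-dma" },	/* placeholder */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ls1x_dma_of_match);

static struct platform_driver ls1x_dma_driver = {
	.probe = ls1x_dma_probe,
	.remove = ls1x_dma_remove,
	.driver = {
		.name = "ls1x-dma",
		.of_match_table = ls1x_dma_of_match,
	},
};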

--
~Vinod
