Subject: [PATCH v5 14/16] dmaengine: dw-axi-dmac: Add Intel KeemBay AxiDMA BYTE and HALFWORD registers
Add support for programming the Intel KeemBay AxiDMA BYTE and
HALFWORD registers.

The Intel KeemBay AxiDMA supports both device-to-memory and
memory-to-device data transfers.

This code is needed by I2C, I3C, I2S, SPI and UART, which use 8-bit
and 16-bit FIFOs to perform memory-to-device transfers. 0-padding
functionality is provided to avoid pre-processing of the data on the
CPU.
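
For illustration, a client peripheral driver selects the matching
destination bus width through the standard dmaengine slave-config
API. A minimal sketch, where the FIFO address, burst size and the
requested channel are placeholders:

#include <linux/dmaengine.h>

/*
 * Illustrative only: a client driver (e.g. a UART with an 8-bit TX
 * FIFO) requests a 1-byte destination width.  "fifo_phys" is a
 * placeholder for the device FIFO address; "chan" is a channel
 * obtained via dma_request_chan().
 */
static int example_setup_tx_dma(struct dma_chan *chan, dma_addr_t fifo_phys)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_phys,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_1_BYTE,	/* 8-bit FIFO */
		.dst_maxburst	= 4,
	};

	return dmaengine_slave_config(chan, &cfg);
}

With a 1-byte width, __ffs(dst_addr_width) evaluates to
DWAXIDMAC_TRANS_WIDTH_8, so axi_chan_block_xfer_start() sets this
channel's bit in DMAC_APB_BYTE_WR_CH_EN below.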

Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: Sia Jee Heng <jee.heng.sia@intel.com>
---
 .../dma/dw-axi-dmac/dw-axi-dmac-platform.c | 44 ++++++++++++++++---
 1 file changed, 39 insertions(+), 5 deletions(-)

diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
index 440f9a8b25da..dc7ddf98fd04 100644
--- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -312,7 +312,7 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 				       struct axi_dma_desc *first)
 {
 	u32 priority = chan->chip->dw->hdata->priority[chan->id];
-	u32 reg, irq_mask;
+	u32 reg, irq_mask, reg_width, offset, val;
 	u8 lms = 0; /* Select AXI0 master for LLI fetching */
 
 	if (unlikely(axi_chan_is_hw_enable(chan))) {
@@ -334,6 +334,25 @@ static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
 	       DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
 	switch (chan->direction) {
 	case DMA_MEM_TO_DEV:
+		if (chan->chip->apb_regs) {
+			reg_width = __ffs(chan->config.dst_addr_width);
+			/*
+			 * Configure Byte and Halfword register
+			 * for MEM_TO_DEV only.
+			 */
+			if (reg_width == DWAXIDMAC_TRANS_WIDTH_16) {
+				offset = DMAC_APB_HALFWORD_WR_CH_EN;
+				val = ioread32(chan->chip->apb_regs + offset);
+				val |= BIT(chan->id);
+				iowrite32(val, chan->chip->apb_regs + offset);
+			} else if (reg_width == DWAXIDMAC_TRANS_WIDTH_8) {
+				offset = DMAC_APB_BYTE_WR_CH_EN;
+				val = ioread32(chan->chip->apb_regs + offset);
+				val |= BIT(chan->id);
+				iowrite32(val, chan->chip->apb_regs + offset);
+			}
+		}
+
 		reg |= (chan->config.device_fc ?
 			DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
 			DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC)
@@ -994,8 +1013,9 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
 {
 	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
 	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
+	u32 reg_width = __ffs(chan->config.dst_addr_width);
 	unsigned long flags;
-	u32 val;
+	u32 offset, val;
 	int ret;
 	LIST_HEAD(head);
 
@@ -1007,9 +1027,23 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
 		dev_warn(dchan2dev(dchan),
 			 "%s failed to stop\n", axi_chan_name(chan));
 
-	if (chan->direction != DMA_MEM_TO_MEM)
-		dw_axi_dma_set_hw_channel(chan->chip,
-					  chan->hw_handshake_num, false);
+	if (chan->direction != DMA_MEM_TO_MEM) {
+		ret = dw_axi_dma_set_hw_channel(chan->chip,
+						chan->hw_handshake_num, false);
+		if (ret == 0 && chan->direction == DMA_MEM_TO_DEV) {
+			if (reg_width == DWAXIDMAC_TRANS_WIDTH_8) {
+				offset = DMAC_APB_BYTE_WR_CH_EN;
+				val = ioread32(chan->chip->apb_regs + offset);
+				val &= ~BIT(chan->id);
+				iowrite32(val, chan->chip->apb_regs + offset);
+			} else if (reg_width == DWAXIDMAC_TRANS_WIDTH_16) {
+				offset = DMAC_APB_HALFWORD_WR_CH_EN;
+				val = ioread32(chan->chip->apb_regs + offset);
+				val &= ~BIT(chan->id);
+				iowrite32(val, chan->chip->apb_regs + offset);
+			}
+		}
+	}
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
 
--
2.18.0