Subject: Re: [PATCH 09/11] spi: dw: prepare the transfer routine for enhanced mode

On Tue, Aug 02, 2022 at 06:57:53PM +0100, Sudip Mukherjee wrote:
> The transfer routine for dual/quad/octal mode is similar to the one for
> standard SPI mode, except that we do not need to worry about CS being
> de-asserted and we write the address to a single FIFO location.

Please redesign this patch to use IRQ-based transfers. For
instance, you could create a new dw_spi_enh_write_then_read() method
which would initialize the IRQs, set up a custom
dw_spi_enh_transfer_handler() method as the transfer_handler (or perhaps
re-use the already available dw_spi_transfer_handler() method?) and
initiate the transfer by writing the command and address data to the
Tx FIFO.
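
Something along these lines would do. Note this is only an untested,
illustrative sketch: the FIFO threshold, the interrupt mask and the
command/address framing below are assumptions still to be worked out;
it's only meant to show the overall shape.

static int dw_spi_enh_write_then_read(struct dw_spi *dws,
				      struct spi_device *spi)
{
	u32 txw;

	/*
	 * Let the Tx-FIFO-empty IRQ drive the bulk of the data phase
	 * instead of a polling loop: program a threshold, install the
	 * handler and unmask the interrupts of interest.
	 */
	dw_writel(dws, DW_SPI_TXFTLR, dws->fifo_len / 2);
	dws->transfer_handler = dw_spi_transfer_handler;
	dw_spi_umask_intr(dws, DW_SPI_INT_TXEI | DW_SPI_INT_RXFI);

	/*
	 * Kick the transfer off by pushing the command (and, if the op
	 * has one, the address) into the Tx FIFO as single 32-bit FIFO
	 * entries. The handler feeds/drains the rest from the IRQ
	 * context.
	 */
	txw = *(u32 *)dws->tx;
	dw_write_io_reg(dws, DW_SPI_DR, txw);
	dws->tx += 4;
	dws->tx_len--;

	return 0;
}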

Feel free to create some preparatory patches if that's needed to reach
the goal.

-Sergey

>
> Signed-off-by: Sudip Mukherjee <sudip.mukherjee@sifive.com>
> ---
> drivers/spi/spi-dw-core.c | 141 +++++++++++++++++++++++++++++++++-----
> 1 file changed, 125 insertions(+), 16 deletions(-)
>
> diff --git a/drivers/spi/spi-dw-core.c b/drivers/spi/spi-dw-core.c
> index 2564a2276572..d6afa75e7023 100644
> --- a/drivers/spi/spi-dw-core.c
> +++ b/drivers/spi/spi-dw-core.c
> @@ -712,6 +712,28 @@ static int dw_spi_wait_mem_op_done(struct dw_spi *dws)
> return 0;
> }
>
> +static void ext_transfer_delay(struct dw_spi *dws)
> +{
> + struct spi_delay delay;
> + unsigned long ns, us;
> + u32 nents;
> +
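> + /*
> + * Estimate how long the words already queued in the Tx FIFO will
> + * take to shift out: (word count * bits per word) / SCLK frequency.
> + */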
> + nents = dw_readl(dws, DW_SPI_TXFLR);
> + ns = NSEC_PER_SEC / dws->current_freq * nents;
> + ns *= dws->n_bytes * BITS_PER_BYTE;
> + if (ns <= NSEC_PER_USEC) {
> + delay.unit = SPI_DELAY_UNIT_NSECS;
> + delay.value = ns;
> + } else {
> + us = DIV_ROUND_UP(ns, NSEC_PER_USEC);
> + delay.unit = SPI_DELAY_UNIT_USECS;
> + delay.value = clamp_val(us, 0, USHRT_MAX);
> + }
> + /* wait until there is some space in TX FIFO */
> + while (!(dw_readl(dws, DW_SPI_SR) & DW_SPI_SR_TF_NOT_FULL))
> + spi_delay_exec(&delay, NULL);
> +}
> +
> static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
> {
> dw_spi_enable_chip(dws, 0);
> @@ -719,6 +741,82 @@ static void dw_spi_stop_mem_op(struct dw_spi *dws, struct spi_device *spi)
> dw_spi_enable_chip(dws, 1);
> }
>
> +static int enhanced_transfer(struct dw_spi *dws, struct spi_device *spi,
> + const struct spi_mem_op *op)
> +{
> + u32 max, txw = 0, rxw;
> + bool cs_done = false;
> + void *buf = dws->tx;
> + int ret;
> +
> + /* Send cmd as 32 bit value */
> + if (buf) {
> + txw = *(u32 *)(buf);
> + dw_write_io_reg(dws, DW_SPI_DR, txw);
> + buf += 4;
> + dws->tx_len--;
> + if (op->addr.nbytes) {
> + /*
> + * Send address as 32 bit value if address
> + * is present in the instruction.
> + */
> + txw = *(u32 *)(buf);
> + dw_write_io_reg(dws, DW_SPI_DR, txw);
> + buf += 4;
> + dws->tx_len--;
> + }
> + }
> +
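> + /*
> + * Push the payload into the Tx FIFO in FIFO-sized chunks, enabling
> + * CS once the first chunk has been queued.
> + */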
> + do {
> + max = min_t(u32, dws->tx_len, dws->fifo_len -
> + dw_readl(dws, DW_SPI_TXFLR));
> + while (max--) {
> + if (buf) {
> + txw = *(u8 *)(buf);
> + buf += dws->n_bytes;
> + }
> + dw_write_io_reg(dws, DW_SPI_DR, txw);
> + --dws->tx_len;
> + }
> + /* Enable CS after filling up FIFO */
> + if (!cs_done) {
> + dw_spi_set_cs(spi, false);
> + cs_done = true;
> + }
> + ext_transfer_delay(dws);
> + if (!dws->tx_len && !dws->rx_len) {
> + /*
> + * We only need to wait for done if there is
> + * nothing to receive and there is nothing more
> + * to transmit. If we are receiving, then the
> + * wait cycles will make sure we wait.
> + */
> + ret = dw_spi_wait_mem_op_done(dws);
> + if (ret)
> + return ret;
> + }
> + } while (dws->tx_len);
> +
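> + /* Drain the Rx FIFO until all expected data has been read. */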
> + buf = dws->rx;
> + while (dws->rx_len) {
> + max = dw_spi_rx_max(dws);
> +
> + while (max--) {
> + rxw = dw_read_io_reg(dws, DW_SPI_DR);
> + if (buf) {
> + *(u8 *)(buf) = rxw;
> + buf += dws->n_bytes;
> + }
> + --dws->rx_len;
> + }
> +
> + ret = dw_spi_check_status(dws, true);
> + if (ret)
> + return ret;
> + }
> + return 0;
> +}
> +
> static void update_spi_ctrl0(struct dw_spi *dws, const struct spi_mem_op *op, bool enable)
> {
> u32 spi_ctrlr0;
> @@ -846,25 +944,36 @@ static int dw_spi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op)
> * manually restricting the SPI bus frequency using the
> * dws->max_mem_freq parameter.
> */
> - local_irq_save(flags);
> - preempt_disable();
> + if (!enhanced_spi) {
> + local_irq_save(flags);
> + preempt_disable();
>
> - ret = dw_spi_write_then_read(dws, mem->spi);
> + ret = dw_spi_write_then_read(dws, mem->spi);
>
> - local_irq_restore(flags);
> - preempt_enable();
> + local_irq_restore(flags);
> + preempt_enable();
>
> - /*
> - * Wait for the operation being finished and check the controller
> - * status only if there hasn't been any run-time error detected. In the
> - * former case it's just pointless. In the later one to prevent an
> - * additional error message printing since any hw error flag being set
> - * would be due to an error detected on the data transfer.
> - */
> - if (!ret) {
> - ret = dw_spi_wait_mem_op_done(dws);
> - if (!ret)
> - ret = dw_spi_check_status(dws, true);
> + /*
> + * Wait for the operation being finished and check the
> + * controller status only if there hasn't been any
> + * run-time error detected. In the former case it's
> + * just pointless. In the later one to prevent an
> + * additional error message printing since any hw error
> + * flag being set would be due to an error detected on
> + * the data transfer.
> + */
> + if (!ret) {
> + ret = dw_spi_wait_mem_op_done(dws);
> + if (!ret)
> + ret = dw_spi_check_status(dws, true);
> + }
> + } else {
> + /*
> + * We do not need to disable IRQs since clock stretching will
> + * be enabled in enhanced mode, which prevents CS from being
> + * de-asserted.
> + */
> + ret = enhanced_transfer(dws, mem->spi, op);
> }
>
> dw_spi_stop_mem_op(dws, mem->spi);
> --
> 2.30.2
>
