1 From 028f915b20ec343dda88f1bcc99f07f6b428b4aa Mon Sep 17 00:00:00 2001
2 From: Matthew McClintock <mmcclint@codeaurora.org>
3 Date: Thu, 5 May 2016 10:07:11 -0500
4 Subject: [PATCH 13/69] spi: qup: allow multiple DMA transactions per spi xfer
6 Much like the block mode changes, we are breaking up DMA transactions
7 into 64K chunks so we can reset the QUP engine.
9 Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
11 drivers/spi/spi-qup.c | 120 ++++++++++++++++++++++++++++++++++++--------------
12 1 file changed, 86 insertions(+), 34 deletions(-)
14 --- a/drivers/spi/spi-qup.c
15 +++ b/drivers/spi/spi-qup.c
16 @@ -566,6 +566,21 @@ static int spi_qup_io_config(struct spi_
20 +static unsigned int spi_qup_sgl_get_size(struct scatterlist *sgl, unsigned int nents)
22 + struct scatterlist *sg;
24 + unsigned int length = 0;
29 + for_each_sg(sgl, sg, nents, i)
30 + length += sg_dma_len(sg);
35 static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
36 unsigned long timeout)
38 @@ -573,53 +588,90 @@ unsigned long timeout)
39 struct spi_qup *qup = spi_master_get_devdata(master);
40 dma_async_tx_callback rx_done = NULL, tx_done = NULL;
42 + struct scatterlist *tx_sgl, *rx_sgl;
44 - ret = spi_qup_io_config(spi, xfer);
48 - /* before issuing the descriptors, set the QUP to run */
49 - ret = spi_qup_set_state(qup, QUP_STATE_RUN);
51 - dev_warn(qup->dev, "cannot set RUN state\n");
57 - rx_done = spi_qup_dma_done;
60 - tx_done = spi_qup_dma_done;
64 - ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
65 - xfer->rx_sg.nents, DMA_DEV_TO_MEM,
66 - rx_done, &qup->done);
69 + rx_sgl = xfer->rx_sg.sgl;
70 + tx_sgl = xfer->tx_sg.sgl;
72 - dma_async_issue_pending(master->dma_rx);
75 + int rx_nents = 0, tx_nents = 0;
78 - ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
79 - xfer->tx_sg.nents, DMA_MEM_TO_DEV,
80 - tx_done, &qup->dma_tx_done);
82 + rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER);
84 + rx_nents = sg_nents(rx_sgl);
86 + qup->n_words = spi_qup_sgl_get_size(rx_sgl, rx_nents) /
91 + tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER);
93 + tx_nents = sg_nents(tx_sgl);
95 + qup->n_words = spi_qup_sgl_get_size(tx_sgl, tx_nents) /
100 + ret = spi_qup_io_config(spi, xfer);
104 - dma_async_issue_pending(master->dma_tx);
106 + /* before issuing the descriptors, set the QUP to run */
107 + ret = spi_qup_set_state(qup, QUP_STATE_RUN);
109 + dev_warn(qup->dev, "cannot set RUN state\n");
113 + if (!qup->qup_v1) {
115 + rx_done = spi_qup_dma_done;
119 + tx_done = spi_qup_dma_done;
124 + ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
125 + DMA_DEV_TO_MEM, rx_done,
130 + dma_async_issue_pending(master->dma_rx);
134 + ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
135 + DMA_MEM_TO_DEV, tx_done,
136 + &qup->dma_tx_done);
140 + dma_async_issue_pending(master->dma_tx);
143 + if (rx_sgl && !wait_for_completion_timeout(&qup->done, timeout)) {
144 + pr_emerg(" rx timed out");
148 + if (tx_sgl && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) {
149 + pr_emerg(" tx timed out\n");
153 - if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
155 + for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl));
156 + for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl));
158 - if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
160 + } while (rx_sgl || tx_sgl);
166 static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,