target/linux/ipq806x/patches-4.9/0016-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch
From 6b2bb8803f19116bad41a271f9035d4c853f4553 Mon Sep 17 00:00:00 2001
From: Matthew McClintock <mmcclint@codeaurora.org>
Date: Thu, 5 May 2016 10:07:11 -0500
Subject: [PATCH 16/37] spi: qup: allow multiple DMA transactions per spi xfer

Much like the block mode changes, we are breaking up DMA transactions
into 64K chunks so we can reset the QUP engine.

Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
---
 drivers/spi/spi-qup.c | 120 +++++++++++++++++++++++++++++++++++--------------
 1 file changed, 86 insertions(+), 34 deletions(-)
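As context for the diff below, here is a minimal userspace sketch of the chunking loop the patch introduces; SPI_MAX_XFER's value and run_chunk() are illustrative stand-ins, not the driver's actual definitions.

#include <stdio.h>
#include <stddef.h>

#define SPI_MAX_XFER (65536 - 64)	/* stand-in for the driver's 64K cap */

/* stand-in for one pass of the driver's loop: io_config, RUN state,
 * descriptor prep, issue, and completion wait for a single chunk */
static void run_chunk(size_t off, size_t len)
{
	printf("chunk at offset %zu, %zu bytes\n", off, len);
}

int main(void)
{
	size_t total = 200000;	/* hypothetical transfer larger than the cap */
	size_t off = 0;

	do {	/* mirrors the patch's do { ... } while (rx_sgl || tx_sgl) */
		size_t len = total - off;

		if (len > SPI_MAX_XFER)
			len = SPI_MAX_XFER;
		run_chunk(off, len);
		off += len;
	} while (off < total);

	return 0;
}

With a 200000-byte transfer and a 65472-byte cap, the loop runs four times: three full chunks and a 3584-byte tail.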
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -566,6 +566,21 @@ static int spi_qup_io_config(struct spi_
 	return 0;
 }
 
+static unsigned int spi_qup_sgl_get_size(struct scatterlist *sgl, unsigned int nents)
+{
+	struct scatterlist *sg;
+	int i;
+	unsigned int length = 0;
+
+	if (!nents)
+		return 0;
+
+	for_each_sg(sgl, sg, nents, i)
+		length += sg_dma_len(sg);
+
+	return length;
+}
+
 static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 			  unsigned long timeout)
 {
@@ -573,53 +588,90 @@ unsigned long timeout)
 	struct spi_qup *qup = spi_master_get_devdata(master);
 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 	int ret;
+	struct scatterlist *tx_sgl, *rx_sgl;
 
-	ret = spi_qup_io_config(spi, xfer);
-	if (ret)
-		return ret;
+	rx_sgl = xfer->rx_sg.sgl;
+	tx_sgl = xfer->tx_sg.sgl;
 
-	/* before issuing the descriptors, set the QUP to run */
-	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
-	if (ret) {
-		dev_warn(qup->dev, "cannot set RUN state\n");
-		return ret;
-	}
+	do {
+		int rx_nents = 0, tx_nents = 0;
 
-	if (!qup->qup_v1) {
-		if (xfer->rx_buf)
-			rx_done = spi_qup_dma_done;
+		if (rx_sgl) {
+			rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER);
+			if (rx_nents < 0)
+				rx_nents = sg_nents(rx_sgl);
 
-		if (xfer->tx_buf)
-			tx_done = spi_qup_dma_done;
-	}
+			qup->n_words = spi_qup_sgl_get_size(rx_sgl, rx_nents) /
+				qup->w_size;
+		}
 
-	if (xfer->rx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
-				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
-				      rx_done, &qup->done);
-		if (ret)
-			return ret;
+		if (tx_sgl) {
+			tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER);
+			if (tx_nents < 0)
+				tx_nents = sg_nents(tx_sgl);
 
-		dma_async_issue_pending(master->dma_rx);
-	}
+			qup->n_words = spi_qup_sgl_get_size(tx_sgl, tx_nents) /
+				qup->w_size;
+		}
 
-	if (xfer->tx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
-				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
-				      tx_done, &qup->dma_tx_done);
+
+		ret = spi_qup_io_config(spi, xfer);
 		if (ret)
 			return ret;
 
-		dma_async_issue_pending(master->dma_tx);
-	}
+		/* before issuing the descriptors, set the QUP to run */
+		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+		if (ret) {
+			dev_warn(qup->dev, "cannot set RUN state\n");
+			return ret;
+		}
 
-	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
-		return -ETIMEDOUT;
+		if (!qup->qup_v1) {
+			if (rx_sgl) {
+				rx_done = spi_qup_dma_done;
+			}
 
-	if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
-		ret = -ETIMEDOUT;
+			if (tx_sgl) {
+				tx_done = spi_qup_dma_done;
+			}
+		}
 
-	return ret;
+		if (rx_sgl) {
+			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
+					      DMA_DEV_TO_MEM, rx_done,
+					      &qup->done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_rx);
+		}
+
+		if (tx_sgl) {
+			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
+					      DMA_MEM_TO_DEV, tx_done,
+					      &qup->dma_tx_done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_tx);
+		}
+
+		if (rx_sgl && !wait_for_completion_timeout(&qup->done, timeout)) {
+			pr_emerg(" rx timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		if (tx_sgl && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) {
+			pr_emerg(" tx timed out\n");
+			return -ETIMEDOUT;
+		}
+
+		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl));
+		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl));
+
+	} while (rx_sgl || tx_sgl);
+
+	return 0;
 }
 
 static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,
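A note on the per-chunk sizing above: sg_nents_for_len() returns how many scatterlist entries are needed to cover SPI_MAX_XFER bytes, or a negative errno when the remaining list is shorter, which is why the code falls back to sg_nents() for the final partial chunk; the empty for loops then advance the rx_sgl/tx_sgl cursors past the entries just transferred. A rough userspace analogue of that sizing logic, with nents_for_len() and struct ent as hypothetical stand-ins:

#include <stdio.h>

/* toy stand-in for a scatterlist entry: only the DMA length matters here */
struct ent { unsigned int len; };

/* analogue of sg_nents_for_len(): entries needed to cover 'want' bytes,
 * or a negative value (like -EINVAL) if the list is too short */
static int nents_for_len(const struct ent *e, int n, unsigned int want)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < n; i++) {
		sum += e[i].len;
		if (sum >= want)
			return i + 1;
	}
	return -1;
}

int main(void)
{
	struct ent list[] = { { 40000 }, { 40000 }, { 40000 } };
	int n = nents_for_len(list, 3, 65536 - 64);

	if (n < 0)
		n = 3;	/* fallback, like sg_nents(): take the whole remaining list */
	printf("this chunk spans %d entries\n", n);
	return 0;
}

On this example list (three 40000-byte entries against a 65472-byte cap) the first chunk spans two entries, so the driver's cursor-advance loops would step two entries forward before the next iteration.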