kernel: bump 4.9 to 4.9.77
[openwrt/openwrt.git] target/linux/ipq806x/patches-4.9/0013-spi-qup-allow-mulitple-DMA-transactions-per-spi-xfer.patch
From 028f915b20ec343dda88f1bcc99f07f6b428b4aa Mon Sep 17 00:00:00 2001
From: Matthew McClintock <mmcclint@codeaurora.org>
Date: Thu, 5 May 2016 10:07:11 -0500
Subject: [PATCH 13/69] spi: qup: allow multiple DMA transactions per spi xfer

Much like the block mode changes, we are breaking up DMA transactions
into 64K chunks so we can reset the QUP engine.
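
For illustration, each pass of the new loop handles one chunk following
the general pattern sketched below (a minimal sketch only, not the
driver code itself: SPI_MAX_XFER is taken to be the 64K limit
introduced with the block mode changes, and issue_dma_chunk() is a
hypothetical stand-in for the prep/issue/wait sequence):

    while (sgl) {
            /* entries needed to cover SPI_MAX_XFER bytes; negative
             * errno if the remaining list is shorter than that */
            int nents = sg_nents_for_len(sgl, SPI_MAX_XFER);

            if (nents < 0)
                    nents = sg_nents(sgl);  /* short tail: take it all */

            issue_dma_chunk(sgl, nents);    /* hypothetical helper */

            while (sgl && nents--)          /* skip past this chunk */
                    sgl = sg_next(sgl);
    }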

Signed-off-by: Matthew McClintock <mmcclint@codeaurora.org>
---
 drivers/spi/spi-qup.c | 120 ++++++++++++++++++++++++++++++++++++--------------
 1 file changed, 86 insertions(+), 34 deletions(-)

--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -566,6 +566,21 @@ static int spi_qup_io_config(struct spi_
 	return 0;
 }
 
+static unsigned int spi_qup_sgl_get_size(struct scatterlist *sgl, unsigned int nents)
+{
+	struct scatterlist *sg;
+	int i;
+	unsigned int length = 0;
+
+	if (!nents)
+		return 0;
+
+	for_each_sg(sgl, sg, nents, i)
+		length += sg_dma_len(sg);
+
+	return length;
+}
+
 static int spi_qup_do_dma(struct spi_device *spi, struct spi_transfer *xfer,
 			  unsigned long timeout)
 {
@@ -573,53 +588,90 @@ unsigned long timeout)
 	struct spi_qup *qup = spi_master_get_devdata(master);
 	dma_async_tx_callback rx_done = NULL, tx_done = NULL;
 	int ret;
+	struct scatterlist *tx_sgl, *rx_sgl;
 
-	ret = spi_qup_io_config(spi, xfer);
-	if (ret)
-		return ret;
-
-	/* before issuing the descriptors, set the QUP to run */
-	ret = spi_qup_set_state(qup, QUP_STATE_RUN);
-	if (ret) {
-		dev_warn(qup->dev, "cannot set RUN state\n");
-		return ret;
-	}
-
-	if (!qup->qup_v1) {
-		if (xfer->rx_buf)
-			rx_done = spi_qup_dma_done;
-
-		if (xfer->tx_buf)
-			tx_done = spi_qup_dma_done;
-	}
-
-	if (xfer->rx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->rx_sg.sgl,
-				      xfer->rx_sg.nents, DMA_DEV_TO_MEM,
-				      rx_done, &qup->done);
-		if (ret)
-			return ret;
+	rx_sgl = xfer->rx_sg.sgl;
+	tx_sgl = xfer->tx_sg.sgl;
 
-		dma_async_issue_pending(master->dma_rx);
-	}
+	do {
+		int rx_nents = 0, tx_nents = 0;
 
-	if (xfer->tx_buf) {
-		ret = spi_qup_prep_sg(master, xfer->tx_sg.sgl,
-				      xfer->tx_sg.nents, DMA_MEM_TO_DEV,
-				      tx_done, &qup->dma_tx_done);
+		if (rx_sgl) {
+			rx_nents = sg_nents_for_len(rx_sgl, SPI_MAX_XFER);
+			if (rx_nents < 0)
+				rx_nents = sg_nents(rx_sgl);
+
+			qup->n_words = spi_qup_sgl_get_size(rx_sgl, rx_nents) /
+				qup->w_size;
+		}
+
+		if (tx_sgl) {
+			tx_nents = sg_nents_for_len(tx_sgl, SPI_MAX_XFER);
+			if (tx_nents < 0)
+				tx_nents = sg_nents(tx_sgl);
+
+			qup->n_words = spi_qup_sgl_get_size(tx_sgl, tx_nents) /
+				qup->w_size;
+		}
+
+
+		ret = spi_qup_io_config(spi, xfer);
 		if (ret)
 			return ret;
 
-		dma_async_issue_pending(master->dma_tx);
-	}
+		/* before issuing the descriptors, set the QUP to run */
+		ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+		if (ret) {
+			dev_warn(qup->dev, "cannot set RUN state\n");
+			return ret;
+		}
+
+		if (!qup->qup_v1) {
+			if (rx_sgl) {
+				rx_done = spi_qup_dma_done;
+			}
+
+			if (tx_sgl) {
+				tx_done = spi_qup_dma_done;
+			}
+		}
+
+		if (rx_sgl) {
+			ret = spi_qup_prep_sg(master, rx_sgl, rx_nents,
+					      DMA_DEV_TO_MEM, rx_done,
+					      &qup->done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_rx);
+		}
+
+		if (tx_sgl) {
+			ret = spi_qup_prep_sg(master, tx_sgl, tx_nents,
+					      DMA_MEM_TO_DEV, tx_done,
+					      &qup->dma_tx_done);
+			if (ret)
+				return ret;
+
+			dma_async_issue_pending(master->dma_tx);
+		}
+
+		if (rx_sgl && !wait_for_completion_timeout(&qup->done, timeout)) {
144 + pr_emerg(" rx timed out");
+			return -ETIMEDOUT;
+		}
+
+		if (tx_sgl && !wait_for_completion_timeout(&qup->dma_tx_done, timeout)) {
+			pr_emerg(" tx timed out\n");
+			return -ETIMEDOUT;
+		}
 
-	if (xfer->rx_buf && !wait_for_completion_timeout(&qup->done, timeout))
-		return -ETIMEDOUT;
+		for (; rx_sgl && rx_nents--; rx_sgl = sg_next(rx_sgl));
+		for (; tx_sgl && tx_nents--; tx_sgl = sg_next(tx_sgl));
 
-	if (xfer->tx_buf && !wait_for_completion_timeout(&qup->dma_tx_done, timeout))
-		ret = -ETIMEDOUT;
+	} while (rx_sgl || tx_sgl);
 
-	return ret;
+	return 0;
 }
 
 static int spi_qup_do_pio(struct spi_device *spi, struct spi_transfer *xfer,