target/linux/sunxi/patches-3.13/151-3-stmmac-fixup-3.patch
From 09f8d6960b69e474eef9d2aebdd0d536d00af0c8 Mon Sep 17 00:00:00 2001
From: Srinivas Kandagatla <srinivas.kandagatla@st.com>
Date: Thu, 16 Jan 2014 10:52:06 +0000
Subject: [PATCH] net: stmmac: move dma allocation to new function

This patch moves DMA resource allocation into a new function,
alloc_dma_desc_resources, so that the memory allocations are kept in a
separate function. A further reason is to get suspend and hibernation
working without releasing and reallocating these resources across the
suspend-resume and freeze-restore cases.

Signed-off-by: Srinivas Kandagatla <srinivas.kandagatla@st.com>
Acked-by: Giuseppe Cavallaro <peppe.cavallaro@st.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
---
 drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 169 +++++++++++-----------
 1 file changed, 85 insertions(+), 84 deletions(-)

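Note on the pattern used below: the new alloc_dma_desc_resources() follows the
kernel's goto-based unwinding idiom, where each allocation that fails jumps to
a label that frees only what was already allocated, in reverse order, so
nothing is leaked and nothing is freed twice. The standalone sketch that
follows illustrates the same idiom outside the driver; the names (demo_ring,
demo_alloc_rings) are illustrative placeholders and plain malloc()/free()
stand in for kmalloc_array()/dma_alloc_coherent(), so this is a simplified
model rather than the driver code itself.

/* Standalone sketch of goto-based allocation unwinding (not driver code). */
#include <stdio.h>
#include <stdlib.h>

struct demo_ring {
        void *rx_skbuff_dma;
        void *rx_skbuff;
        void *tx_skbuff_dma;
        void *tx_skbuff;
        void *dma_rx;
        void *dma_tx;
};

static int demo_alloc_rings(struct demo_ring *r, size_t rxsize, size_t txsize)
{
        int ret = -1;   /* stands in for -ENOMEM */

        r->rx_skbuff_dma = malloc(rxsize * sizeof(void *));
        if (!r->rx_skbuff_dma)
                return ret;

        r->rx_skbuff = malloc(rxsize * sizeof(void *));
        if (!r->rx_skbuff)
                goto err_rx_skbuff;

        r->tx_skbuff_dma = malloc(txsize * sizeof(void *));
        if (!r->tx_skbuff_dma)
                goto err_tx_skbuff_dma;

        r->tx_skbuff = malloc(txsize * sizeof(void *));
        if (!r->tx_skbuff)
                goto err_tx_skbuff;

        r->dma_rx = malloc(rxsize * 16);        /* stand-in for the descriptor area */
        if (!r->dma_rx)
                goto err_dma;

        r->dma_tx = malloc(txsize * 16);
        if (!r->dma_tx) {
                free(r->dma_rx);        /* partial descriptor cleanup stays local */
                goto err_dma;
        }

        return 0;

err_dma:                        /* unwind in reverse allocation order */
        free(r->tx_skbuff);
err_tx_skbuff:
        free(r->tx_skbuff_dma);
err_tx_skbuff_dma:
        free(r->rx_skbuff);
err_rx_skbuff:
        free(r->rx_skbuff_dma);
        return ret;
}

int main(void)
{
        struct demo_ring r = { 0 };

        if (demo_alloc_rings(&r, 64, 64) < 0) {
                fprintf(stderr, "ring allocation failed\n");
                return 1;
        }
        printf("rings allocated\n");
        /* teardown of the successful case is omitted for brevity */
        return 0;
}

A failure at any step leaves the structure with nothing allocated, so the
caller only has to check the single return value.
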
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 15192c0..532f2b4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -996,66 +996,6 @@ static int init_dma_desc_rings(struct net_device *dev)
 pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
 txsize, rxsize, bfsize);

- if (priv->extend_desc) {
- priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_erx)
- goto err_dma;
-
- priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct
- dma_extended_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_etx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- goto err_dma;
- }
- } else {
- priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
- sizeof(struct dma_desc),
- &priv->dma_rx_phy,
- GFP_KERNEL);
- if (!priv->dma_rx)
- goto err_dma;
-
- priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
- sizeof(struct dma_desc),
- &priv->dma_tx_phy,
- GFP_KERNEL);
- if (!priv->dma_tx) {
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- goto err_dma;
- }
- }
-
- priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->rx_skbuff_dma)
- goto err_rx_skbuff_dma;
-
- priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->rx_skbuff)
- goto err_rx_skbuff;
-
- priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
- GFP_KERNEL);
- if (!priv->tx_skbuff_dma)
- goto err_tx_skbuff_dma;
-
- priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
- GFP_KERNEL);
- if (!priv->tx_skbuff)
- goto err_tx_skbuff;
-
 if (netif_msg_probe(priv)) {
 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
 (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
@@ -1123,30 +1063,6 @@ static int init_dma_desc_rings(struct net_device *dev)
 err_init_rx_buffers:
 while (--i >= 0)
 stmmac_free_rx_buffers(priv, i);
- kfree(priv->tx_skbuff);
-err_tx_skbuff:
- kfree(priv->tx_skbuff_dma);
-err_tx_skbuff_dma:
- kfree(priv->rx_skbuff);
-err_rx_skbuff:
- kfree(priv->rx_skbuff_dma);
-err_rx_skbuff_dma:
- if (priv->extend_desc) {
- dma_free_coherent(priv->device, priv->dma_tx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_etx, priv->dma_tx_phy);
- dma_free_coherent(priv->device, priv->dma_rx_size *
- sizeof(struct dma_extended_desc),
- priv->dma_erx, priv->dma_rx_phy);
- } else {
- dma_free_coherent(priv->device,
- priv->dma_tx_size * sizeof(struct dma_desc),
- priv->dma_tx, priv->dma_tx_phy);
- dma_free_coherent(priv->device,
- priv->dma_rx_size * sizeof(struct dma_desc),
- priv->dma_rx, priv->dma_rx_phy);
- }
-err_dma:
 return ret;
 }

@@ -1182,6 +1098,85 @@ static void dma_free_tx_skbufs(struct stmmac_priv *priv)
 }
 }

+static int alloc_dma_desc_resources(struct stmmac_priv *priv)
+{
+ unsigned int txsize = priv->dma_tx_size;
+ unsigned int rxsize = priv->dma_rx_size;
+ int ret = -ENOMEM;
+
+ priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff_dma)
+ return -ENOMEM;
+
+ priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->rx_skbuff)
+ goto err_rx_skbuff;
+
+ priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff_dma)
+ goto err_tx_skbuff_dma;
+
+ priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!priv->tx_skbuff)
+ goto err_tx_skbuff;
+
+ if (priv->extend_desc) {
+ priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_erx)
+ goto err_dma;
+
+ priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct
+ dma_extended_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_etx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ priv->dma_erx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ } else {
+ priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
+ sizeof(struct dma_desc),
+ &priv->dma_rx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_rx)
+ goto err_dma;
+
+ priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
+ sizeof(struct dma_desc),
+ &priv->dma_tx_phy,
+ GFP_KERNEL);
+ if (!priv->dma_tx) {
+ dma_free_coherent(priv->device, priv->dma_rx_size *
+ sizeof(struct dma_desc),
+ priv->dma_rx, priv->dma_rx_phy);
+ goto err_dma;
+ }
+ }
+
+ return 0;
+
+err_dma:
+ kfree(priv->tx_skbuff);
+err_tx_skbuff:
+ kfree(priv->tx_skbuff_dma);
+err_tx_skbuff_dma:
+ kfree(priv->rx_skbuff);
+err_rx_skbuff:
+ kfree(priv->rx_skbuff_dma);
+ return ret;
+}
+
 static void free_dma_desc_resources(struct stmmac_priv *priv)
 {
 /* Release the DMA TX/RX socket buffers */
@@ -1623,6 +1618,12 @@ static int stmmac_open(struct net_device *dev)
 priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
 priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);

+ ret = alloc_dma_desc_resources(priv);
+ if (ret < 0) {
+ pr_err("%s: DMA descriptors allocation failed\n", __func__);
+ goto dma_desc_error;
+ }
+
 ret = init_dma_desc_rings(dev);
 if (ret < 0) {
 pr_err("%s: DMA descriptors initialization failed\n", __func__);
--
1.8.5.5

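As the commit message explains, the point of splitting allocation out of
init_dma_desc_rings() is that a later suspend/resume (or freeze/restore) path
can re-initialise the rings over buffers it already owns instead of freeing
and reallocating them. The standalone sketch below shows that calling pattern
under assumed, simplified names (demo_dev, demo_open, demo_resume, with a
plain malloc() ring standing in for the DMA descriptors); it models the
intended call flow, not the actual stmmac suspend/resume code.

/* Standalone sketch of allocate-once / re-init-on-resume (not driver code). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct demo_dev {
        unsigned int ring_size;
        unsigned int *rx_ring;  /* stands in for the DMA descriptor ring */
};

/* one-time allocation: the counterpart of alloc_dma_desc_resources() */
static int demo_alloc_resources(struct demo_dev *d)
{
        d->rx_ring = malloc(d->ring_size * sizeof(*d->rx_ring));
        return d->rx_ring ? 0 : -1;
}

/* (re)initialisation only: the counterpart of init_dma_desc_rings() */
static void demo_init_rings(struct demo_dev *d)
{
        memset(d->rx_ring, 0, d->ring_size * sizeof(*d->rx_ring));
}

static int demo_open(struct demo_dev *d)
{
        int ret = demo_alloc_resources(d);      /* return value is checked */

        if (ret < 0) {
                fprintf(stderr, "demo: ring allocation failed\n");
                return ret;
        }
        demo_init_rings(d);
        return 0;
}

/* resume keeps the existing buffers and only reprograms the rings */
static void demo_resume(struct demo_dev *d)
{
        demo_init_rings(d);
}

int main(void)
{
        struct demo_dev d = { .ring_size = 64, .rx_ring = NULL };

        if (demo_open(&d))
                return 1;
        demo_resume(&d);        /* no free/realloc cycle across the suspend */
        puts("demo: resumed without reallocating the rings");
        free(d.rx_ring);
        return 0;
}

Only the cheap demo_init_rings() step repeats across the suspend cycle; the
allocation done in demo_open() stays in place, which mirrors the motivation
given in the commit message.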