kernel: bump 4.9 to 4.9.96
[openwrt/openwrt.git] / target / linux / sunxi / patches-4.9 / 0050-stmmac-form-4-10.patch
1 --- a/Documentation/devicetree/bindings/net/stmmac.txt
2 +++ b/Documentation/devicetree/bindings/net/stmmac.txt
3 @@ -1,7 +1,7 @@
4 * STMicroelectronics 10/100/1000 Ethernet driver (GMAC)
5
6 Required properties:
7 -- compatible: Should be "snps,dwmac-<ip_version>" "snps,dwmac"
8 +- compatible: Should be "snps,dwmac-<ip_version>", "snps,dwmac"
9 For backwards compatibility: "st,spear600-gmac" is also supported.
10 - reg: Address and length of the register set for the device
11 - interrupt-parent: Should be the phandle for the interrupt controller
12 @@ -34,7 +34,13 @@ Optional properties:
13 platforms.
14 - tx-fifo-depth: See ethernet.txt file in the same directory
15 - rx-fifo-depth: See ethernet.txt file in the same directory
16 -- snps,pbl Programmable Burst Length
17 +- snps,pbl Programmable Burst Length (tx and rx)
18 +- snps,txpbl Tx Programmable Burst Length. Only for GMAC and newer.
19 + If set, DMA tx will use this value rather than snps,pbl.
20 +- snps,rxpbl Rx Programmable Burst Length. Only for GMAC and newer.
21 + If set, DMA rx will use this value rather than snps,pbl.
22 +- snps,no-pbl-x8 Don't multiply the pbl/txpbl/rxpbl values by 8.
23 + For core rev < 3.50, don't multiply the values by 4.
24 - snps,aal Address-Aligned Beats
25 - snps,fixed-burst Program the DMA to use the fixed burst mode
26 - snps,mixed-burst Program the DMA to use the mixed burst mode
27 @@ -50,6 +56,8 @@ Optional properties:
28 - snps,ps-speed: port selection speed that can be passed to the core when
29 PCS is supported. For example, this is used in case of SGMII
30 and MAC2MAC connection.
31 +- snps,tso: this enables the TSO feature otherwise it will be managed by
32 + MAC HW capability register. Only for GMAC4 and newer.
33 - AXI BUS Mode parameters: below the list of all the parameters to program the
34 AXI register inside the DMA module:
35 - snps,lpi_en: enable Low Power Interface
36 @@ -62,8 +70,6 @@ Optional properties:
37 - snps,fb: fixed-burst
38 - snps,mb: mixed-burst
39 - snps,rb: rebuild INCRx Burst
40 - - snps,tso: this enables the TSO feature otherwise it will be managed by
41 - MAC HW capability register.
42 - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
43
44 Examples:
45 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
46 +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
47 @@ -69,6 +69,17 @@ config DWMAC_MESON
48 the stmmac device driver. This driver is used for Meson6,
49 Meson8, Meson8b and GXBB SoCs.
50
51 +config DWMAC_OXNAS
52 + tristate "Oxford Semiconductor OXNAS dwmac support"
53 + default ARCH_OXNAS
54 + depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST)
55 + select MFD_SYSCON
56 + help
57 + Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs.
58 +
59 + This selects the Oxford Semiconductor OXNAS SoC glue layer support for
60 + the stmmac device driver. This driver is used for OX820.
61 +
62 config DWMAC_ROCKCHIP
63 tristate "Rockchip dwmac support"
64 default ARCH_ROCKCHIP
65 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile
66 +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
67 @@ -10,6 +10,7 @@ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-
68 obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
69 obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
70 obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
71 +obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
72 obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
73 obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
74 obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
75 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
76 +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
77 @@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str
78 unsigned int entry = priv->cur_tx;
79 struct dma_desc *desc = priv->dma_tx + entry;
80 unsigned int nopaged_len = skb_headlen(skb);
81 - unsigned int bmax;
82 + unsigned int bmax, des2;
83 unsigned int i = 1, len;
84
85 if (priv->plat->enh_desc)
86 @@ -44,11 +44,12 @@ static int stmmac_jumbo_frm(void *p, str
87
88 len = nopaged_len - bmax;
89
90 - desc->des2 = dma_map_single(priv->device, skb->data,
91 - bmax, DMA_TO_DEVICE);
92 - if (dma_mapping_error(priv->device, desc->des2))
93 + des2 = dma_map_single(priv->device, skb->data,
94 + bmax, DMA_TO_DEVICE);
95 + desc->des2 = cpu_to_le32(des2);
96 + if (dma_mapping_error(priv->device, des2))
97 return -1;
98 - priv->tx_skbuff_dma[entry].buf = desc->des2;
99 + priv->tx_skbuff_dma[entry].buf = des2;
100 priv->tx_skbuff_dma[entry].len = bmax;
101 /* do not close the descriptor and do not set own bit */
102 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
103 @@ -60,12 +61,13 @@ static int stmmac_jumbo_frm(void *p, str
104 desc = priv->dma_tx + entry;
105
106 if (len > bmax) {
107 - desc->des2 = dma_map_single(priv->device,
108 - (skb->data + bmax * i),
109 - bmax, DMA_TO_DEVICE);
110 - if (dma_mapping_error(priv->device, desc->des2))
111 + des2 = dma_map_single(priv->device,
112 + (skb->data + bmax * i),
113 + bmax, DMA_TO_DEVICE);
114 + desc->des2 = cpu_to_le32(des2);
115 + if (dma_mapping_error(priv->device, des2))
116 return -1;
117 - priv->tx_skbuff_dma[entry].buf = desc->des2;
118 + priv->tx_skbuff_dma[entry].buf = des2;
119 priv->tx_skbuff_dma[entry].len = bmax;
120 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
121 STMMAC_CHAIN_MODE, 1,
122 @@ -73,12 +75,13 @@ static int stmmac_jumbo_frm(void *p, str
123 len -= bmax;
124 i++;
125 } else {
126 - desc->des2 = dma_map_single(priv->device,
127 - (skb->data + bmax * i), len,
128 - DMA_TO_DEVICE);
129 - if (dma_mapping_error(priv->device, desc->des2))
130 + des2 = dma_map_single(priv->device,
131 + (skb->data + bmax * i), len,
132 + DMA_TO_DEVICE);
133 + desc->des2 = cpu_to_le32(des2);
134 + if (dma_mapping_error(priv->device, des2))
135 return -1;
136 - priv->tx_skbuff_dma[entry].buf = desc->des2;
137 + priv->tx_skbuff_dma[entry].buf = des2;
138 priv->tx_skbuff_dma[entry].len = len;
139 /* last descriptor can be set now */
140 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
141 @@ -119,19 +122,19 @@ static void stmmac_init_dma_chain(void *
142 struct dma_extended_desc *p = (struct dma_extended_desc *)des;
143 for (i = 0; i < (size - 1); i++) {
144 dma_phy += sizeof(struct dma_extended_desc);
145 - p->basic.des3 = (unsigned int)dma_phy;
146 + p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
147 p++;
148 }
149 - p->basic.des3 = (unsigned int)phy_addr;
150 + p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
151
152 } else {
153 struct dma_desc *p = (struct dma_desc *)des;
154 for (i = 0; i < (size - 1); i++) {
155 dma_phy += sizeof(struct dma_desc);
156 - p->des3 = (unsigned int)dma_phy;
157 + p->des3 = cpu_to_le32((unsigned int)dma_phy);
158 p++;
159 }
160 - p->des3 = (unsigned int)phy_addr;
161 + p->des3 = cpu_to_le32((unsigned int)phy_addr);
162 }
163 }
164
165 @@ -144,10 +147,10 @@ static void stmmac_refill_desc3(void *pr
166 * 1588-2002 time stamping is enabled, hence reinitialize it
167 * to keep explicit chaining in the descriptor.
168 */
169 - p->des3 = (unsigned int)(priv->dma_rx_phy +
170 - (((priv->dirty_rx) + 1) %
171 - DMA_RX_SIZE) *
172 - sizeof(struct dma_desc));
173 + p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
174 + (((priv->dirty_rx) + 1) %
175 + DMA_RX_SIZE) *
176 + sizeof(struct dma_desc)));
177 }
178
179 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
180 @@ -161,9 +164,9 @@ static void stmmac_clean_desc3(void *pri
181 * 1588-2002 time stamping is enabled, hence reinitialize it
182 * to keep explicit chaining in the descriptor.
183 */
184 - p->des3 = (unsigned int)((priv->dma_tx_phy +
185 - ((priv->dirty_tx + 1) % DMA_TX_SIZE))
186 - * sizeof(struct dma_desc));
187 + p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
188 + ((priv->dirty_tx + 1) % DMA_TX_SIZE))
189 + * sizeof(struct dma_desc)));
190 }
191
192 const struct stmmac_mode_ops chain_mode_ops = {
193 --- a/drivers/net/ethernet/stmicro/stmmac/common.h
194 +++ b/drivers/net/ethernet/stmicro/stmmac/common.h
195 @@ -44,6 +44,7 @@
196 #define DWMAC_CORE_4_00 0x40
197 #define STMMAC_CHAN0 0 /* Always supported and default for all chips */
198
199 +/* These need to be power of two, and >= 4 */
200 #define DMA_TX_SIZE 512
201 #define DMA_RX_SIZE 512
202 #define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
203 @@ -411,8 +412,8 @@ extern const struct stmmac_desc_ops ndes
204 struct stmmac_dma_ops {
205 /* DMA core initialization */
206 int (*reset)(void __iomem *ioaddr);
207 - void (*init)(void __iomem *ioaddr, int pbl, int fb, int mb,
208 - int aal, u32 dma_tx, u32 dma_rx, int atds);
209 + void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
210 + u32 dma_tx, u32 dma_rx, int atds);
211 /* Configure the AXI Bus Mode Register */
212 void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
213 /* Dump DMA registers */
214 @@ -506,6 +507,12 @@ struct mac_link {
215 struct mii_regs {
216 unsigned int addr; /* MII Address */
217 unsigned int data; /* MII Data */
218 + unsigned int addr_shift; /* MII address shift */
219 + unsigned int reg_shift; /* MII reg shift */
220 + unsigned int addr_mask; /* MII address mask */
221 + unsigned int reg_mask; /* MII reg mask */
222 + unsigned int clk_csr_shift;
223 + unsigned int clk_csr_mask;
224 };
225
226 /* Helpers to manage the descriptors for chain and ring modes */
227 --- a/drivers/net/ethernet/stmicro/stmmac/descs.h
228 +++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
229 @@ -87,7 +87,7 @@
230 #define TDES0_ERROR_SUMMARY BIT(15)
231 #define TDES0_IP_HEADER_ERROR BIT(16)
232 #define TDES0_TIME_STAMP_STATUS BIT(17)
233 -#define TDES0_OWN BIT(31)
234 +#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */
235 /* TDES1 */
236 #define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
237 #define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
238 @@ -130,7 +130,7 @@
239 #define ETDES0_FIRST_SEGMENT BIT(28)
240 #define ETDES0_LAST_SEGMENT BIT(29)
241 #define ETDES0_INTERRUPT BIT(30)
242 -#define ETDES0_OWN BIT(31)
243 +#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */
244 /* TDES1 */
245 #define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
246 #define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
247 @@ -170,19 +170,19 @@
248
249 /* Basic descriptor structure for normal and alternate descriptors */
250 struct dma_desc {
251 - unsigned int des0;
252 - unsigned int des1;
253 - unsigned int des2;
254 - unsigned int des3;
255 + __le32 des0;
256 + __le32 des1;
257 + __le32 des2;
258 + __le32 des3;
259 };
260
261 /* Extended descriptor structure (e.g. >= databook 3.50a) */
262 struct dma_extended_desc {
263 struct dma_desc basic; /* Basic descriptors */
264 - unsigned int des4; /* Extended Status */
265 - unsigned int des5; /* Reserved */
266 - unsigned int des6; /* Tx/Rx Timestamp Low */
267 - unsigned int des7; /* Tx/Rx Timestamp High */
268 + __le32 des4; /* Extended Status */
269 + __le32 des5; /* Reserved */
270 + __le32 des6; /* Tx/Rx Timestamp Low */
271 + __le32 des7; /* Tx/Rx Timestamp High */
272 };
273
274 /* Transmit checksum insertion control */
275 --- a/drivers/net/ethernet/stmicro/stmmac/descs_com.h
276 +++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
277 @@ -35,47 +35,50 @@
278 /* Enhanced descriptors */
279 static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end)
280 {
281 - p->des1 |= ((BUF_SIZE_8KiB - 1) << ERDES1_BUFFER2_SIZE_SHIFT)
282 - & ERDES1_BUFFER2_SIZE_MASK;
283 + p->des1 |= cpu_to_le32(((BUF_SIZE_8KiB - 1)
284 + << ERDES1_BUFFER2_SIZE_SHIFT)
285 + & ERDES1_BUFFER2_SIZE_MASK);
286
287 if (end)
288 - p->des1 |= ERDES1_END_RING;
289 + p->des1 |= cpu_to_le32(ERDES1_END_RING);
290 }
291
292 static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
293 {
294 if (end)
295 - p->des0 |= ETDES0_END_RING;
296 + p->des0 |= cpu_to_le32(ETDES0_END_RING);
297 else
298 - p->des0 &= ~ETDES0_END_RING;
299 + p->des0 &= cpu_to_le32(~ETDES0_END_RING);
300 }
301
302 static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
303 {
304 if (unlikely(len > BUF_SIZE_4KiB)) {
305 - p->des1 |= (((len - BUF_SIZE_4KiB) << ETDES1_BUFFER2_SIZE_SHIFT)
306 + p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
307 + << ETDES1_BUFFER2_SIZE_SHIFT)
308 & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
309 - & ETDES1_BUFFER1_SIZE_MASK);
310 + & ETDES1_BUFFER1_SIZE_MASK));
311 } else
312 - p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
313 + p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
314 }
315
316 /* Normal descriptors */
317 static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end)
318 {
319 - p->des1 |= ((BUF_SIZE_2KiB - 1) << RDES1_BUFFER2_SIZE_SHIFT)
320 - & RDES1_BUFFER2_SIZE_MASK;
321 + p->des1 |= cpu_to_le32(((BUF_SIZE_2KiB - 1)
322 + << RDES1_BUFFER2_SIZE_SHIFT)
323 + & RDES1_BUFFER2_SIZE_MASK);
324
325 if (end)
326 - p->des1 |= RDES1_END_RING;
327 + p->des1 |= cpu_to_le32(RDES1_END_RING);
328 }
329
330 static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
331 {
332 if (end)
333 - p->des1 |= TDES1_END_RING;
334 + p->des1 |= cpu_to_le32(TDES1_END_RING);
335 else
336 - p->des1 &= ~TDES1_END_RING;
337 + p->des1 &= cpu_to_le32(~TDES1_END_RING);
338 }
339
340 static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
341 @@ -83,10 +86,11 @@ static inline void norm_set_tx_desc_len_
342 if (unlikely(len > BUF_SIZE_2KiB)) {
343 unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
344 & TDES1_BUFFER1_SIZE_MASK;
345 - p->des1 |= ((((len - buffer1) << TDES1_BUFFER2_SIZE_SHIFT)
346 - & TDES1_BUFFER2_SIZE_MASK) | buffer1);
347 + p->des1 |= cpu_to_le32((((len - buffer1)
348 + << TDES1_BUFFER2_SIZE_SHIFT)
349 + & TDES1_BUFFER2_SIZE_MASK) | buffer1);
350 } else
351 - p->des1 |= (len & TDES1_BUFFER1_SIZE_MASK);
352 + p->des1 |= cpu_to_le32((len & TDES1_BUFFER1_SIZE_MASK));
353 }
354
355 /* Specific functions used for Chain mode */
356 @@ -94,32 +98,32 @@ static inline void norm_set_tx_desc_len_
357 /* Enhanced descriptors */
358 static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
359 {
360 - p->des1 |= ERDES1_SECOND_ADDRESS_CHAINED;
361 + p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
362 }
363
364 static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
365 {
366 - p->des0 |= ETDES0_SECOND_ADDRESS_CHAINED;
367 + p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
368 }
369
370 static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
371 {
372 - p->des1 |= (len & ETDES1_BUFFER1_SIZE_MASK);
373 + p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
374 }
375
376 /* Normal descriptors */
377 static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
378 {
379 - p->des1 |= RDES1_SECOND_ADDRESS_CHAINED;
380 + p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
381 }
382
383 static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
384 {
385 - p->des1 |= TDES1_SECOND_ADDRESS_CHAINED;
386 + p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
387 }
388
389 static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
390 {
391 - p->des1 |= len & TDES1_BUFFER1_SIZE_MASK;
392 + p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
393 }
394 #endif /* __DESC_COM_H__ */
395 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
396 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
397 @@ -71,9 +71,12 @@ err_remove_config_dt:
398
399 static const struct of_device_id dwmac_generic_match[] = {
400 { .compatible = "st,spear600-gmac"},
401 + { .compatible = "snps,dwmac-3.50a"},
402 { .compatible = "snps,dwmac-3.610"},
403 { .compatible = "snps,dwmac-3.70a"},
404 { .compatible = "snps,dwmac-3.710"},
405 + { .compatible = "snps,dwmac-4.00"},
406 + { .compatible = "snps,dwmac-4.10a"},
407 { .compatible = "snps,dwmac"},
408 { }
409 };
410 --- /dev/null
411 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
412 @@ -0,0 +1,194 @@
413 +/*
414 + * Oxford Semiconductor OXNAS DWMAC glue layer
415 + *
416 + * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
417 + * Copyright (C) 2014 Daniel Golle <daniel@makrotopia.org>
418 + * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
419 + * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
420 + *
421 + * This program is free software; you can redistribute it and/or modify
422 + * it under the terms of the GNU General Public License version 2 as
423 + * published by the Free Software Foundation.
424 + *
425 + * You should have received a copy of the GNU General Public License
426 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
427 + */
428 +
429 +#include <linux/device.h>
430 +#include <linux/io.h>
431 +#include <linux/module.h>
432 +#include <linux/of.h>
433 +#include <linux/platform_device.h>
434 +#include <linux/regmap.h>
435 +#include <linux/mfd/syscon.h>
436 +#include <linux/stmmac.h>
437 +
438 +#include "stmmac_platform.h"
439 +
440 +/* System Control regmap offsets */
441 +#define OXNAS_DWMAC_CTRL_REGOFFSET 0x78
442 +#define OXNAS_DWMAC_DELAY_REGOFFSET 0x100
443 +
444 +/* Control Register */
445 +#define DWMAC_CKEN_RX_IN 14
446 +#define DWMAC_CKEN_RXN_OUT 13
447 +#define DWMAC_CKEN_RX_OUT 12
448 +#define DWMAC_CKEN_TX_IN 10
449 +#define DWMAC_CKEN_TXN_OUT 9
450 +#define DWMAC_CKEN_TX_OUT 8
451 +#define DWMAC_RX_SOURCE 7
452 +#define DWMAC_TX_SOURCE 6
453 +#define DWMAC_LOW_TX_SOURCE 4
454 +#define DWMAC_AUTO_TX_SOURCE 3
455 +#define DWMAC_RGMII 2
456 +#define DWMAC_SIMPLE_MUX 1
457 +#define DWMAC_CKEN_GTX 0
458 +
459 +/* Delay register */
460 +#define DWMAC_TX_VARDELAY_SHIFT 0
461 +#define DWMAC_TXN_VARDELAY_SHIFT 8
462 +#define DWMAC_RX_VARDELAY_SHIFT 16
463 +#define DWMAC_RXN_VARDELAY_SHIFT 24
464 +#define DWMAC_TX_VARDELAY(d) ((d) << DWMAC_TX_VARDELAY_SHIFT)
465 +#define DWMAC_TXN_VARDELAY(d) ((d) << DWMAC_TXN_VARDELAY_SHIFT)
466 +#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
467 +#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
468 +
469 +struct oxnas_dwmac {
470 + struct device *dev;
471 + struct clk *clk;
472 + struct regmap *regmap;
473 +};
474 +
475 +static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
476 +{
477 + struct oxnas_dwmac *dwmac = priv;
478 + unsigned int value;
479 + int ret;
480 +
481 + /* Reset HW here before changing the glue configuration */
482 + ret = device_reset(dwmac->dev);
483 + if (ret)
484 + return ret;
485 +
486 + ret = clk_prepare_enable(dwmac->clk);
487 + if (ret)
488 + return ret;
489 +
490 + ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
491 + if (ret < 0) {
492 + clk_disable_unprepare(dwmac->clk);
493 + return ret;
494 + }
495 +
496 + /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
497 + value |= BIT(DWMAC_CKEN_GTX) |
498 + /* Use simple mux for 25/125 Mhz clock switching */
499 + BIT(DWMAC_SIMPLE_MUX) |
500 + /* set auto switch tx clock source */
501 + BIT(DWMAC_AUTO_TX_SOURCE) |
502 + /* enable tx & rx vardelay */
503 + BIT(DWMAC_CKEN_TX_OUT) |
504 + BIT(DWMAC_CKEN_TXN_OUT) |
505 + BIT(DWMAC_CKEN_TX_IN) |
506 + BIT(DWMAC_CKEN_RX_OUT) |
507 + BIT(DWMAC_CKEN_RXN_OUT) |
508 + BIT(DWMAC_CKEN_RX_IN);
509 + regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
510 +
511 + /* set tx & rx vardelay */
512 + value = DWMAC_TX_VARDELAY(4) |
513 + DWMAC_TXN_VARDELAY(2) |
514 + DWMAC_RX_VARDELAY(10) |
515 + DWMAC_RXN_VARDELAY(8);
516 + regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value);
517 +
518 + return 0;
519 +}
520 +
521 +static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
522 +{
523 + struct oxnas_dwmac *dwmac = priv;
524 +
525 + clk_disable_unprepare(dwmac->clk);
526 +}
527 +
528 +static int oxnas_dwmac_probe(struct platform_device *pdev)
529 +{
530 + struct plat_stmmacenet_data *plat_dat;
531 + struct stmmac_resources stmmac_res;
532 + struct oxnas_dwmac *dwmac;
533 + int ret;
534 +
535 + ret = stmmac_get_platform_resources(pdev, &stmmac_res);
536 + if (ret)
537 + return ret;
538 +
539 + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac);
540 + if (IS_ERR(plat_dat))
541 + return PTR_ERR(plat_dat);
542 +
543 + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
544 + if (!dwmac) {
545 + ret = -ENOMEM;
546 + goto err_remove_config_dt;
547 + }
548 +
549 + dwmac->dev = &pdev->dev;
550 + plat_dat->bsp_priv = dwmac;
551 + plat_dat->init = oxnas_dwmac_init;
552 + plat_dat->exit = oxnas_dwmac_exit;
553 +
554 + dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
555 + "oxsemi,sys-ctrl");
556 + if (IS_ERR(dwmac->regmap)) {
557 + dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
558 + ret = PTR_ERR(dwmac->regmap);
559 + goto err_remove_config_dt;
560 + }
561 +
562 + dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
563 + if (IS_ERR(dwmac->clk)) {
564 + ret = PTR_ERR(dwmac->clk);
565 + goto err_remove_config_dt;
566 + }
567 +
568 + ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
569 + if (ret)
570 + goto err_remove_config_dt;
571 +
572 + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
573 + if (ret)
574 + goto err_dwmac_exit;
575 +
576 +
577 + return 0;
578 +
579 +err_dwmac_exit:
580 + oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
581 +err_remove_config_dt:
582 + stmmac_remove_config_dt(pdev, plat_dat);
583 +
584 + return ret;
585 +}
586 +
587 +static const struct of_device_id oxnas_dwmac_match[] = {
588 + { .compatible = "oxsemi,ox820-dwmac" },
589 + { }
590 +};
591 +MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
592 +
593 +static struct platform_driver oxnas_dwmac_driver = {
594 + .probe = oxnas_dwmac_probe,
595 + .remove = stmmac_pltfr_remove,
596 + .driver = {
597 + .name = "oxnas-dwmac",
598 + .pm = &stmmac_pltfr_pm_ops,
599 + .of_match_table = oxnas_dwmac_match,
600 + },
601 +};
602 +module_platform_driver(oxnas_dwmac_driver);
603 +
604 +MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
605 +MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer");
606 +MODULE_LICENSE("GPL v2");
607 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
608 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
609 @@ -864,6 +864,10 @@ static int rk_gmac_powerup(struct rk_pri
610 int ret;
611 struct device *dev = &bsp_priv->pdev->dev;
612
613 + ret = gmac_clk_enable(bsp_priv, true);
614 + if (ret)
615 + return ret;
616 +
617 /*rmii or rgmii*/
618 if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
619 dev_info(dev, "init for RGMII\n");
620 @@ -880,10 +884,6 @@ static int rk_gmac_powerup(struct rk_pri
621 if (ret)
622 return ret;
623
624 - ret = gmac_clk_enable(bsp_priv, true);
625 - if (ret)
626 - return ret;
627 -
628 pm_runtime_enable(dev);
629 pm_runtime_get_sync(dev);
630
631 @@ -901,44 +901,6 @@ static void rk_gmac_powerdown(struct rk_
632 gmac_clk_enable(gmac, false);
633 }
634
635 -static int rk_gmac_init(struct platform_device *pdev, void *priv)
636 -{
637 - struct rk_priv_data *bsp_priv = priv;
638 -
639 - return rk_gmac_powerup(bsp_priv);
640 -}
641 -
642 -static void rk_gmac_exit(struct platform_device *pdev, void *priv)
643 -{
644 - struct rk_priv_data *bsp_priv = priv;
645 -
646 - rk_gmac_powerdown(bsp_priv);
647 -}
648 -
649 -static void rk_gmac_suspend(struct platform_device *pdev, void *priv)
650 -{
651 - struct rk_priv_data *bsp_priv = priv;
652 -
653 - /* Keep the PHY up if we use Wake-on-Lan. */
654 - if (device_may_wakeup(&pdev->dev))
655 - return;
656 -
657 - rk_gmac_powerdown(bsp_priv);
658 - bsp_priv->suspended = true;
659 -}
660 -
661 -static void rk_gmac_resume(struct platform_device *pdev, void *priv)
662 -{
663 - struct rk_priv_data *bsp_priv = priv;
664 -
665 - /* The PHY was up for Wake-on-Lan. */
666 - if (!bsp_priv->suspended)
667 - return;
668 -
669 - rk_gmac_powerup(bsp_priv);
670 - bsp_priv->suspended = false;
671 -}
672 -
673 static void rk_fix_speed(void *priv, unsigned int speed)
674 {
675 struct rk_priv_data *bsp_priv = priv;
676 @@ -974,11 +936,7 @@ static int rk_gmac_probe(struct platform
677 return PTR_ERR(plat_dat);
678
679 plat_dat->has_gmac = true;
680 - plat_dat->init = rk_gmac_init;
681 - plat_dat->exit = rk_gmac_exit;
682 plat_dat->fix_mac_speed = rk_fix_speed;
683 - plat_dat->suspend = rk_gmac_suspend;
684 - plat_dat->resume = rk_gmac_resume;
685
686 plat_dat->bsp_priv = rk_gmac_setup(pdev, data);
687 if (IS_ERR(plat_dat->bsp_priv)) {
688 @@ -986,24 +944,65 @@ static int rk_gmac_probe(struct platform
689 goto err_remove_config_dt;
690 }
691
692 - ret = rk_gmac_init(pdev, plat_dat->bsp_priv);
693 + ret = rk_gmac_powerup(plat_dat->bsp_priv);
694 if (ret)
695 goto err_remove_config_dt;
696
697 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
698 if (ret)
699 - goto err_gmac_exit;
700 + goto err_gmac_powerdown;
701
702 return 0;
703
704 -err_gmac_exit:
705 - rk_gmac_exit(pdev, plat_dat->bsp_priv);
706 +err_gmac_powerdown:
707 + rk_gmac_powerdown(plat_dat->bsp_priv);
708 err_remove_config_dt:
709 stmmac_remove_config_dt(pdev, plat_dat);
710
711 return ret;
712 }
713
714 +static int rk_gmac_remove(struct platform_device *pdev)
715 +{
716 + struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
717 + int ret = stmmac_dvr_remove(&pdev->dev);
718 +
719 + rk_gmac_powerdown(bsp_priv);
720 +
721 + return ret;
722 +}
723 +
724 +#ifdef CONFIG_PM_SLEEP
725 +static int rk_gmac_suspend(struct device *dev)
726 +{
727 + struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
728 + int ret = stmmac_suspend(dev);
729 +
730 + /* Keep the PHY up if we use Wake-on-Lan. */
731 + if (!device_may_wakeup(dev)) {
732 + rk_gmac_powerdown(bsp_priv);
733 + bsp_priv->suspended = true;
734 + }
735 +
736 + return ret;
737 +}
738 +
739 +static int rk_gmac_resume(struct device *dev)
740 +{
741 + struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
742 +
743 + /* The PHY was up for Wake-on-Lan. */
744 + if (bsp_priv->suspended) {
745 + rk_gmac_powerup(bsp_priv);
746 + bsp_priv->suspended = false;
747 + }
748 +
749 + return stmmac_resume(dev);
750 +}
751 +#endif /* CONFIG_PM_SLEEP */
752 +
753 +static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
754 +
755 static const struct of_device_id rk_gmac_dwmac_match[] = {
756 { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
757 { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
758 @@ -1016,10 +1015,10 @@ MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_ma
759
760 static struct platform_driver rk_gmac_dwmac_driver = {
761 .probe = rk_gmac_probe,
762 - .remove = stmmac_pltfr_remove,
763 + .remove = rk_gmac_remove,
764 .driver = {
765 .name = "rk_gmac-dwmac",
766 - .pm = &stmmac_pltfr_pm_ops,
767 + .pm = &rk_gmac_pm_ops,
768 .of_match_table = rk_gmac_dwmac_match,
769 },
770 };
771 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
772 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
773 @@ -380,8 +380,8 @@ static int socfpga_dwmac_resume(struct d
774 * control register 0, and can be modified by the phy driver
775 * framework.
776 */
777 - if (priv->phydev)
778 - phy_resume(priv->phydev);
779 + if (ndev->phydev)
780 + phy_resume(ndev->phydev);
781
782 return stmmac_resume(dev);
783 }
784 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
785 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
786 @@ -126,8 +126,8 @@ struct sti_dwmac {
787 struct clk *clk; /* PHY clock */
788 u32 ctrl_reg; /* GMAC glue-logic control register */
789 int clk_sel_reg; /* GMAC ext clk selection register */
790 - struct device *dev;
791 struct regmap *regmap;
792 + bool gmac_en;
793 u32 speed;
794 void (*fix_retime_src)(void *priv, unsigned int speed);
795 };
796 @@ -191,7 +191,7 @@ static void stih4xx_fix_retime_src(void
797 }
798 }
799
800 - if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk && freq)
801 + if (src == TX_RETIME_SRC_CLKGEN && freq)
802 clk_set_rate(dwmac->clk, freq);
803
804 regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
805 @@ -222,26 +222,20 @@ static void stid127_fix_retime_src(void
806 freq = DWMAC_2_5MHZ;
807 }
808
809 - if (dwmac->clk && freq)
810 + if (freq)
811 clk_set_rate(dwmac->clk, freq);
812
813 regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
814 }
815
816 -static int sti_dwmac_init(struct platform_device *pdev, void *priv)
817 +static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
818 {
819 - struct sti_dwmac *dwmac = priv;
820 struct regmap *regmap = dwmac->regmap;
821 int iface = dwmac->interface;
822 - struct device *dev = dwmac->dev;
823 - struct device_node *np = dev->of_node;
824 u32 reg = dwmac->ctrl_reg;
825 u32 val;
826
827 - if (dwmac->clk)
828 - clk_prepare_enable(dwmac->clk);
829 -
830 - if (of_property_read_bool(np, "st,gmac_en"))
831 + if (dwmac->gmac_en)
832 regmap_update_bits(regmap, reg, EN_MASK, EN);
833
834 regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
835 @@ -249,18 +243,11 @@ static int sti_dwmac_init(struct platfor
836 val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
837 regmap_update_bits(regmap, reg, ENMII_MASK, val);
838
839 - dwmac->fix_retime_src(priv, dwmac->speed);
840 + dwmac->fix_retime_src(dwmac, dwmac->speed);
841
842 return 0;
843 }
844
845 -static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
846 -{
847 - struct sti_dwmac *dwmac = priv;
848 -
849 - if (dwmac->clk)
850 - clk_disable_unprepare(dwmac->clk);
851 -}
852 static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
853 struct platform_device *pdev)
854 {
855 @@ -270,9 +257,6 @@ static int sti_dwmac_parse_data(struct s
856 struct regmap *regmap;
857 int err;
858
859 - if (!np)
860 - return -EINVAL;
861 -
862 /* clk selection from extra syscfg register */
863 dwmac->clk_sel_reg = -ENXIO;
864 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
865 @@ -289,9 +273,9 @@ static int sti_dwmac_parse_data(struct s
866 return err;
867 }
868
869 - dwmac->dev = dev;
870 dwmac->interface = of_get_phy_mode(np);
871 dwmac->regmap = regmap;
872 + dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
873 dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
874 dwmac->tx_retime_src = TX_RETIME_SRC_NA;
875 dwmac->speed = SPEED_100;
876 @@ -359,28 +343,65 @@ static int sti_dwmac_probe(struct platfo
877 dwmac->fix_retime_src = data->fix_retime_src;
878
879 plat_dat->bsp_priv = dwmac;
880 - plat_dat->init = sti_dwmac_init;
881 - plat_dat->exit = sti_dwmac_exit;
882 plat_dat->fix_mac_speed = data->fix_retime_src;
883
884 - ret = sti_dwmac_init(pdev, plat_dat->bsp_priv);
885 + ret = clk_prepare_enable(dwmac->clk);
886 if (ret)
887 goto err_remove_config_dt;
888
889 + ret = sti_dwmac_set_mode(dwmac);
890 + if (ret)
891 + goto disable_clk;
892 +
893 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
894 if (ret)
895 - goto err_dwmac_exit;
896 + goto disable_clk;
897
898 return 0;
899
900 -err_dwmac_exit:
901 - sti_dwmac_exit(pdev, plat_dat->bsp_priv);
902 +disable_clk:
903 + clk_disable_unprepare(dwmac->clk);
904 err_remove_config_dt:
905 stmmac_remove_config_dt(pdev, plat_dat);
906
907 return ret;
908 }
909
910 +static int sti_dwmac_remove(struct platform_device *pdev)
911 +{
912 + struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
913 + int ret = stmmac_dvr_remove(&pdev->dev);
914 +
915 + clk_disable_unprepare(dwmac->clk);
916 +
917 + return ret;
918 +}
919 +
920 +#ifdef CONFIG_PM_SLEEP
921 +static int sti_dwmac_suspend(struct device *dev)
922 +{
923 + struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
924 + int ret = stmmac_suspend(dev);
925 +
926 + clk_disable_unprepare(dwmac->clk);
927 +
928 + return ret;
929 +}
930 +
931 +static int sti_dwmac_resume(struct device *dev)
932 +{
933 + struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
934 +
935 + clk_prepare_enable(dwmac->clk);
936 + sti_dwmac_set_mode(dwmac);
937 +
938 + return stmmac_resume(dev);
939 +}
940 +#endif /* CONFIG_PM_SLEEP */
941 +
942 +static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
943 + sti_dwmac_resume);
944 +
945 static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
946 .fix_retime_src = stih4xx_fix_retime_src,
947 };
948 @@ -400,10 +421,10 @@ MODULE_DEVICE_TABLE(of, sti_dwmac_match)
949
950 static struct platform_driver sti_dwmac_driver = {
951 .probe = sti_dwmac_probe,
952 - .remove = stmmac_pltfr_remove,
953 + .remove = sti_dwmac_remove,
954 .driver = {
955 .name = "sti-dwmac",
956 - .pm = &stmmac_pltfr_pm_ops,
957 + .pm = &sti_dwmac_pm_ops,
958 .of_match_table = sti_dwmac_match,
959 },
960 };
961 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
962 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
963 @@ -225,7 +225,7 @@ enum rx_tx_priority_ratio {
964
965 #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
966 #define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
967 -#define DMA_BUS_MODE_RPBL_MASK 0x003e0000 /* Rx-Programmable Burst Len */
968 +#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */
969 #define DMA_BUS_MODE_RPBL_SHIFT 17
970 #define DMA_BUS_MODE_USP 0x00800000
971 #define DMA_BUS_MODE_MAXPBL 0x01000000
972 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
973 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
974 @@ -538,6 +538,12 @@ struct mac_device_info *dwmac1000_setup(
975 mac->link.speed = GMAC_CONTROL_FES;
976 mac->mii.addr = GMAC_MII_ADDR;
977 mac->mii.data = GMAC_MII_DATA;
978 + mac->mii.addr_shift = 11;
979 + mac->mii.addr_mask = 0x0000F800;
980 + mac->mii.reg_shift = 6;
981 + mac->mii.reg_mask = 0x000007C0;
982 + mac->mii.clk_csr_shift = 2;
983 + mac->mii.clk_csr_mask = GENMASK(5, 2);
984
985 /* Get and dump the chip ID */
986 *synopsys_id = stmmac_get_synopsys_id(hwid);
987 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
988 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
989 @@ -84,37 +84,39 @@ static void dwmac1000_dma_axi(void __iom
990 writel(value, ioaddr + DMA_AXI_BUS_MODE);
991 }
992
993 -static void dwmac1000_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
994 - int aal, u32 dma_tx, u32 dma_rx, int atds)
995 +static void dwmac1000_dma_init(void __iomem *ioaddr,
996 + struct stmmac_dma_cfg *dma_cfg,
997 + u32 dma_tx, u32 dma_rx, int atds)
998 {
999 u32 value = readl(ioaddr + DMA_BUS_MODE);
1000 + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1001 + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1002
1003 /*
1004 * Set the DMA PBL (Programmable Burst Length) mode.
1005 *
1006 * Note: before stmmac core 3.50 this mode bit was 4xPBL, and
1007 * post 3.5 mode bit acts as 8*PBL.
1008 - *
1009 - * This configuration doesn't take care about the Separate PBL
1010 - * so only the bits: 13-8 are programmed with the PBL passed from the
1011 - * platform.
1012 */
1013 - value |= DMA_BUS_MODE_MAXPBL;
1014 - value &= ~DMA_BUS_MODE_PBL_MASK;
1015 - value |= (pbl << DMA_BUS_MODE_PBL_SHIFT);
1016 + if (dma_cfg->pblx8)
1017 + value |= DMA_BUS_MODE_MAXPBL;
1018 + value |= DMA_BUS_MODE_USP;
1019 + value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
1020 + value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
1021 + value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1022
1023 /* Set the Fixed burst mode */
1024 - if (fb)
1025 + if (dma_cfg->fixed_burst)
1026 value |= DMA_BUS_MODE_FB;
1027
1028 /* Mixed Burst has no effect when fb is set */
1029 - if (mb)
1030 + if (dma_cfg->mixed_burst)
1031 value |= DMA_BUS_MODE_MB;
1032
1033 if (atds)
1034 value |= DMA_BUS_MODE_ATDS;
1035
1036 - if (aal)
1037 + if (dma_cfg->aal)
1038 value |= DMA_BUS_MODE_AAL;
1039
1040 writel(value, ioaddr + DMA_BUS_MODE);
1041 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1042 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1043 @@ -192,6 +192,13 @@ struct mac_device_info *dwmac100_setup(v
1044 mac->link.speed = 0;
1045 mac->mii.addr = MAC_MII_ADDR;
1046 mac->mii.data = MAC_MII_DATA;
1047 + mac->mii.addr_shift = 11;
1048 + mac->mii.addr_mask = 0x0000F800;
1049 + mac->mii.reg_shift = 6;
1050 + mac->mii.reg_mask = 0x000007C0;
1051 + mac->mii.clk_csr_shift = 2;
1052 + mac->mii.clk_csr_mask = GENMASK(5, 2);
1053 +
1054 /* Synopsys Id is not available on old chips */
1055 *synopsys_id = 0;
1056
1057 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
1058 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
1059 @@ -32,11 +32,12 @@
1060 #include "dwmac100.h"
1061 #include "dwmac_dma.h"
1062
1063 -static void dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
1064 - int aal, u32 dma_tx, u32 dma_rx, int atds)
1065 +static void dwmac100_dma_init(void __iomem *ioaddr,
1066 + struct stmmac_dma_cfg *dma_cfg,
1067 + u32 dma_tx, u32 dma_rx, int atds)
1068 {
1069 /* Enable Application Access by writing to DMA CSR0 */
1070 - writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
1071 + writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
1072 ioaddr + DMA_BUS_MODE);
1073
1074 /* Mask interrupts by writing to CSR7 */
1075 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1076 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1077 @@ -155,8 +155,11 @@ enum power_event {
1078 #define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
1079
1080 #define MTL_OP_MODE_RSF BIT(5)
1081 +#define MTL_OP_MODE_TXQEN BIT(3)
1082 #define MTL_OP_MODE_TSF BIT(1)
1083
1084 +#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
1085 +
1086 #define MTL_OP_MODE_TTC_MASK 0x70
1087 #define MTL_OP_MODE_TTC_SHIFT 4
1088
1089 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1090 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1091 @@ -430,6 +430,12 @@ struct mac_device_info *dwmac4_setup(voi
1092 mac->link.speed = GMAC_CONFIG_FES;
1093 mac->mii.addr = GMAC_MDIO_ADDR;
1094 mac->mii.data = GMAC_MDIO_DATA;
1095 + mac->mii.addr_shift = 21;
1096 + mac->mii.addr_mask = GENMASK(25, 21);
1097 + mac->mii.reg_shift = 16;
1098 + mac->mii.reg_mask = GENMASK(20, 16);
1099 + mac->mii.clk_csr_shift = 8;
1100 + mac->mii.clk_csr_mask = GENMASK(11, 8);
1101
1102 /* Get and dump the chip ID */
1103 *synopsys_id = stmmac_get_synopsys_id(hwid);
1104 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1105 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1106 @@ -23,7 +23,7 @@ static int dwmac4_wrback_get_tx_status(v
1107 unsigned int tdes3;
1108 int ret = tx_done;
1109
1110 - tdes3 = p->des3;
1111 + tdes3 = le32_to_cpu(p->des3);
1112
1113 /* Get tx owner first */
1114 if (unlikely(tdes3 & TDES3_OWN))
1115 @@ -77,9 +77,9 @@ static int dwmac4_wrback_get_rx_status(v
1116 struct dma_desc *p)
1117 {
1118 struct net_device_stats *stats = (struct net_device_stats *)data;
1119 - unsigned int rdes1 = p->des1;
1120 - unsigned int rdes2 = p->des2;
1121 - unsigned int rdes3 = p->des3;
1122 + unsigned int rdes1 = le32_to_cpu(p->des1);
1123 + unsigned int rdes2 = le32_to_cpu(p->des2);
1124 + unsigned int rdes3 = le32_to_cpu(p->des3);
1125 int message_type;
1126 int ret = good_frame;
1127
1128 @@ -176,47 +176,48 @@ static int dwmac4_wrback_get_rx_status(v
1129
1130 static int dwmac4_rd_get_tx_len(struct dma_desc *p)
1131 {
1132 - return (p->des2 & TDES2_BUFFER1_SIZE_MASK);
1133 + return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
1134 }
1135
1136 static int dwmac4_get_tx_owner(struct dma_desc *p)
1137 {
1138 - return (p->des3 & TDES3_OWN) >> TDES3_OWN_SHIFT;
1139 + return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
1140 }
1141
1142 static void dwmac4_set_tx_owner(struct dma_desc *p)
1143 {
1144 - p->des3 |= TDES3_OWN;
1145 + p->des3 |= cpu_to_le32(TDES3_OWN);
1146 }
1147
1148 static void dwmac4_set_rx_owner(struct dma_desc *p)
1149 {
1150 - p->des3 |= RDES3_OWN;
1151 + p->des3 |= cpu_to_le32(RDES3_OWN);
1152 }
1153
1154 static int dwmac4_get_tx_ls(struct dma_desc *p)
1155 {
1156 - return (p->des3 & TDES3_LAST_DESCRIPTOR) >> TDES3_LAST_DESCRIPTOR_SHIFT;
1157 + return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
1158 + >> TDES3_LAST_DESCRIPTOR_SHIFT;
1159 }
1160
1161 static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
1162 {
1163 - return (p->des3 & RDES3_PACKET_SIZE_MASK);
1164 + return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
1165 }
1166
1167 static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
1168 {
1169 - p->des2 |= TDES2_TIMESTAMP_ENABLE;
1170 + p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
1171 }
1172
1173 static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
1174 {
1175 /* Context type from W/B descriptor must be zero */
1176 - if (p->des3 & TDES3_CONTEXT_TYPE)
1177 + if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
1178 return -EINVAL;
1179
1180 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
1181 - if (p->des3 & TDES3_TIMESTAMP_STATUS)
1182 + if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
1183 return 0;
1184
1185 return 1;
1186 @@ -227,9 +228,9 @@ static inline u64 dwmac4_get_timestamp(v
1187 struct dma_desc *p = (struct dma_desc *)desc;
1188 u64 ns;
1189
1190 - ns = p->des0;
1191 + ns = le32_to_cpu(p->des0);
1192 /* convert high/sec time stamp value to nanosecond */
1193 - ns += p->des1 * 1000000000ULL;
1194 + ns += le32_to_cpu(p->des1) * 1000000000ULL;
1195
1196 return ns;
1197 }
1198 @@ -264,7 +265,7 @@ static int dwmac4_wrback_get_rx_timestam
1199
1200 /* Get the status from normal w/b descriptor */
1201 if (likely(p->des3 & TDES3_RS1V)) {
1202 - if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
1203 + if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
1204 int i = 0;
1205
1206 /* Check if timestamp is OK from context descriptor */
1207 @@ -287,10 +288,10 @@ exit:
1208 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1209 int mode, int end)
1210 {
1211 - p->des3 = RDES3_OWN | RDES3_BUFFER1_VALID_ADDR;
1212 + p->des3 = cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
1213
1214 if (!disable_rx_ic)
1215 - p->des3 |= RDES3_INT_ON_COMPLETION_EN;
1216 + p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
1217 }
1218
1219 static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
1220 @@ -305,9 +306,9 @@ static void dwmac4_rd_prepare_tx_desc(st
1221 bool csum_flag, int mode, bool tx_own,
1222 bool ls)
1223 {
1224 - unsigned int tdes3 = p->des3;
1225 + unsigned int tdes3 = le32_to_cpu(p->des3);
1226
1227 - p->des2 |= (len & TDES2_BUFFER1_SIZE_MASK);
1228 + p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
1229
1230 if (is_fs)
1231 tdes3 |= TDES3_FIRST_DESCRIPTOR;
1232 @@ -333,9 +334,9 @@ static void dwmac4_rd_prepare_tx_desc(st
1233 * descriptors for the same frame has to be set before, to
1234 * avoid race condition.
1235 */
1236 - wmb();
1237 + dma_wmb();
1238
1239 - p->des3 = tdes3;
1240 + p->des3 = cpu_to_le32(tdes3);
1241 }
1242
1243 static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
1244 @@ -343,14 +344,14 @@ static void dwmac4_rd_prepare_tso_tx_des
1245 bool ls, unsigned int tcphdrlen,
1246 unsigned int tcppayloadlen)
1247 {
1248 - unsigned int tdes3 = p->des3;
1249 + unsigned int tdes3 = le32_to_cpu(p->des3);
1250
1251 if (len1)
1252 - p->des2 |= (len1 & TDES2_BUFFER1_SIZE_MASK);
1253 + p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
1254
1255 if (len2)
1256 - p->des2 |= (len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
1257 - & TDES2_BUFFER2_SIZE_MASK;
1258 + p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
1259 + & TDES2_BUFFER2_SIZE_MASK);
1260
1261 if (is_fs) {
1262 tdes3 |= TDES3_FIRST_DESCRIPTOR |
1263 @@ -376,9 +377,9 @@ static void dwmac4_rd_prepare_tso_tx_des
1264 * descriptors for the same frame has to be set before, to
1265 * avoid race condition.
1266 */
1267 - wmb();
1268 + dma_wmb();
1269
1270 - p->des3 = tdes3;
1271 + p->des3 = cpu_to_le32(tdes3);
1272 }
1273
1274 static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
1275 @@ -389,7 +390,7 @@ static void dwmac4_release_tx_desc(struc
1276
1277 static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
1278 {
1279 - p->des2 |= TDES2_INTERRUPT_ON_COMPLETION;
1280 + p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
1281 }
1282
1283 static void dwmac4_display_ring(void *head, unsigned int size, bool rx)
1284 @@ -402,7 +403,8 @@ static void dwmac4_display_ring(void *he
1285 for (i = 0; i < size; i++) {
1286 pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
1287 i, (unsigned int)virt_to_phys(p),
1288 - p->des0, p->des1, p->des2, p->des3);
1289 + le32_to_cpu(p->des0), le32_to_cpu(p->des1),
1290 + le32_to_cpu(p->des2), le32_to_cpu(p->des3));
1291 p++;
1292 }
1293 }
1294 @@ -411,8 +413,8 @@ static void dwmac4_set_mss_ctxt(struct d
1295 {
1296 p->des0 = 0;
1297 p->des1 = 0;
1298 - p->des2 = mss;
1299 - p->des3 = TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV;
1300 + p->des2 = cpu_to_le32(mss);
1301 + p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
1302 }
1303
1304 const struct stmmac_desc_ops dwmac4_desc_ops = {
1305 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1306 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1307 @@ -71,25 +71,29 @@ static void dwmac4_dma_axi(void __iomem
1308 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1309 }
1310
1311 -static void dwmac4_dma_init_channel(void __iomem *ioaddr, int pbl,
1312 +static void dwmac4_dma_init_channel(void __iomem *ioaddr,
1313 + struct stmmac_dma_cfg *dma_cfg,
1314 u32 dma_tx_phy, u32 dma_rx_phy,
1315 u32 channel)
1316 {
1317 u32 value;
1318 + int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1319 + int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1320
1321 /* set PBL for each channels. Currently we affect same configuration
1322 * on each channel
1323 */
1324 value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
1325 - value = value | DMA_BUS_MODE_PBL;
1326 + if (dma_cfg->pblx8)
1327 + value = value | DMA_BUS_MODE_PBL;
1328 writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
1329
1330 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
1331 - value = value | (pbl << DMA_BUS_MODE_PBL_SHIFT);
1332 + value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
1333 writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
1334
1335 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
1336 - value = value | (pbl << DMA_BUS_MODE_RPBL_SHIFT);
1337 + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1338 writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
1339
1340 /* Mask interrupts by writing to CSR7 */
1341 @@ -99,27 +103,28 @@ static void dwmac4_dma_init_channel(void
1342 writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
1343 }
1344
1345 -static void dwmac4_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
1346 - int aal, u32 dma_tx, u32 dma_rx, int atds)
1347 +static void dwmac4_dma_init(void __iomem *ioaddr,
1348 + struct stmmac_dma_cfg *dma_cfg,
1349 + u32 dma_tx, u32 dma_rx, int atds)
1350 {
1351 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
1352 int i;
1353
1354 /* Set the Fixed burst mode */
1355 - if (fb)
1356 + if (dma_cfg->fixed_burst)
1357 value |= DMA_SYS_BUS_FB;
1358
1359 /* Mixed Burst has no effect when fb is set */
1360 - if (mb)
1361 + if (dma_cfg->mixed_burst)
1362 value |= DMA_SYS_BUS_MB;
1363
1364 - if (aal)
1365 + if (dma_cfg->aal)
1366 value |= DMA_SYS_BUS_AAL;
1367
1368 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1369
1370 for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
1371 - dwmac4_dma_init_channel(ioaddr, pbl, dma_tx, dma_rx, i);
1372 + dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
1373 }
1374
1375 static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel)
1376 @@ -215,7 +220,17 @@ static void dwmac4_dma_chan_op_mode(void
1377 else
1378 mtl_tx_op |= MTL_OP_MODE_TTC_512;
1379 }
1380 -
1381 + /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
1382 + * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
1383 + * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
1384 + * with reset values: TXQEN off, TQS 256 bytes.
1385 + *
1386 + * Write the bits in both cases, since it will have no effect when RO.
1387 + * For DWC_EQOS_NUM_TXQ > 1, the top bits in MTL_OP_MODE_TQS_MASK might
1388 + * be RO, however, writing the whole TQS field will result in a value
1389 + * equal to DWC_EQOS_TXFIFO_SIZE, just like for DWC_EQOS_NUM_TXQ == 1.
1390 + */
1391 + mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
1392 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
1393
1394 mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
1395 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
1396 +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
1397 @@ -30,7 +30,7 @@ static int enh_desc_get_tx_status(void *
1398 struct dma_desc *p, void __iomem *ioaddr)
1399 {
1400 struct net_device_stats *stats = (struct net_device_stats *)data;
1401 - unsigned int tdes0 = p->des0;
1402 + unsigned int tdes0 = le32_to_cpu(p->des0);
1403 int ret = tx_done;
1404
1405 /* Get tx owner first */
1406 @@ -95,7 +95,7 @@ static int enh_desc_get_tx_status(void *
1407
1408 static int enh_desc_get_tx_len(struct dma_desc *p)
1409 {
1410 - return (p->des1 & ETDES1_BUFFER1_SIZE_MASK);
1411 + return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
1412 }
1413
1414 static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
1415 @@ -134,8 +134,8 @@ static int enh_desc_coe_rdes0(int ipc_er
1416 static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
1417 struct dma_extended_desc *p)
1418 {
1419 - unsigned int rdes0 = p->basic.des0;
1420 - unsigned int rdes4 = p->des4;
1421 + unsigned int rdes0 = le32_to_cpu(p->basic.des0);
1422 + unsigned int rdes4 = le32_to_cpu(p->des4);
1423
1424 if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
1425 int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
1426 @@ -199,7 +199,7 @@ static int enh_desc_get_rx_status(void *
1427 struct dma_desc *p)
1428 {
1429 struct net_device_stats *stats = (struct net_device_stats *)data;
1430 - unsigned int rdes0 = p->des0;
1431 + unsigned int rdes0 = le32_to_cpu(p->des0);
1432 int ret = good_frame;
1433
1434 if (unlikely(rdes0 & RDES0_OWN))
1435 @@ -265,8 +265,8 @@ static int enh_desc_get_rx_status(void *
1436 static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1437 int mode, int end)
1438 {
1439 - p->des0 |= RDES0_OWN;
1440 - p->des1 |= ((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
1441 + p->des0 |= cpu_to_le32(RDES0_OWN);
1442 + p->des1 |= cpu_to_le32((BUF_SIZE_8KiB - 1) & ERDES1_BUFFER1_SIZE_MASK);
1443
1444 if (mode == STMMAC_CHAIN_MODE)
1445 ehn_desc_rx_set_on_chain(p);
1446 @@ -274,12 +274,12 @@ static void enh_desc_init_rx_desc(struct
1447 ehn_desc_rx_set_on_ring(p, end);
1448
1449 if (disable_rx_ic)
1450 - p->des1 |= ERDES1_DISABLE_IC;
1451 + p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
1452 }
1453
1454 static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
1455 {
1456 - p->des0 &= ~ETDES0_OWN;
1457 + p->des0 &= cpu_to_le32(~ETDES0_OWN);
1458 if (mode == STMMAC_CHAIN_MODE)
1459 enh_desc_end_tx_desc_on_chain(p);
1460 else
1461 @@ -288,27 +288,27 @@ static void enh_desc_init_tx_desc(struct
1462
1463 static int enh_desc_get_tx_owner(struct dma_desc *p)
1464 {
1465 - return (p->des0 & ETDES0_OWN) >> 31;
1466 + return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
1467 }
1468
1469 static void enh_desc_set_tx_owner(struct dma_desc *p)
1470 {
1471 - p->des0 |= ETDES0_OWN;
1472 + p->des0 |= cpu_to_le32(ETDES0_OWN);
1473 }
1474
1475 static void enh_desc_set_rx_owner(struct dma_desc *p)
1476 {
1477 - p->des0 |= RDES0_OWN;
1478 + p->des0 |= cpu_to_le32(RDES0_OWN);
1479 }
1480
1481 static int enh_desc_get_tx_ls(struct dma_desc *p)
1482 {
1483 - return (p->des0 & ETDES0_LAST_SEGMENT) >> 29;
1484 + return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
1485 }
1486
1487 static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
1488 {
1489 - int ter = (p->des0 & ETDES0_END_RING) >> 21;
1490 + int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
1491
1492 memset(p, 0, offsetof(struct dma_desc, des2));
1493 if (mode == STMMAC_CHAIN_MODE)
1494 @@ -321,7 +321,7 @@ static void enh_desc_prepare_tx_desc(str
1495 bool csum_flag, int mode, bool tx_own,
1496 bool ls)
1497 {
1498 - unsigned int tdes0 = p->des0;
1499 + unsigned int tdes0 = le32_to_cpu(p->des0);
1500
1501 if (mode == STMMAC_CHAIN_MODE)
1502 enh_set_tx_desc_len_on_chain(p, len);
1503 @@ -350,14 +350,14 @@ static void enh_desc_prepare_tx_desc(str
1504 * descriptors for the same frame has to be set before, to
1505 * avoid race condition.
1506 */
1507 - wmb();
1508 + dma_wmb();
1509
1510 - p->des0 = tdes0;
1511 + p->des0 = cpu_to_le32(tdes0);
1512 }
1513
1514 static void enh_desc_set_tx_ic(struct dma_desc *p)
1515 {
1516 - p->des0 |= ETDES0_INTERRUPT;
1517 + p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
1518 }
1519
1520 static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
1521 @@ -372,18 +372,18 @@ static int enh_desc_get_rx_frame_len(str
1522 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
1523 csum = 2;
1524
1525 - return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
1526 - csum);
1527 + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
1528 + >> RDES0_FRAME_LEN_SHIFT) - csum);
1529 }
1530
1531 static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
1532 {
1533 - p->des0 |= ETDES0_TIME_STAMP_ENABLE;
1534 + p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
1535 }
1536
1537 static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
1538 {
1539 - return (p->des0 & ETDES0_TIME_STAMP_STATUS) >> 17;
1540 + return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
1541 }
1542
1543 static u64 enh_desc_get_timestamp(void *desc, u32 ats)
1544 @@ -392,13 +392,13 @@ static u64 enh_desc_get_timestamp(void *
1545
1546 if (ats) {
1547 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
1548 - ns = p->des6;
1549 + ns = le32_to_cpu(p->des6);
1550 /* convert high/sec time stamp value to nanosecond */
1551 - ns += p->des7 * 1000000000ULL;
1552 + ns += le32_to_cpu(p->des7) * 1000000000ULL;
1553 } else {
1554 struct dma_desc *p = (struct dma_desc *)desc;
1555 - ns = p->des2;
1556 - ns += p->des3 * 1000000000ULL;
1557 + ns = le32_to_cpu(p->des2);
1558 + ns += le32_to_cpu(p->des3) * 1000000000ULL;
1559 }
1560
1561 return ns;
1562 @@ -408,10 +408,11 @@ static int enh_desc_get_rx_timestamp_sta
1563 {
1564 if (ats) {
1565 struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
1566 - return (p->basic.des0 & RDES0_IPC_CSUM_ERROR) >> 7;
1567 + return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
1568 } else {
1569 struct dma_desc *p = (struct dma_desc *)desc;
1570 - if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
1571 + if ((le32_to_cpu(p->des2) == 0xffffffff) &&
1572 + (le32_to_cpu(p->des3) == 0xffffffff))
1573 /* timestamp is corrupted, hence don't store it */
1574 return 0;
1575 else
1576 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
1577 +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
1578 @@ -30,8 +30,8 @@ static int ndesc_get_tx_status(void *dat
1579 struct dma_desc *p, void __iomem *ioaddr)
1580 {
1581 struct net_device_stats *stats = (struct net_device_stats *)data;
1582 - unsigned int tdes0 = p->des0;
1583 - unsigned int tdes1 = p->des1;
1584 + unsigned int tdes0 = le32_to_cpu(p->des0);
1585 + unsigned int tdes1 = le32_to_cpu(p->des1);
1586 int ret = tx_done;
1587
1588 /* Get tx owner first */
1589 @@ -77,7 +77,7 @@ static int ndesc_get_tx_status(void *dat
1590
1591 static int ndesc_get_tx_len(struct dma_desc *p)
1592 {
1593 - return (p->des1 & RDES1_BUFFER1_SIZE_MASK);
1594 + return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
1595 }
1596
1597 /* This function verifies if each incoming frame has some errors
1598 @@ -88,7 +88,7 @@ static int ndesc_get_rx_status(void *dat
1599 struct dma_desc *p)
1600 {
1601 int ret = good_frame;
1602 - unsigned int rdes0 = p->des0;
1603 + unsigned int rdes0 = le32_to_cpu(p->des0);
1604 struct net_device_stats *stats = (struct net_device_stats *)data;
1605
1606 if (unlikely(rdes0 & RDES0_OWN))
1607 @@ -141,8 +141,8 @@ static int ndesc_get_rx_status(void *dat
1608 static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
1609 int end)
1610 {
1611 - p->des0 |= RDES0_OWN;
1612 - p->des1 |= (BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK;
1613 + p->des0 |= cpu_to_le32(RDES0_OWN);
1614 + p->des1 |= cpu_to_le32((BUF_SIZE_2KiB - 1) & RDES1_BUFFER1_SIZE_MASK);
1615
1616 if (mode == STMMAC_CHAIN_MODE)
1617 ndesc_rx_set_on_chain(p, end);
1618 @@ -150,12 +150,12 @@ static void ndesc_init_rx_desc(struct dm
1619 ndesc_rx_set_on_ring(p, end);
1620
1621 if (disable_rx_ic)
1622 - p->des1 |= RDES1_DISABLE_IC;
1623 + p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
1624 }
1625
1626 static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
1627 {
1628 - p->des0 &= ~TDES0_OWN;
1629 + p->des0 &= cpu_to_le32(~TDES0_OWN);
1630 if (mode == STMMAC_CHAIN_MODE)
1631 ndesc_tx_set_on_chain(p);
1632 else
1633 @@ -164,27 +164,27 @@ static void ndesc_init_tx_desc(struct dm
1634
1635 static int ndesc_get_tx_owner(struct dma_desc *p)
1636 {
1637 - return (p->des0 & TDES0_OWN) >> 31;
1638 + return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
1639 }
1640
1641 static void ndesc_set_tx_owner(struct dma_desc *p)
1642 {
1643 - p->des0 |= TDES0_OWN;
1644 + p->des0 |= cpu_to_le32(TDES0_OWN);
1645 }
1646
1647 static void ndesc_set_rx_owner(struct dma_desc *p)
1648 {
1649 - p->des0 |= RDES0_OWN;
1650 + p->des0 |= cpu_to_le32(RDES0_OWN);
1651 }
1652
1653 static int ndesc_get_tx_ls(struct dma_desc *p)
1654 {
1655 - return (p->des1 & TDES1_LAST_SEGMENT) >> 30;
1656 + return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
1657 }
1658
1659 static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
1660 {
1661 - int ter = (p->des1 & TDES1_END_RING) >> 25;
1662 + int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
1663
1664 memset(p, 0, offsetof(struct dma_desc, des2));
1665 if (mode == STMMAC_CHAIN_MODE)
1666 @@ -197,7 +197,7 @@ static void ndesc_prepare_tx_desc(struct
1667 bool csum_flag, int mode, bool tx_own,
1668 bool ls)
1669 {
1670 - unsigned int tdes1 = p->des1;
1671 + unsigned int tdes1 = le32_to_cpu(p->des1);
1672
1673 if (is_fs)
1674 tdes1 |= TDES1_FIRST_SEGMENT;
1675 @@ -212,7 +212,7 @@ static void ndesc_prepare_tx_desc(struct
1676 if (ls)
1677 tdes1 |= TDES1_LAST_SEGMENT;
1678
1679 - p->des1 = tdes1;
1680 + p->des1 = cpu_to_le32(tdes1);
1681
1682 if (mode == STMMAC_CHAIN_MODE)
1683 norm_set_tx_desc_len_on_chain(p, len);
1684 @@ -220,12 +220,12 @@ static void ndesc_prepare_tx_desc(struct
1685 norm_set_tx_desc_len_on_ring(p, len);
1686
1687 if (tx_own)
1688 - p->des0 |= TDES0_OWN;
1689 + p->des0 |= cpu_to_le32(TDES0_OWN);
1690 }
1691
1692 static void ndesc_set_tx_ic(struct dma_desc *p)
1693 {
1694 - p->des1 |= TDES1_INTERRUPT;
1695 + p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
1696 }
1697
1698 static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
1699 @@ -241,19 +241,20 @@ static int ndesc_get_rx_frame_len(struct
1700 if (rx_coe_type == STMMAC_RX_COE_TYPE1)
1701 csum = 2;
1702
1703 - return (((p->des0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT) -
1704 + return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
1705 + >> RDES0_FRAME_LEN_SHIFT) -
1706 csum);
1707
1708 }
1709
1710 static void ndesc_enable_tx_timestamp(struct dma_desc *p)
1711 {
1712 - p->des1 |= TDES1_TIME_STAMP_ENABLE;
1713 + p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
1714 }
1715
1716 static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
1717 {
1718 - return (p->des0 & TDES0_TIME_STAMP_STATUS) >> 17;
1719 + return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
1720 }
1721
1722 static u64 ndesc_get_timestamp(void *desc, u32 ats)
1723 @@ -261,9 +262,9 @@ static u64 ndesc_get_timestamp(void *des
1724 struct dma_desc *p = (struct dma_desc *)desc;
1725 u64 ns;
1726
1727 - ns = p->des2;
1728 + ns = le32_to_cpu(p->des2);
1729 /* convert high/sec time stamp value to nanosecond */
1730 - ns += p->des3 * 1000000000ULL;
1731 + ns += le32_to_cpu(p->des3) * 1000000000ULL;
1732
1733 return ns;
1734 }
1735 @@ -272,7 +273,8 @@ static int ndesc_get_rx_timestamp_status
1736 {
1737 struct dma_desc *p = (struct dma_desc *)desc;
1738
1739 - if ((p->des2 == 0xffffffff) && (p->des3 == 0xffffffff))
1740 + if ((le32_to_cpu(p->des2) == 0xffffffff) &&
1741 + (le32_to_cpu(p->des3) == 0xffffffff))
1742 /* timestamp is corrupted, hence don't store it */
1743 return 0;
1744 else
1745 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1746 +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
1747 @@ -34,7 +34,7 @@ static int stmmac_jumbo_frm(void *p, str
1748 unsigned int entry = priv->cur_tx;
1749 struct dma_desc *desc;
1750 unsigned int nopaged_len = skb_headlen(skb);
1751 - unsigned int bmax, len;
1752 + unsigned int bmax, len, des2;
1753
1754 if (priv->extend_desc)
1755 desc = (struct dma_desc *)(priv->dma_etx + entry);
1756 @@ -50,16 +50,17 @@ static int stmmac_jumbo_frm(void *p, str
1757
1758 if (nopaged_len > BUF_SIZE_8KiB) {
1759
1760 - desc->des2 = dma_map_single(priv->device, skb->data,
1761 - bmax, DMA_TO_DEVICE);
1762 - if (dma_mapping_error(priv->device, desc->des2))
1763 + des2 = dma_map_single(priv->device, skb->data, bmax,
1764 + DMA_TO_DEVICE);
1765 + desc->des2 = cpu_to_le32(des2);
1766 + if (dma_mapping_error(priv->device, des2))
1767 return -1;
1768
1769 - priv->tx_skbuff_dma[entry].buf = desc->des2;
1770 + priv->tx_skbuff_dma[entry].buf = des2;
1771 priv->tx_skbuff_dma[entry].len = bmax;
1772 priv->tx_skbuff_dma[entry].is_jumbo = true;
1773
1774 - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1775 + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
1776 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
1777 STMMAC_RING_MODE, 0, false);
1778 priv->tx_skbuff[entry] = NULL;
1779 @@ -70,26 +71,28 @@ static int stmmac_jumbo_frm(void *p, str
1780 else
1781 desc = priv->dma_tx + entry;
1782
1783 - desc->des2 = dma_map_single(priv->device, skb->data + bmax,
1784 - len, DMA_TO_DEVICE);
1785 - if (dma_mapping_error(priv->device, desc->des2))
1786 + des2 = dma_map_single(priv->device, skb->data + bmax, len,
1787 + DMA_TO_DEVICE);
1788 + desc->des2 = cpu_to_le32(des2);
1789 + if (dma_mapping_error(priv->device, des2))
1790 return -1;
1791 - priv->tx_skbuff_dma[entry].buf = desc->des2;
1792 + priv->tx_skbuff_dma[entry].buf = des2;
1793 priv->tx_skbuff_dma[entry].len = len;
1794 priv->tx_skbuff_dma[entry].is_jumbo = true;
1795
1796 - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1797 + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
1798 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
1799 STMMAC_RING_MODE, 1, true);
1800 } else {
1801 - desc->des2 = dma_map_single(priv->device, skb->data,
1802 - nopaged_len, DMA_TO_DEVICE);
1803 - if (dma_mapping_error(priv->device, desc->des2))
1804 + des2 = dma_map_single(priv->device, skb->data,
1805 + nopaged_len, DMA_TO_DEVICE);
1806 + desc->des2 = cpu_to_le32(des2);
1807 + if (dma_mapping_error(priv->device, des2))
1808 return -1;
1809 - priv->tx_skbuff_dma[entry].buf = desc->des2;
1810 + priv->tx_skbuff_dma[entry].buf = des2;
1811 priv->tx_skbuff_dma[entry].len = nopaged_len;
1812 priv->tx_skbuff_dma[entry].is_jumbo = true;
1813 - desc->des3 = desc->des2 + BUF_SIZE_4KiB;
1814 + desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
1815 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
1816 STMMAC_RING_MODE, 0, true);
1817 }
1818 @@ -115,13 +118,13 @@ static void stmmac_refill_desc3(void *pr
1819
1820 /* Fill DES3 in case of RING mode */
1821 if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
1822 - p->des3 = p->des2 + BUF_SIZE_8KiB;
1823 + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
1824 }
1825
1826 /* In ring mode we need to fill the desc3 because it is used as buffer */
1827 static void stmmac_init_desc3(struct dma_desc *p)
1828 {
1829 - p->des3 = p->des2 + BUF_SIZE_8KiB;
1830 + p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
1831 }
1832
1833 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
1834 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
1835 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
1836 @@ -64,7 +64,6 @@ struct stmmac_priv {
1837 dma_addr_t dma_tx_phy;
1838 int tx_coalesce;
1839 int hwts_tx_en;
1840 - spinlock_t tx_lock;
1841 bool tx_path_in_lpi_mode;
1842 struct timer_list txtimer;
1843 bool tso;
1844 @@ -90,7 +89,6 @@ struct stmmac_priv {
1845 struct mac_device_info *hw;
1846 spinlock_t lock;
1847
1848 - struct phy_device *phydev ____cacheline_aligned_in_smp;
1849 int oldlink;
1850 int speed;
1851 int oldduplex;
1852 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
1853 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
1854 @@ -263,7 +263,7 @@ static void stmmac_ethtool_getdrvinfo(st
1855 {
1856 struct stmmac_priv *priv = netdev_priv(dev);
1857
1858 - if (priv->plat->has_gmac)
1859 + if (priv->plat->has_gmac || priv->plat->has_gmac4)
1860 strlcpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
1861 else
1862 strlcpy(info->driver, MAC100_ETHTOOL_NAME,
1863 @@ -272,25 +272,26 @@ static void stmmac_ethtool_getdrvinfo(st
1864 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
1865 }
1866
1867 -static int stmmac_ethtool_getsettings(struct net_device *dev,
1868 - struct ethtool_cmd *cmd)
1869 +static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
1870 + struct ethtool_link_ksettings *cmd)
1871 {
1872 struct stmmac_priv *priv = netdev_priv(dev);
1873 - struct phy_device *phy = priv->phydev;
1874 + struct phy_device *phy = dev->phydev;
1875 int rc;
1876
1877 if (priv->hw->pcs & STMMAC_PCS_RGMII ||
1878 priv->hw->pcs & STMMAC_PCS_SGMII) {
1879 struct rgmii_adv adv;
1880 + u32 supported, advertising, lp_advertising;
1881
1882 if (!priv->xstats.pcs_link) {
1883 - ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
1884 - cmd->duplex = DUPLEX_UNKNOWN;
1885 + cmd->base.speed = SPEED_UNKNOWN;
1886 + cmd->base.duplex = DUPLEX_UNKNOWN;
1887 return 0;
1888 }
1889 - cmd->duplex = priv->xstats.pcs_duplex;
1890 + cmd->base.duplex = priv->xstats.pcs_duplex;
1891
1892 - ethtool_cmd_speed_set(cmd, priv->xstats.pcs_speed);
1893 + cmd->base.speed = priv->xstats.pcs_speed;
1894
1895 /* Get and convert ADV/LP_ADV from the HW AN registers */
1896 if (!priv->hw->mac->pcs_get_adv_lp)
1897 @@ -300,45 +301,59 @@ static int stmmac_ethtool_getsettings(st
1898
1899 /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
1900
1901 + ethtool_convert_link_mode_to_legacy_u32(
1902 + &supported, cmd->link_modes.supported);
1903 + ethtool_convert_link_mode_to_legacy_u32(
1904 + &advertising, cmd->link_modes.advertising);
1905 + ethtool_convert_link_mode_to_legacy_u32(
1906 + &lp_advertising, cmd->link_modes.lp_advertising);
1907 +
1908 if (adv.pause & STMMAC_PCS_PAUSE)
1909 - cmd->advertising |= ADVERTISED_Pause;
1910 + advertising |= ADVERTISED_Pause;
1911 if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
1912 - cmd->advertising |= ADVERTISED_Asym_Pause;
1913 + advertising |= ADVERTISED_Asym_Pause;
1914 if (adv.lp_pause & STMMAC_PCS_PAUSE)
1915 - cmd->lp_advertising |= ADVERTISED_Pause;
1916 + lp_advertising |= ADVERTISED_Pause;
1917 if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
1918 - cmd->lp_advertising |= ADVERTISED_Asym_Pause;
1919 + lp_advertising |= ADVERTISED_Asym_Pause;
1920
1921 /* Reg49[3] always set because ANE is always supported */
1922 - cmd->autoneg = ADVERTISED_Autoneg;
1923 - cmd->supported |= SUPPORTED_Autoneg;
1924 - cmd->advertising |= ADVERTISED_Autoneg;
1925 - cmd->lp_advertising |= ADVERTISED_Autoneg;
1926 + cmd->base.autoneg = ADVERTISED_Autoneg;
1927 + supported |= SUPPORTED_Autoneg;
1928 + advertising |= ADVERTISED_Autoneg;
1929 + lp_advertising |= ADVERTISED_Autoneg;
1930
1931 if (adv.duplex) {
1932 - cmd->supported |= (SUPPORTED_1000baseT_Full |
1933 - SUPPORTED_100baseT_Full |
1934 - SUPPORTED_10baseT_Full);
1935 - cmd->advertising |= (ADVERTISED_1000baseT_Full |
1936 - ADVERTISED_100baseT_Full |
1937 - ADVERTISED_10baseT_Full);
1938 + supported |= (SUPPORTED_1000baseT_Full |
1939 + SUPPORTED_100baseT_Full |
1940 + SUPPORTED_10baseT_Full);
1941 + advertising |= (ADVERTISED_1000baseT_Full |
1942 + ADVERTISED_100baseT_Full |
1943 + ADVERTISED_10baseT_Full);
1944 } else {
1945 - cmd->supported |= (SUPPORTED_1000baseT_Half |
1946 - SUPPORTED_100baseT_Half |
1947 - SUPPORTED_10baseT_Half);
1948 - cmd->advertising |= (ADVERTISED_1000baseT_Half |
1949 - ADVERTISED_100baseT_Half |
1950 - ADVERTISED_10baseT_Half);
1951 + supported |= (SUPPORTED_1000baseT_Half |
1952 + SUPPORTED_100baseT_Half |
1953 + SUPPORTED_10baseT_Half);
1954 + advertising |= (ADVERTISED_1000baseT_Half |
1955 + ADVERTISED_100baseT_Half |
1956 + ADVERTISED_10baseT_Half);
1957 }
1958 if (adv.lp_duplex)
1959 - cmd->lp_advertising |= (ADVERTISED_1000baseT_Full |
1960 - ADVERTISED_100baseT_Full |
1961 - ADVERTISED_10baseT_Full);
1962 + lp_advertising |= (ADVERTISED_1000baseT_Full |
1963 + ADVERTISED_100baseT_Full |
1964 + ADVERTISED_10baseT_Full);
1965 else
1966 - cmd->lp_advertising |= (ADVERTISED_1000baseT_Half |
1967 - ADVERTISED_100baseT_Half |
1968 - ADVERTISED_10baseT_Half);
1969 - cmd->port = PORT_OTHER;
1970 + lp_advertising |= (ADVERTISED_1000baseT_Half |
1971 + ADVERTISED_100baseT_Half |
1972 + ADVERTISED_10baseT_Half);
1973 + cmd->base.port = PORT_OTHER;
1974 +
1975 + ethtool_convert_legacy_u32_to_link_mode(
1976 + cmd->link_modes.supported, supported);
1977 + ethtool_convert_legacy_u32_to_link_mode(
1978 + cmd->link_modes.advertising, advertising);
1979 + ethtool_convert_legacy_u32_to_link_mode(
1980 + cmd->link_modes.lp_advertising, lp_advertising);
1981
1982 return 0;
1983 }
1984 @@ -353,16 +368,16 @@ static int stmmac_ethtool_getsettings(st
1985 "link speed / duplex setting\n", dev->name);
1986 return -EBUSY;
1987 }
1988 - cmd->transceiver = XCVR_INTERNAL;
1989 - rc = phy_ethtool_gset(phy, cmd);
1990 + rc = phy_ethtool_ksettings_get(phy, cmd);
1991 return rc;
1992 }
1993
1994 -static int stmmac_ethtool_setsettings(struct net_device *dev,
1995 - struct ethtool_cmd *cmd)
1996 +static int
1997 +stmmac_ethtool_set_link_ksettings(struct net_device *dev,
1998 + const struct ethtool_link_ksettings *cmd)
1999 {
2000 struct stmmac_priv *priv = netdev_priv(dev);
2001 - struct phy_device *phy = priv->phydev;
2002 + struct phy_device *phy = dev->phydev;
2003 int rc;
2004
2005 if (priv->hw->pcs & STMMAC_PCS_RGMII ||
2006 @@ -370,7 +385,7 @@ static int stmmac_ethtool_setsettings(st
2007 u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
2008
2009 /* Only support ANE */
2010 - if (cmd->autoneg != AUTONEG_ENABLE)
2011 + if (cmd->base.autoneg != AUTONEG_ENABLE)
2012 return -EINVAL;
2013
2014 mask &= (ADVERTISED_1000baseT_Half |
2015 @@ -391,9 +406,7 @@ static int stmmac_ethtool_setsettings(st
2016 return 0;
2017 }
2018
2019 - spin_lock(&priv->lock);
2020 - rc = phy_ethtool_sset(phy, cmd);
2021 - spin_unlock(&priv->lock);
2022 + rc = phy_ethtool_ksettings_set(phy, cmd);
2023
2024 return rc;
2025 }
2026 @@ -433,7 +446,7 @@ static void stmmac_ethtool_gregs(struct
2027
2028 memset(reg_space, 0x0, REG_SPACE_SIZE);
2029
2030 - if (!priv->plat->has_gmac) {
2031 + if (!(priv->plat->has_gmac || priv->plat->has_gmac4)) {
2032 /* MAC registers */
2033 for (i = 0; i < 12; i++)
2034 reg_space[i] = readl(priv->ioaddr + (i * 4));
2035 @@ -471,12 +484,12 @@ stmmac_get_pauseparam(struct net_device
2036 if (!adv_lp.pause)
2037 return;
2038 } else {
2039 - if (!(priv->phydev->supported & SUPPORTED_Pause) ||
2040 - !(priv->phydev->supported & SUPPORTED_Asym_Pause))
2041 + if (!(netdev->phydev->supported & SUPPORTED_Pause) ||
2042 + !(netdev->phydev->supported & SUPPORTED_Asym_Pause))
2043 return;
2044 }
2045
2046 - pause->autoneg = priv->phydev->autoneg;
2047 + pause->autoneg = netdev->phydev->autoneg;
2048
2049 if (priv->flow_ctrl & FLOW_RX)
2050 pause->rx_pause = 1;
2051 @@ -490,7 +503,7 @@ stmmac_set_pauseparam(struct net_device
2052 struct ethtool_pauseparam *pause)
2053 {
2054 struct stmmac_priv *priv = netdev_priv(netdev);
2055 - struct phy_device *phy = priv->phydev;
2056 + struct phy_device *phy = netdev->phydev;
2057 int new_pause = FLOW_OFF;
2058
2059 if (priv->hw->pcs && priv->hw->mac->pcs_get_adv_lp) {
2060 @@ -550,7 +563,7 @@ static void stmmac_get_ethtool_stats(str
2061 }
2062 }
2063 if (priv->eee_enabled) {
2064 - int val = phy_get_eee_err(priv->phydev);
2065 + int val = phy_get_eee_err(dev->phydev);
2066 if (val)
2067 priv->xstats.phy_eee_wakeup_error_n = val;
2068 }
2069 @@ -669,7 +682,7 @@ static int stmmac_ethtool_op_get_eee(str
2070 edata->eee_active = priv->eee_active;
2071 edata->tx_lpi_timer = priv->tx_lpi_timer;
2072
2073 - return phy_ethtool_get_eee(priv->phydev, edata);
2074 + return phy_ethtool_get_eee(dev->phydev, edata);
2075 }
2076
2077 static int stmmac_ethtool_op_set_eee(struct net_device *dev,
2078 @@ -694,7 +707,7 @@ static int stmmac_ethtool_op_set_eee(str
2079 priv->tx_lpi_timer = edata->tx_lpi_timer;
2080 }
2081
2082 - return phy_ethtool_set_eee(priv->phydev, edata);
2083 + return phy_ethtool_set_eee(dev->phydev, edata);
2084 }
2085
2086 static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
2087 @@ -853,8 +866,6 @@ static int stmmac_set_tunable(struct net
2088 static const struct ethtool_ops stmmac_ethtool_ops = {
2089 .begin = stmmac_check_if_running,
2090 .get_drvinfo = stmmac_ethtool_getdrvinfo,
2091 - .get_settings = stmmac_ethtool_getsettings,
2092 - .set_settings = stmmac_ethtool_setsettings,
2093 .get_msglevel = stmmac_ethtool_getmsglevel,
2094 .set_msglevel = stmmac_ethtool_setmsglevel,
2095 .get_regs = stmmac_ethtool_gregs,
2096 @@ -874,6 +885,8 @@ static const struct ethtool_ops stmmac_e
2097 .set_coalesce = stmmac_set_coalesce,
2098 .get_tunable = stmmac_get_tunable,
2099 .set_tunable = stmmac_set_tunable,
2100 + .get_link_ksettings = stmmac_ethtool_get_link_ksettings,
2101 + .set_link_ksettings = stmmac_ethtool_set_link_ksettings,
2102 };
2103
2104 void stmmac_set_ethtool_ops(struct net_device *netdev)
2105 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2106 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2107 @@ -105,8 +105,8 @@ module_param(eee_timer, int, S_IRUGO | S
2108 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
2109 #define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
2110
2111 -/* By default the driver will use the ring mode to manage tx and rx descriptors
2112 - * but passing this value so user can force to use the chain instead of the ring
2113 +/* By default the driver will use the ring mode to manage tx and rx descriptors,
2114 + * but allow user to force to use the chain instead of the ring
2115 */
2116 static unsigned int chain_mode;
2117 module_param(chain_mode, int, S_IRUGO);
2118 @@ -221,7 +221,8 @@ static inline u32 stmmac_rx_dirty(struct
2119 */
2120 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
2121 {
2122 - struct phy_device *phydev = priv->phydev;
2123 + struct net_device *ndev = priv->dev;
2124 + struct phy_device *phydev = ndev->phydev;
2125
2126 if (likely(priv->plat->fix_mac_speed))
2127 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
2128 @@ -279,6 +280,7 @@ static void stmmac_eee_ctrl_timer(unsign
2129 */
2130 bool stmmac_eee_init(struct stmmac_priv *priv)
2131 {
2132 + struct net_device *ndev = priv->dev;
2133 unsigned long flags;
2134 int interface = priv->plat->interface;
2135 bool ret = false;
2136 @@ -301,7 +303,7 @@ bool stmmac_eee_init(struct stmmac_priv
2137 int tx_lpi_timer = priv->tx_lpi_timer;
2138
2139 /* Check if the PHY supports EEE */
2140 - if (phy_init_eee(priv->phydev, 1)) {
2141 + if (phy_init_eee(ndev->phydev, 1)) {
2142 /* To manage at run-time if the EEE cannot be supported
2143 * anymore (for example because the lp caps have been
2144 * changed).
2145 @@ -309,7 +311,7 @@ bool stmmac_eee_init(struct stmmac_priv
2146 */
2147 spin_lock_irqsave(&priv->lock, flags);
2148 if (priv->eee_active) {
2149 - pr_debug("stmmac: disable EEE\n");
2150 + netdev_dbg(priv->dev, "disable EEE\n");
2151 del_timer_sync(&priv->eee_ctrl_timer);
2152 priv->hw->mac->set_eee_timer(priv->hw, 0,
2153 tx_lpi_timer);
2154 @@ -333,12 +335,12 @@ bool stmmac_eee_init(struct stmmac_priv
2155 tx_lpi_timer);
2156 }
2157 /* Set HW EEE according to the speed */
2158 - priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
2159 + priv->hw->mac->set_eee_pls(priv->hw, ndev->phydev->link);
2160
2161 ret = true;
2162 spin_unlock_irqrestore(&priv->lock, flags);
2163
2164 - pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
2165 + netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
2166 }
2167 out:
2168 return ret;
2169 @@ -456,8 +458,8 @@ static int stmmac_hwtstamp_ioctl(struct
2170 sizeof(struct hwtstamp_config)))
2171 return -EFAULT;
2172
2173 - pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
2174 - __func__, config.flags, config.tx_type, config.rx_filter);
2175 + netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
2176 + __func__, config.flags, config.tx_type, config.rx_filter);
2177
2178 /* reserved for future extensions */
2179 if (config.flags)
2180 @@ -712,7 +714,7 @@ static void stmmac_release_ptp(struct st
2181 static void stmmac_adjust_link(struct net_device *dev)
2182 {
2183 struct stmmac_priv *priv = netdev_priv(dev);
2184 - struct phy_device *phydev = priv->phydev;
2185 + struct phy_device *phydev = dev->phydev;
2186 unsigned long flags;
2187 int new_state = 0;
2188 unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
2189 @@ -765,9 +767,9 @@ static void stmmac_adjust_link(struct ne
2190 stmmac_hw_fix_mac_speed(priv);
2191 break;
2192 default:
2193 - if (netif_msg_link(priv))
2194 - pr_warn("%s: Speed (%d) not 10/100\n",
2195 - dev->name, phydev->speed);
2196 + netif_warn(priv, link, priv->dev,
2197 + "Speed (%d) not 10/100\n",
2198 + phydev->speed);
2199 break;
2200 }
2201
2202 @@ -820,10 +822,10 @@ static void stmmac_check_pcs_mode(struct
2203 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
2204 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
2205 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
2206 - pr_debug("STMMAC: PCS RGMII support enable\n");
2207 + netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
2208 priv->hw->pcs = STMMAC_PCS_RGMII;
2209 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
2210 - pr_debug("STMMAC: PCS SGMII support enable\n");
2211 + netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
2212 priv->hw->pcs = STMMAC_PCS_SGMII;
2213 }
2214 }
2215 @@ -858,15 +860,15 @@ static int stmmac_init_phy(struct net_de
2216
2217 snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
2218 priv->plat->phy_addr);
2219 - pr_debug("stmmac_init_phy: trying to attach to %s\n",
2220 - phy_id_fmt);
2221 + netdev_dbg(priv->dev, "%s: trying to attach to %s\n", __func__,
2222 + phy_id_fmt);
2223
2224 phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
2225 interface);
2226 }
2227
2228 if (IS_ERR_OR_NULL(phydev)) {
2229 - pr_err("%s: Could not attach to PHY\n", dev->name);
2230 + netdev_err(priv->dev, "Could not attach to PHY\n");
2231 if (!phydev)
2232 return -ENODEV;
2233
2234 @@ -899,10 +901,8 @@ static int stmmac_init_phy(struct net_de
2235 if (phydev->is_pseudo_fixed_link)
2236 phydev->irq = PHY_POLL;
2237
2238 - pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
2239 - " Link = %d\n", dev->name, phydev->phy_id, phydev->link);
2240 -
2241 - priv->phydev = phydev;
2242 + netdev_dbg(priv->dev, "%s: attached to PHY (UID 0x%x) Link = %d\n",
2243 + __func__, phydev->phy_id, phydev->link);
2244
2245 return 0;
2246 }
2247 @@ -988,7 +988,8 @@ static int stmmac_init_rx_buffers(struct
2248
2249 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
2250 if (!skb) {
2251 - pr_err("%s: Rx init fails; skb is NULL\n", __func__);
2252 + netdev_err(priv->dev,
2253 + "%s: Rx init fails; skb is NULL\n", __func__);
2254 return -ENOMEM;
2255 }
2256 priv->rx_skbuff[i] = skb;
2257 @@ -996,15 +997,15 @@ static int stmmac_init_rx_buffers(struct
2258 priv->dma_buf_sz,
2259 DMA_FROM_DEVICE);
2260 if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
2261 - pr_err("%s: DMA mapping error\n", __func__);
2262 + netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
2263 dev_kfree_skb_any(skb);
2264 return -EINVAL;
2265 }
2266
2267 if (priv->synopsys_id >= DWMAC_CORE_4_00)
2268 - p->des0 = priv->rx_skbuff_dma[i];
2269 + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
2270 else
2271 - p->des2 = priv->rx_skbuff_dma[i];
2272 + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
2273
2274 if ((priv->hw->mode->init_desc3) &&
2275 (priv->dma_buf_sz == BUF_SIZE_16KiB))
2276 @@ -1046,13 +1047,14 @@ static int init_dma_desc_rings(struct ne
2277
2278 priv->dma_buf_sz = bfsize;
2279
2280 - if (netif_msg_probe(priv)) {
2281 - pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
2282 - (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
2283 + netif_dbg(priv, probe, priv->dev,
2284 + "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
2285 + __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
2286 +
2287 + /* RX INITIALIZATION */
2288 + netif_dbg(priv, probe, priv->dev,
2289 + "SKB addresses:\nskb\t\tskb data\tdma data\n");
2290
2291 - /* RX INITIALIZATION */
2292 - pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
2293 - }
2294 for (i = 0; i < DMA_RX_SIZE; i++) {
2295 struct dma_desc *p;
2296 if (priv->extend_desc)
2297 @@ -1064,10 +1066,9 @@ static int init_dma_desc_rings(struct ne
2298 if (ret)
2299 goto err_init_rx_buffers;
2300
2301 - if (netif_msg_probe(priv))
2302 - pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
2303 - priv->rx_skbuff[i]->data,
2304 - (unsigned int)priv->rx_skbuff_dma[i]);
2305 + netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
2306 + priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
2307 + (unsigned int)priv->rx_skbuff_dma[i]);
2308 }
2309 priv->cur_rx = 0;
2310 priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
2311 @@ -1322,7 +1323,7 @@ static void stmmac_tx_clean(struct stmma
2312 unsigned int bytes_compl = 0, pkts_compl = 0;
2313 unsigned int entry = priv->dirty_tx;
2314
2315 - spin_lock(&priv->tx_lock);
2316 + netif_tx_lock(priv->dev);
2317
2318 priv->xstats.tx_clean++;
2319
2320 @@ -1393,22 +1394,17 @@ static void stmmac_tx_clean(struct stmma
2321 netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
2322
2323 if (unlikely(netif_queue_stopped(priv->dev) &&
2324 - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
2325 - netif_tx_lock(priv->dev);
2326 - if (netif_queue_stopped(priv->dev) &&
2327 - stmmac_tx_avail(priv) > STMMAC_TX_THRESH) {
2328 - if (netif_msg_tx_done(priv))
2329 - pr_debug("%s: restart transmit\n", __func__);
2330 - netif_wake_queue(priv->dev);
2331 - }
2332 - netif_tx_unlock(priv->dev);
2333 + stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
2334 + netif_dbg(priv, tx_done, priv->dev,
2335 + "%s: restart transmit\n", __func__);
2336 + netif_wake_queue(priv->dev);
2337 }
2338
2339 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
2340 stmmac_enable_eee_mode(priv);
2341 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
2342 }
2343 - spin_unlock(&priv->tx_lock);
2344 + netif_tx_unlock(priv->dev);
2345 }
2346
2347 static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
2348 @@ -1512,7 +1508,7 @@ static void stmmac_mmc_setup(struct stmm
2349 dwmac_mmc_ctrl(priv->mmcaddr, mode);
2350 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2351 } else
2352 - pr_info(" No MAC Management Counters available\n");
2353 + netdev_info(priv->dev, "No MAC Management Counters available\n");
2354 }
2355
2356 /**
2357 @@ -1525,18 +1521,18 @@ static void stmmac_mmc_setup(struct stmm
2358 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
2359 {
2360 if (priv->plat->enh_desc) {
2361 - pr_info(" Enhanced/Alternate descriptors\n");
2362 + dev_info(priv->device, "Enhanced/Alternate descriptors\n");
2363
2364 /* GMAC older than 3.50 has no extended descriptors */
2365 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
2366 - pr_info("\tEnabled extended descriptors\n");
2367 + dev_info(priv->device, "Enabled extended descriptors\n");
2368 priv->extend_desc = 1;
2369 } else
2370 - pr_warn("Extended descriptors not supported\n");
2371 + dev_warn(priv->device, "Extended descriptors not supported\n");
2372
2373 priv->hw->desc = &enh_desc_ops;
2374 } else {
2375 - pr_info(" Normal descriptors\n");
2376 + dev_info(priv->device, "Normal descriptors\n");
2377 priv->hw->desc = &ndesc_ops;
2378 }
2379 }
2380 @@ -1577,8 +1573,8 @@ static void stmmac_check_ether_addr(stru
2381 priv->dev->dev_addr, 0);
2382 if (!is_valid_ether_addr(priv->dev->dev_addr))
2383 eth_hw_addr_random(priv->dev);
2384 - pr_info("%s: device MAC address %pM\n", priv->dev->name,
2385 - priv->dev->dev_addr);
2386 + netdev_info(priv->dev, "device MAC address %pM\n",
2387 + priv->dev->dev_addr);
2388 }
2389 }
2390
2391 @@ -1592,16 +1588,12 @@ static void stmmac_check_ether_addr(stru
2392 */
2393 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
2394 {
2395 - int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, aal = 0;
2396 - int mixed_burst = 0;
2397 int atds = 0;
2398 int ret = 0;
2399
2400 - if (priv->plat->dma_cfg) {
2401 - pbl = priv->plat->dma_cfg->pbl;
2402 - fixed_burst = priv->plat->dma_cfg->fixed_burst;
2403 - mixed_burst = priv->plat->dma_cfg->mixed_burst;
2404 - aal = priv->plat->dma_cfg->aal;
2405 + if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
2406 + dev_err(priv->device, "Invalid DMA configuration\n");
2407 + return -EINVAL;
2408 }
2409
2410 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
2411 @@ -1613,8 +1605,8 @@ static int stmmac_init_dma_engine(struct
2412 return ret;
2413 }
2414
2415 - priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
2416 - aal, priv->dma_tx_phy, priv->dma_rx_phy, atds);
2417 + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
2418 + priv->dma_tx_phy, priv->dma_rx_phy, atds);
2419
2420 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
2421 priv->rx_tail_addr = priv->dma_rx_phy +
2422 @@ -1686,7 +1678,8 @@ static int stmmac_hw_setup(struct net_de
2423 /* DMA initialization and SW reset */
2424 ret = stmmac_init_dma_engine(priv);
2425 if (ret < 0) {
2426 - pr_err("%s: DMA engine initialization failed\n", __func__);
2427 + netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
2428 + __func__);
2429 return ret;
2430 }
2431
2432 @@ -1715,7 +1708,7 @@ static int stmmac_hw_setup(struct net_de
2433
2434 ret = priv->hw->mac->rx_ipc(priv->hw);
2435 if (!ret) {
2436 - pr_warn(" RX IPC Checksum Offload disabled\n");
2437 + netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
2438 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
2439 priv->hw->rx_csum = 0;
2440 }
2441 @@ -1740,10 +1733,11 @@ static int stmmac_hw_setup(struct net_de
2442 #ifdef CONFIG_DEBUG_FS
2443 ret = stmmac_init_fs(dev);
2444 if (ret < 0)
2445 - pr_warn("%s: failed debugFS registration\n", __func__);
2446 + netdev_warn(priv->dev, "%s: failed debugFS registration\n",
2447 + __func__);
2448 #endif
2449 /* Start the ball rolling... */
2450 - pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
2451 + netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
2452 priv->hw->dma->start_tx(priv->ioaddr);
2453 priv->hw->dma->start_rx(priv->ioaddr);
2454
2455 @@ -1798,8 +1792,9 @@ static int stmmac_open(struct net_device
2456 priv->hw->pcs != STMMAC_PCS_RTBI) {
2457 ret = stmmac_init_phy(dev);
2458 if (ret) {
2459 - pr_err("%s: Cannot attach to PHY (error: %d)\n",
2460 - __func__, ret);
2461 + netdev_err(priv->dev,
2462 + "%s: Cannot attach to PHY (error: %d)\n",
2463 + __func__, ret);
2464 return ret;
2465 }
2466 }
2467 @@ -1814,33 +1809,36 @@ static int stmmac_open(struct net_device
2468
2469 ret = alloc_dma_desc_resources(priv);
2470 if (ret < 0) {
2471 - pr_err("%s: DMA descriptors allocation failed\n", __func__);
2472 + netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
2473 + __func__);
2474 goto dma_desc_error;
2475 }
2476
2477 ret = init_dma_desc_rings(dev, GFP_KERNEL);
2478 if (ret < 0) {
2479 - pr_err("%s: DMA descriptors initialization failed\n", __func__);
2480 + netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
2481 + __func__);
2482 goto init_error;
2483 }
2484
2485 ret = stmmac_hw_setup(dev, true);
2486 if (ret < 0) {
2487 - pr_err("%s: Hw setup failed\n", __func__);
2488 + netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
2489 goto init_error;
2490 }
2491
2492 stmmac_init_tx_coalesce(priv);
2493
2494 - if (priv->phydev)
2495 - phy_start(priv->phydev);
2496 + if (dev->phydev)
2497 + phy_start(dev->phydev);
2498
2499 /* Request the IRQ lines */
2500 ret = request_irq(dev->irq, stmmac_interrupt,
2501 IRQF_SHARED, dev->name, dev);
2502 if (unlikely(ret < 0)) {
2503 - pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
2504 - __func__, dev->irq, ret);
2505 + netdev_err(priv->dev,
2506 + "%s: ERROR: allocating the IRQ %d (error: %d)\n",
2507 + __func__, dev->irq, ret);
2508 goto init_error;
2509 }
2510
2511 @@ -1849,8 +1847,9 @@ static int stmmac_open(struct net_device
2512 ret = request_irq(priv->wol_irq, stmmac_interrupt,
2513 IRQF_SHARED, dev->name, dev);
2514 if (unlikely(ret < 0)) {
2515 - pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2516 - __func__, priv->wol_irq, ret);
2517 + netdev_err(priv->dev,
2518 + "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
2519 + __func__, priv->wol_irq, ret);
2520 goto wolirq_error;
2521 }
2522 }
2523 @@ -1860,8 +1859,9 @@ static int stmmac_open(struct net_device
2524 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
2525 dev->name, dev);
2526 if (unlikely(ret < 0)) {
2527 - pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2528 - __func__, priv->lpi_irq, ret);
2529 + netdev_err(priv->dev,
2530 + "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
2531 + __func__, priv->lpi_irq, ret);
2532 goto lpiirq_error;
2533 }
2534 }
2535 @@ -1880,8 +1880,8 @@ wolirq_error:
2536 init_error:
2537 free_dma_desc_resources(priv);
2538 dma_desc_error:
2539 - if (priv->phydev)
2540 - phy_disconnect(priv->phydev);
2541 + if (dev->phydev)
2542 + phy_disconnect(dev->phydev);
2543
2544 return ret;
2545 }
2546 @@ -1900,10 +1900,9 @@ static int stmmac_release(struct net_dev
2547 del_timer_sync(&priv->eee_ctrl_timer);
2548
2549 /* Stop and disconnect the PHY */
2550 - if (priv->phydev) {
2551 - phy_stop(priv->phydev);
2552 - phy_disconnect(priv->phydev);
2553 - priv->phydev = NULL;
2554 + if (dev->phydev) {
2555 + phy_stop(dev->phydev);
2556 + phy_disconnect(dev->phydev);
2557 }
2558
2559 netif_stop_queue(dev);
2560 @@ -1963,13 +1962,13 @@ static void stmmac_tso_allocator(struct
2561 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2562 desc = priv->dma_tx + priv->cur_tx;
2563
2564 - desc->des0 = des + (total_len - tmp_len);
2565 + desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
2566 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
2567 TSO_MAX_BUFF_SIZE : tmp_len;
2568
2569 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
2570 0, 1,
2571 - (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
2572 + (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
2573 0, 0);
2574
2575 tmp_len -= TSO_MAX_BUFF_SIZE;
2576 @@ -2014,8 +2013,6 @@ static netdev_tx_t stmmac_tso_xmit(struc
2577 u8 proto_hdr_len;
2578 int i;
2579
2580 - spin_lock(&priv->tx_lock);
2581 -
2582 /* Compute header lengths */
2583 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
2584
2585 @@ -2025,9 +2022,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
2586 if (!netif_queue_stopped(dev)) {
2587 netif_stop_queue(dev);
2588 /* This is a hard error, log it. */
2589 - pr_err("%s: Tx Ring full when queue awake\n", __func__);
2590 + netdev_err(priv->dev,
2591 + "%s: Tx Ring full when queue awake\n",
2592 + __func__);
2593 }
2594 - spin_unlock(&priv->tx_lock);
2595 return NETDEV_TX_BUSY;
2596 }
2597
2598 @@ -2065,11 +2063,11 @@ static netdev_tx_t stmmac_tso_xmit(struc
2599 priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
2600 priv->tx_skbuff[first_entry] = skb;
2601
2602 - first->des0 = des;
2603 + first->des0 = cpu_to_le32(des);
2604
2605 /* Fill start of payload in buff2 of first descriptor */
2606 if (pay_len)
2607 - first->des1 = des + proto_hdr_len;
2608 + first->des1 = cpu_to_le32(des + proto_hdr_len);
2609
2610 /* If needed take extra descriptors to fill the remaining payload */
2611 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
2612 @@ -2098,8 +2096,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
2613 priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
2614
2615 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2616 - if (netif_msg_hw(priv))
2617 - pr_debug("%s: stop transmitted packets\n", __func__);
2618 + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2619 + __func__);
2620 netif_stop_queue(dev);
2621 }
2622
2623 @@ -2143,7 +2141,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
2624 * descriptor and then barrier is needed to make sure that
2625 * all is coherent before granting the DMA engine.
2626 */
2627 - smp_wmb();
2628 + dma_wmb();
2629
2630 if (netif_msg_pktdata(priv)) {
2631 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
2632 @@ -2162,11 +2160,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
2633 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2634 STMMAC_CHAN0);
2635
2636 - spin_unlock(&priv->tx_lock);
2637 return NETDEV_TX_OK;
2638
2639 dma_map_err:
2640 - spin_unlock(&priv->tx_lock);
2641 dev_err(priv->device, "Tx dma map failed\n");
2642 dev_kfree_skb(skb);
2643 priv->dev->stats.tx_dropped++;
2644 @@ -2198,14 +2194,13 @@ static netdev_tx_t stmmac_xmit(struct sk
2645 return stmmac_tso_xmit(skb, dev);
2646 }
2647
2648 - spin_lock(&priv->tx_lock);
2649 -
2650 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
2651 - spin_unlock(&priv->tx_lock);
2652 if (!netif_queue_stopped(dev)) {
2653 netif_stop_queue(dev);
2654 /* This is a hard error, log it. */
2655 - pr_err("%s: Tx Ring full when queue awake\n", __func__);
2656 + netdev_err(priv->dev,
2657 + "%s: Tx Ring full when queue awake\n",
2658 + __func__);
2659 }
2660 return NETDEV_TX_BUSY;
2661 }
2662 @@ -2258,13 +2253,11 @@ static netdev_tx_t stmmac_xmit(struct sk
2663
2664 priv->tx_skbuff[entry] = NULL;
2665
2666 - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2667 - desc->des0 = des;
2668 - priv->tx_skbuff_dma[entry].buf = desc->des0;
2669 - } else {
2670 - desc->des2 = des;
2671 - priv->tx_skbuff_dma[entry].buf = desc->des2;
2672 - }
2673 + priv->tx_skbuff_dma[entry].buf = des;
2674 + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2675 + desc->des0 = cpu_to_le32(des);
2676 + else
2677 + desc->des2 = cpu_to_le32(des);
2678
2679 priv->tx_skbuff_dma[entry].map_as_page = true;
2680 priv->tx_skbuff_dma[entry].len = len;
2681 @@ -2282,9 +2275,10 @@ static netdev_tx_t stmmac_xmit(struct sk
2682 if (netif_msg_pktdata(priv)) {
2683 void *tx_head;
2684
2685 - pr_debug("%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2686 - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2687 - entry, first, nfrags);
2688 + netdev_dbg(priv->dev,
2689 + "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
2690 + __func__, priv->cur_tx, priv->dirty_tx, first_entry,
2691 + entry, first, nfrags);
2692
2693 if (priv->extend_desc)
2694 tx_head = (void *)priv->dma_etx;
2695 @@ -2293,13 +2287,13 @@ static netdev_tx_t stmmac_xmit(struct sk
2696
2697 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
2698
2699 - pr_debug(">>> frame to be transmitted: ");
2700 + netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
2701 print_pkt(skb->data, skb->len);
2702 }
2703
2704 if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
2705 - if (netif_msg_hw(priv))
2706 - pr_debug("%s: stop transmitted packets\n", __func__);
2707 + netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
2708 + __func__);
2709 netif_stop_queue(dev);
2710 }
2711
2712 @@ -2335,13 +2329,11 @@ static netdev_tx_t stmmac_xmit(struct sk
2713 if (dma_mapping_error(priv->device, des))
2714 goto dma_map_err;
2715
2716 - if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2717 - first->des0 = des;
2718 - priv->tx_skbuff_dma[first_entry].buf = first->des0;
2719 - } else {
2720 - first->des2 = des;
2721 - priv->tx_skbuff_dma[first_entry].buf = first->des2;
2722 - }
2723 + priv->tx_skbuff_dma[first_entry].buf = des;
2724 + if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2725 + first->des0 = cpu_to_le32(des);
2726 + else
2727 + first->des2 = cpu_to_le32(des);
2728
2729 priv->tx_skbuff_dma[first_entry].len = nopaged_len;
2730 priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
2731 @@ -2362,7 +2354,7 @@ static netdev_tx_t stmmac_xmit(struct sk
2732 * descriptor and then barrier is needed to make sure that
2733 * all is coherent before granting the DMA engine.
2734 */
2735 - smp_wmb();
2736 + dma_wmb();
2737 }
2738
2739 netdev_sent_queue(dev, skb->len);
2740 @@ -2373,12 +2365,10 @@ static netdev_tx_t stmmac_xmit(struct sk
2741 priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
2742 STMMAC_CHAN0);
2743
2744 - spin_unlock(&priv->tx_lock);
2745 return NETDEV_TX_OK;
2746
2747 dma_map_err:
2748 - spin_unlock(&priv->tx_lock);
2749 - dev_err(priv->device, "Tx dma map failed\n");
2750 + netdev_err(priv->dev, "Tx DMA map failed\n");
2751 dev_kfree_skb(skb);
2752 priv->dev->stats.tx_dropped++;
2753 return NETDEV_TX_OK;
2754 @@ -2449,16 +2439,16 @@ static inline void stmmac_rx_refill(stru
2755 DMA_FROM_DEVICE);
2756 if (dma_mapping_error(priv->device,
2757 priv->rx_skbuff_dma[entry])) {
2758 - dev_err(priv->device, "Rx dma map failed\n");
2759 + netdev_err(priv->dev, "Rx DMA map failed\n");
2760 dev_kfree_skb(skb);
2761 break;
2762 }
2763
2764 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
2765 - p->des0 = priv->rx_skbuff_dma[entry];
2766 + p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2767 p->des1 = 0;
2768 } else {
2769 - p->des2 = priv->rx_skbuff_dma[entry];
2770 + p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
2771 }
2772 if (priv->hw->mode->refill_desc3)
2773 priv->hw->mode->refill_desc3(priv, p);
2774 @@ -2466,17 +2456,17 @@ static inline void stmmac_rx_refill(stru
2775 if (priv->rx_zeroc_thresh > 0)
2776 priv->rx_zeroc_thresh--;
2777
2778 - if (netif_msg_rx_status(priv))
2779 - pr_debug("\trefill entry #%d\n", entry);
2780 + netif_dbg(priv, rx_status, priv->dev,
2781 + "refill entry #%d\n", entry);
2782 }
2783 - wmb();
2784 + dma_wmb();
2785
2786 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2787 priv->hw->desc->init_rx_desc(p, priv->use_riwt, 0, 0);
2788 else
2789 priv->hw->desc->set_rx_owner(p);
2790
2791 - wmb();
2792 + dma_wmb();
2793
2794 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
2795 }
2796 @@ -2500,7 +2490,7 @@ static int stmmac_rx(struct stmmac_priv
2797 if (netif_msg_rx_status(priv)) {
2798 void *rx_head;
2799
2800 - pr_info(">>>>>> %s: descriptor ring:\n", __func__);
2801 + netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
2802 if (priv->extend_desc)
2803 rx_head = (void *)priv->dma_erx;
2804 else
2805 @@ -2562,9 +2552,9 @@ static int stmmac_rx(struct stmmac_priv
2806 unsigned int des;
2807
2808 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
2809 - des = p->des0;
2810 + des = le32_to_cpu(p->des0);
2811 else
2812 - des = p->des2;
2813 + des = le32_to_cpu(p->des2);
2814
2815 frame_len = priv->hw->desc->get_rx_frame_len(p, coe);
2816
2817 @@ -2573,9 +2563,9 @@ static int stmmac_rx(struct stmmac_priv
2818 * ignored
2819 */
2820 if (frame_len > priv->dma_buf_sz) {
2821 - pr_err("%s: len %d larger than size (%d)\n",
2822 - priv->dev->name, frame_len,
2823 - priv->dma_buf_sz);
2824 + netdev_err(priv->dev,
2825 + "len %d larger than size (%d)\n",
2826 + frame_len, priv->dma_buf_sz);
2827 priv->dev->stats.rx_length_errors++;
2828 break;
2829 }
2830 @@ -2587,11 +2577,11 @@ static int stmmac_rx(struct stmmac_priv
2831 frame_len -= ETH_FCS_LEN;
2832
2833 if (netif_msg_rx_status(priv)) {
2834 - pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
2835 - p, entry, des);
2836 + netdev_dbg(priv->dev, "\tdesc: %p [entry %d] buff=0x%x\n",
2837 + p, entry, des);
2838 if (frame_len > ETH_FRAME_LEN)
2839 - pr_debug("\tframe size %d, COE: %d\n",
2840 - frame_len, status);
2841 + netdev_dbg(priv->dev, "frame size %d, COE: %d\n",
2842 + frame_len, status);
2843 }
2844
2845 /* The zero-copy is always used for all the sizes
2846 @@ -2628,8 +2618,9 @@ static int stmmac_rx(struct stmmac_priv
2847 } else {
2848 skb = priv->rx_skbuff[entry];
2849 if (unlikely(!skb)) {
2850 - pr_err("%s: Inconsistent Rx chain\n",
2851 - priv->dev->name);
2852 + netdev_err(priv->dev,
2853 + "%s: Inconsistent Rx chain\n",
2854 + priv->dev->name);
2855 priv->dev->stats.rx_dropped++;
2856 break;
2857 }
2858 @@ -2645,7 +2636,8 @@ static int stmmac_rx(struct stmmac_priv
2859 }
2860
2861 if (netif_msg_pktdata(priv)) {
2862 - pr_debug("frame received (%dbytes)", frame_len);
2863 + netdev_dbg(priv->dev, "frame received (%dbytes)",
2864 + frame_len);
2865 print_pkt(skb->data, frame_len);
2866 }
2867
2868 @@ -2748,7 +2740,7 @@ static int stmmac_change_mtu(struct net_
2869 int max_mtu;
2870
2871 if (netif_running(dev)) {
2872 - pr_err("%s: must be stopped to change its MTU\n", dev->name);
2873 + netdev_err(priv->dev, "must be stopped to change its MTU\n");
2874 return -EBUSY;
2875 }
2876
2877 @@ -2840,7 +2832,7 @@ static irqreturn_t stmmac_interrupt(int
2878 pm_wakeup_event(priv->device, 0);
2879
2880 if (unlikely(!dev)) {
2881 - pr_err("%s: invalid dev pointer\n", __func__);
2882 + netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
2883 return IRQ_NONE;
2884 }
2885
2886 @@ -2898,7 +2890,6 @@ static void stmmac_poll_controller(struc
2887 */
2888 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2889 {
2890 - struct stmmac_priv *priv = netdev_priv(dev);
2891 int ret = -EOPNOTSUPP;
2892
2893 if (!netif_running(dev))
2894 @@ -2908,9 +2899,9 @@ static int stmmac_ioctl(struct net_devic
2895 case SIOCGMIIPHY:
2896 case SIOCGMIIREG:
2897 case SIOCSMIIREG:
2898 - if (!priv->phydev)
2899 + if (!dev->phydev)
2900 return -EINVAL;
2901 - ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2902 + ret = phy_mii_ioctl(dev->phydev, rq, cmd);
2903 break;
2904 case SIOCSHWTSTAMP:
2905 ret = stmmac_hwtstamp_ioctl(dev, rq);
2906 @@ -2938,14 +2929,17 @@ static void sysfs_display_ring(void *hea
2907 x = *(u64 *) ep;
2908 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2909 i, (unsigned int)virt_to_phys(ep),
2910 - ep->basic.des0, ep->basic.des1,
2911 - ep->basic.des2, ep->basic.des3);
2912 + le32_to_cpu(ep->basic.des0),
2913 + le32_to_cpu(ep->basic.des1),
2914 + le32_to_cpu(ep->basic.des2),
2915 + le32_to_cpu(ep->basic.des3));
2916 ep++;
2917 } else {
2918 x = *(u64 *) p;
2919 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2920 i, (unsigned int)virt_to_phys(ep),
2921 - p->des0, p->des1, p->des2, p->des3);
2922 + le32_to_cpu(p->des0), le32_to_cpu(p->des1),
2923 + le32_to_cpu(p->des2), le32_to_cpu(p->des3));
2924 p++;
2925 }
2926 seq_printf(seq, "\n");
2927 @@ -2977,6 +2971,8 @@ static int stmmac_sysfs_ring_open(struct
2928 return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
2929 }
2930
2931 +/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */
2932 +
2933 static const struct file_operations stmmac_rings_status_fops = {
2934 .owner = THIS_MODULE,
2935 .open = stmmac_sysfs_ring_open,
2936 @@ -2999,11 +2995,11 @@ static int stmmac_sysfs_dma_cap_read(str
2937 seq_printf(seq, "\tDMA HW features\n");
2938 seq_printf(seq, "==============================\n");
2939
2940 - seq_printf(seq, "\t10/100 Mbps %s\n",
2941 + seq_printf(seq, "\t10/100 Mbps: %s\n",
2942 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2943 - seq_printf(seq, "\t1000 Mbps %s\n",
2944 + seq_printf(seq, "\t1000 Mbps: %s\n",
2945 (priv->dma_cap.mbps_1000) ? "Y" : "N");
2946 - seq_printf(seq, "\tHalf duple %s\n",
2947 + seq_printf(seq, "\tHalf duplex: %s\n",
2948 (priv->dma_cap.half_duplex) ? "Y" : "N");
2949 seq_printf(seq, "\tHash Filter: %s\n",
2950 (priv->dma_cap.hash_filter) ? "Y" : "N");
2951 @@ -3021,9 +3017,9 @@ static int stmmac_sysfs_dma_cap_read(str
2952 (priv->dma_cap.rmon) ? "Y" : "N");
2953 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2954 (priv->dma_cap.time_stamp) ? "Y" : "N");
2955 - seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
2956 + seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
2957 (priv->dma_cap.atime_stamp) ? "Y" : "N");
2958 - seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
2959 + seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
2960 (priv->dma_cap.eee) ? "Y" : "N");
2961 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2962 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2963 @@ -3070,8 +3066,7 @@ static int stmmac_init_fs(struct net_dev
2964 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
2965
2966 if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
2967 - pr_err("ERROR %s/%s, debugfs create directory failed\n",
2968 - STMMAC_RESOURCE_NAME, dev->name);
2969 + netdev_err(priv->dev, "ERROR failed to create debugfs directory\n");
2970
2971 return -ENOMEM;
2972 }
2973 @@ -3083,7 +3078,7 @@ static int stmmac_init_fs(struct net_dev
2974 &stmmac_rings_status_fops);
2975
2976 if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
2977 - pr_info("ERROR creating stmmac ring debugfs file\n");
2978 + netdev_err(priv->dev, "ERROR creating stmmac ring debugfs file\n");
2979 debugfs_remove_recursive(priv->dbgfs_dir);
2980
2981 return -ENOMEM;
2982 @@ -3095,7 +3090,7 @@ static int stmmac_init_fs(struct net_dev
2983 dev, &stmmac_dma_cap_fops);
2984
2985 if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
2986 - pr_info("ERROR creating stmmac MMC debugfs file\n");
2987 + netdev_err(priv->dev, "ERROR creating stmmac MMC debugfs file\n");
2988 debugfs_remove_recursive(priv->dbgfs_dir);
2989
2990 return -ENOMEM;
2991 @@ -3167,11 +3162,11 @@ static int stmmac_hw_init(struct stmmac_
2992 } else {
2993 if (chain_mode) {
2994 priv->hw->mode = &chain_mode_ops;
2995 - pr_info(" Chain mode enabled\n");
2996 + dev_info(priv->device, "Chain mode enabled\n");
2997 priv->mode = STMMAC_CHAIN_MODE;
2998 } else {
2999 priv->hw->mode = &ring_mode_ops;
3000 - pr_info(" Ring mode enabled\n");
3001 + dev_info(priv->device, "Ring mode enabled\n");
3002 priv->mode = STMMAC_RING_MODE;
3003 }
3004 }
3005 @@ -3179,7 +3174,7 @@ static int stmmac_hw_init(struct stmmac_
3006 /* Get the HW capability (new GMAC newer than 3.50a) */
3007 priv->hw_cap_support = stmmac_get_hw_features(priv);
3008 if (priv->hw_cap_support) {
3009 - pr_info(" DMA HW capability register supported");
3010 + dev_info(priv->device, "DMA HW capability register supported\n");
3011
3012 /* We can override some gmac/dma configuration fields: e.g.
3013 * enh_desc, tx_coe (e.g. that are passed through the
3014 @@ -3204,8 +3199,9 @@ static int stmmac_hw_init(struct stmmac_
3015 else if (priv->dma_cap.rx_coe_type1)
3016 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
3017
3018 - } else
3019 - pr_info(" No HW DMA feature register supported");
3020 + } else {
3021 + dev_info(priv->device, "No HW DMA feature register supported\n");
3022 + }
3023
3024 /* To use alternate (extended), normal or GMAC4 descriptor structures */
3025 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3026 @@ -3215,20 +3211,20 @@ static int stmmac_hw_init(struct stmmac_
3027
3028 if (priv->plat->rx_coe) {
3029 priv->hw->rx_csum = priv->plat->rx_coe;
3030 - pr_info(" RX Checksum Offload Engine supported\n");
3031 + dev_info(priv->device, "RX Checksum Offload Engine supported\n");
3032 if (priv->synopsys_id < DWMAC_CORE_4_00)
3033 - pr_info("\tCOE Type %d\n", priv->hw->rx_csum);
3034 + dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
3035 }
3036 if (priv->plat->tx_coe)
3037 - pr_info(" TX Checksum insertion supported\n");
3038 + dev_info(priv->device, "TX Checksum insertion supported\n");
3039
3040 if (priv->plat->pmt) {
3041 - pr_info(" Wake-Up On Lan supported\n");
3042 + dev_info(priv->device, "Wake-Up On Lan supported\n");
3043 device_set_wakeup_capable(priv->device, 1);
3044 }
3045
3046 if (priv->dma_cap.tsoen)
3047 - pr_info(" TSO supported\n");
3048 + dev_info(priv->device, "TSO supported\n");
3049
3050 return 0;
3051 }
3052 @@ -3287,8 +3283,8 @@ int stmmac_dvr_probe(struct device *devi
3053
3054 priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
3055 if (IS_ERR(priv->stmmac_clk)) {
3056 - dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
3057 - __func__);
3058 + netdev_warn(priv->dev, "%s: warning: cannot get CSR clock\n",
3059 + __func__);
3060 /* If failed to obtain stmmac_clk and specific clk_csr value
3061 * is NOT passed from the platform, probe fail.
3062 */
3063 @@ -3337,7 +3333,7 @@ int stmmac_dvr_probe(struct device *devi
3064 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
3065 ndev->hw_features |= NETIF_F_TSO;
3066 priv->tso = true;
3067 - pr_info(" TSO feature enabled\n");
3068 + dev_info(priv->device, "TSO feature enabled\n");
3069 }
3070 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
3071 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
3072 @@ -3357,13 +3353,13 @@ int stmmac_dvr_probe(struct device *devi
3073 */
3074 if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
3075 priv->use_riwt = 1;
3076 - pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
3077 + dev_info(priv->device,
3078 + "Enable RX Mitigation via HW Watchdog Timer\n");
3079 }
3080
3081 netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
3082
3083 spin_lock_init(&priv->lock);
3084 - spin_lock_init(&priv->tx_lock);
3085
3086 /* If a specific clk_csr value is passed from the platform
3087 * this means that the CSR Clock Range selection cannot be
3088 @@ -3384,15 +3380,17 @@ int stmmac_dvr_probe(struct device *devi
3089 /* MDIO bus Registration */
3090 ret = stmmac_mdio_register(ndev);
3091 if (ret < 0) {
3092 - pr_debug("%s: MDIO bus (id: %d) registration failed",
3093 - __func__, priv->plat->bus_id);
3094 - goto error_napi_register;
3095 + dev_err(priv->device,
3096 + "%s: MDIO bus (id: %d) registration failed",
3097 + __func__, priv->plat->bus_id);
3098 + goto error_mdio_register;
3099 }
3100 }
3101
3102 ret = register_netdev(ndev);
3103 if (ret) {
3104 - pr_err("%s: ERROR %i registering the device\n", __func__, ret);
3105 + dev_err(priv->device, "%s: ERROR %i registering the device\n",
3106 + __func__, ret);
3107 goto error_netdev_register;
3108 }
3109
3110 @@ -3403,7 +3401,7 @@ error_netdev_register:
3111 priv->hw->pcs != STMMAC_PCS_TBI &&
3112 priv->hw->pcs != STMMAC_PCS_RTBI)
3113 stmmac_mdio_unregister(ndev);
3114 -error_napi_register:
3115 +error_mdio_register:
3116 netif_napi_del(&priv->napi);
3117 error_hw_init:
3118 clk_disable_unprepare(priv->pclk);
3119 @@ -3427,7 +3425,7 @@ int stmmac_dvr_remove(struct device *dev
3120 struct net_device *ndev = dev_get_drvdata(dev);
3121 struct stmmac_priv *priv = netdev_priv(ndev);
3122
3123 - pr_info("%s:\n\tremoving driver", __func__);
3124 + netdev_info(priv->dev, "%s: removing driver", __func__);
3125
3126 priv->hw->dma->stop_rx(priv->ioaddr);
3127 priv->hw->dma->stop_tx(priv->ioaddr);
3128 @@ -3465,8 +3463,8 @@ int stmmac_suspend(struct device *dev)
3129 if (!ndev || !netif_running(ndev))
3130 return 0;
3131
3132 - if (priv->phydev)
3133 - phy_stop(priv->phydev);
3134 + if (ndev->phydev)
3135 + phy_stop(ndev->phydev);
3136
3137 spin_lock_irqsave(&priv->lock, flags);
3138
3139 @@ -3560,8 +3558,8 @@ int stmmac_resume(struct device *dev)
3140
3141 spin_unlock_irqrestore(&priv->lock, flags);
3142
3143 - if (priv->phydev)
3144 - phy_start(priv->phydev);
3145 + if (ndev->phydev)
3146 + phy_start(ndev->phydev);
3147
3148 return 0;
3149 }
3150 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
3151 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
3152 @@ -42,13 +42,6 @@
3153 #define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
3154 #define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
3155
3156 -#define MII_PHY_ADDR_GMAC4_SHIFT 21
3157 -#define MII_PHY_ADDR_GMAC4_MASK GENMASK(25, 21)
3158 -#define MII_PHY_REG_GMAC4_SHIFT 16
3159 -#define MII_PHY_REG_GMAC4_MASK GENMASK(20, 16)
3160 -#define MII_CSR_CLK_GMAC4_SHIFT 8
3161 -#define MII_CSR_CLK_GMAC4_MASK GENMASK(11, 8)
3162 -
3163 static int stmmac_mdio_busy_wait(void __iomem *ioaddr, unsigned int mii_addr)
3164 {
3165 unsigned long curr;
3166 @@ -68,8 +61,8 @@ static int stmmac_mdio_busy_wait(void __
3167 /**
3168 * stmmac_mdio_read
3169 * @bus: points to the mii_bus structure
3170 - * @phyaddr: MII addr reg bits 15-11
3171 - * @phyreg: MII addr reg bits 10-6
3172 + * @phyaddr: MII addr
3173 + * @phyreg: MII reg
3174 * Description: it reads data from the MII register from within the phy device.
3175 * For the 7111 GMAC, we must set the bit 0 in the MII address register while
3176 * accessing the PHY registers.
3177 @@ -83,14 +76,20 @@ static int stmmac_mdio_read(struct mii_b
3178 unsigned int mii_data = priv->hw->mii.data;
3179
3180 int data;
3181 - u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
3182 - ((phyreg << 6) & (0x000007C0)));
3183 - regValue |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
3184 + u32 value = MII_BUSY;
3185 +
3186 + value |= (phyaddr << priv->hw->mii.addr_shift)
3187 + & priv->hw->mii.addr_mask;
3188 + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
3189 + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
3190 + & priv->hw->mii.clk_csr_mask;
3191 + if (priv->plat->has_gmac4)
3192 + value |= MII_GMAC4_READ;
3193
3194 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
3195 return -EBUSY;
3196
3197 - writel(regValue, priv->ioaddr + mii_address);
3198 + writel(value, priv->ioaddr + mii_address);
3199
3200 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
3201 return -EBUSY;
3202 @@ -104,8 +103,8 @@ static int stmmac_mdio_read(struct mii_b
3203 /**
3204 * stmmac_mdio_write
3205 * @bus: points to the mii_bus structure
3206 - * @phyaddr: MII addr reg bits 15-11
3207 - * @phyreg: MII addr reg bits 10-6
3208 + * @phyaddr: MII addr
3209 + * @phyreg: MII reg
3210 * @phydata: phy data
3211 * Description: it writes the data into the MII register from within the device.
3212 */
3213 @@ -117,85 +116,18 @@ static int stmmac_mdio_write(struct mii_
3214 unsigned int mii_address = priv->hw->mii.addr;
3215 unsigned int mii_data = priv->hw->mii.data;
3216
3217 - u16 value =
3218 - (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
3219 - | MII_WRITE;
3220 -
3221 - value |= MII_BUSY | ((priv->clk_csr & 0xF) << 2);
3222 -
3223 - /* Wait until any existing MII operation is complete */
3224 - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
3225 - return -EBUSY;
3226 -
3227 - /* Set the MII address register to write */
3228 - writel(phydata, priv->ioaddr + mii_data);
3229 - writel(value, priv->ioaddr + mii_address);
3230 -
3231 - /* Wait until any existing MII operation is complete */
3232 - return stmmac_mdio_busy_wait(priv->ioaddr, mii_address);
3233 -}
3234 -
3235 -/**
3236 - * stmmac_mdio_read_gmac4
3237 - * @bus: points to the mii_bus structure
3238 - * @phyaddr: MII addr reg bits 25-21
3239 - * @phyreg: MII addr reg bits 20-16
3240 - * Description: it reads data from the MII register of GMAC4 from within
3241 - * the phy device.
3242 - */
3243 -static int stmmac_mdio_read_gmac4(struct mii_bus *bus, int phyaddr, int phyreg)
3244 -{
3245 - struct net_device *ndev = bus->priv;
3246 - struct stmmac_priv *priv = netdev_priv(ndev);
3247 - unsigned int mii_address = priv->hw->mii.addr;
3248 - unsigned int mii_data = priv->hw->mii.data;
3249 - int data;
3250 - u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
3251 - (MII_PHY_ADDR_GMAC4_MASK)) |
3252 - ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
3253 - (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_READ;
3254 -
3255 - value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
3256 - << MII_CSR_CLK_GMAC4_SHIFT);
3257 -
3258 - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
3259 - return -EBUSY;
3260 -
3261 - writel(value, priv->ioaddr + mii_address);
3262 -
3263 - if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
3264 - return -EBUSY;
3265 -
3266 - /* Read the data from the MII data register */
3267 - data = (int)readl(priv->ioaddr + mii_data);
3268 -
3269 - return data;
3270 -}
3271 -
3272 -/**
3273 - * stmmac_mdio_write_gmac4
3274 - * @bus: points to the mii_bus structure
3275 - * @phyaddr: MII addr reg bits 25-21
3276 - * @phyreg: MII addr reg bits 20-16
3277 - * @phydata: phy data
3278 - * Description: it writes the data into the MII register of GMAC4 from within
3279 - * the device.
3280 - */
3281 -static int stmmac_mdio_write_gmac4(struct mii_bus *bus, int phyaddr, int phyreg,
3282 - u16 phydata)
3283 -{
3284 - struct net_device *ndev = bus->priv;
3285 - struct stmmac_priv *priv = netdev_priv(ndev);
3286 - unsigned int mii_address = priv->hw->mii.addr;
3287 - unsigned int mii_data = priv->hw->mii.data;
3288 -
3289 - u32 value = (((phyaddr << MII_PHY_ADDR_GMAC4_SHIFT) &
3290 - (MII_PHY_ADDR_GMAC4_MASK)) |
3291 - ((phyreg << MII_PHY_REG_GMAC4_SHIFT) &
3292 - (MII_PHY_REG_GMAC4_MASK))) | MII_GMAC4_WRITE;
3293 + u32 value = MII_BUSY;
3294
3295 - value |= MII_BUSY | ((priv->clk_csr & MII_CSR_CLK_GMAC4_MASK)
3296 - << MII_CSR_CLK_GMAC4_SHIFT);
3297 + value |= (phyaddr << priv->hw->mii.addr_shift)
3298 + & priv->hw->mii.addr_mask;
3299 + value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
3300 +
3301 + value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
3302 + & priv->hw->mii.clk_csr_mask;
3303 + if (priv->plat->has_gmac4)
3304 + value |= MII_GMAC4_WRITE;
3305 + else
3306 + value |= MII_WRITE;
3307
3308 /* Wait until any existing MII operation is complete */
3309 if (stmmac_mdio_busy_wait(priv->ioaddr, mii_address))
3310 @@ -260,7 +192,7 @@ int stmmac_mdio_reset(struct mii_bus *bu
3311 #endif
3312
3313 if (data->phy_reset) {
3314 - pr_debug("stmmac_mdio_reset: calling phy_reset\n");
3315 + netdev_dbg(ndev, "stmmac_mdio_reset: calling phy_reset\n");
3316 data->phy_reset(priv->plat->bsp_priv);
3317 }
3318
3319 @@ -305,13 +237,8 @@ int stmmac_mdio_register(struct net_devi
3320 #endif
3321
3322 new_bus->name = "stmmac";
3323 - if (priv->plat->has_gmac4) {
3324 - new_bus->read = &stmmac_mdio_read_gmac4;
3325 - new_bus->write = &stmmac_mdio_write_gmac4;
3326 - } else {
3327 - new_bus->read = &stmmac_mdio_read;
3328 - new_bus->write = &stmmac_mdio_write;
3329 - }
3330 + new_bus->read = &stmmac_mdio_read;
3331 + new_bus->write = &stmmac_mdio_write;
3332
3333 new_bus->reset = &stmmac_mdio_reset;
3334 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3335 @@ -325,7 +252,7 @@ int stmmac_mdio_register(struct net_devi
3336 else
3337 err = mdiobus_register(new_bus);
3338 if (err != 0) {
3339 - pr_err("%s: Cannot register as MDIO bus\n", new_bus->name);
3340 + netdev_err(ndev, "Cannot register the MDIO bus\n");
3341 goto bus_register_fail;
3342 }
3343
3344 @@ -372,16 +299,16 @@ int stmmac_mdio_register(struct net_devi
3345 irq_str = irq_num;
3346 break;
3347 }
3348 - pr_info("%s: PHY ID %08x at %d IRQ %s (%s)%s\n",
3349 - ndev->name, phydev->phy_id, addr,
3350 - irq_str, phydev_name(phydev),
3351 - act ? " active" : "");
3352 + netdev_info(ndev, "PHY ID %08x at %d IRQ %s (%s)%s\n",
3353 + phydev->phy_id, addr,
3354 + irq_str, phydev_name(phydev),
3355 + act ? " active" : "");
3356 found = 1;
3357 }
3358 }
3359
3360 if (!found && !mdio_node) {
3361 - pr_warn("%s: No PHY found\n", ndev->name);
3362 + netdev_warn(ndev, "No PHY found\n");
3363 mdiobus_unregister(new_bus);
3364 mdiobus_free(new_bus);
3365 return -ENODEV;
3366 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
3367 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
3368 @@ -81,6 +81,7 @@ static void stmmac_default_data(struct p
3369 plat->mdio_bus_data->phy_mask = 0;
3370
3371 plat->dma_cfg->pbl = 32;
3372 + plat->dma_cfg->pblx8 = true;
3373 /* TODO: AXI */
3374
3375 /* Set default value for multicast hash bins */
3376 @@ -88,6 +89,9 @@ static void stmmac_default_data(struct p
3377
3378 /* Set default value for unicast filter entries */
3379 plat->unicast_filter_entries = 1;
3380 +
3381 + /* Set the maxmtu to a default of JUMBO_LEN */
3382 + plat->maxmtu = JUMBO_LEN;
3383 }
3384
3385 static int quark_default_data(struct plat_stmmacenet_data *plat,
3386 @@ -115,6 +119,7 @@ static int quark_default_data(struct pla
3387 plat->mdio_bus_data->phy_mask = 0;
3388
3389 plat->dma_cfg->pbl = 16;
3390 + plat->dma_cfg->pblx8 = true;
3391 plat->dma_cfg->fixed_burst = 1;
3392 /* AXI (TODO) */
3393
3394 @@ -124,6 +129,9 @@ static int quark_default_data(struct pla
3395 /* Set default value for unicast filter entries */
3396 plat->unicast_filter_entries = 1;
3397
3398 + /* Set the maxmtu to a default of JUMBO_LEN */
3399 + plat->maxmtu = JUMBO_LEN;
3400 +
3401 return 0;
3402 }
3403
3404 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3405 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
3406 @@ -292,6 +292,7 @@ stmmac_probe_config_dt(struct platform_d
3407 if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
3408 of_device_is_compatible(np, "snps,dwmac-4.10a")) {
3409 plat->has_gmac4 = 1;
3410 + plat->has_gmac = 0;
3411 plat->pmt = 1;
3412 plat->tso_en = of_property_read_bool(np, "snps,tso");
3413 }
3414 @@ -303,21 +304,25 @@ stmmac_probe_config_dt(struct platform_d
3415 plat->force_sf_dma_mode = 1;
3416 }
3417
3418 - if (of_find_property(np, "snps,pbl", NULL)) {
3419 - dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
3420 - GFP_KERNEL);
3421 - if (!dma_cfg) {
3422 - stmmac_remove_config_dt(pdev, plat);
3423 - return ERR_PTR(-ENOMEM);
3424 - }
3425 - plat->dma_cfg = dma_cfg;
3426 - of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
3427 - dma_cfg->aal = of_property_read_bool(np, "snps,aal");
3428 - dma_cfg->fixed_burst =
3429 - of_property_read_bool(np, "snps,fixed-burst");
3430 - dma_cfg->mixed_burst =
3431 - of_property_read_bool(np, "snps,mixed-burst");
3432 - }
3433 + dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
3434 + GFP_KERNEL);
3435 + if (!dma_cfg) {
3436 + stmmac_remove_config_dt(pdev, plat);
3437 + return ERR_PTR(-ENOMEM);
3438 + }
3439 + plat->dma_cfg = dma_cfg;
3440 +
3441 + of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
3442 + if (!dma_cfg->pbl)
3443 + dma_cfg->pbl = DEFAULT_DMA_PBL;
3444 + of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
3445 + of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
3446 + dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");
3447 +
3448 + dma_cfg->aal = of_property_read_bool(np, "snps,aal");
3449 + dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
3450 + dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
3451 +
3452 plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
3453 if (plat->force_thresh_dma_mode) {
3454 plat->force_sf_dma_mode = 0;
3455 @@ -445,9 +450,7 @@ static int stmmac_pltfr_suspend(struct d
3456 struct platform_device *pdev = to_platform_device(dev);
3457
3458 ret = stmmac_suspend(dev);
3459 - if (priv->plat->suspend)
3460 - priv->plat->suspend(pdev, priv->plat->bsp_priv);
3461 - else if (priv->plat->exit)
3462 + if (priv->plat->exit)
3463 priv->plat->exit(pdev, priv->plat->bsp_priv);
3464
3465 return ret;
3466 @@ -466,9 +469,7 @@ static int stmmac_pltfr_resume(struct de
3467 struct stmmac_priv *priv = netdev_priv(ndev);
3468 struct platform_device *pdev = to_platform_device(dev);
3469
3470 - if (priv->plat->resume)
3471 - priv->plat->resume(pdev, priv->plat->bsp_priv);
3472 - else if (priv->plat->init)
3473 + if (priv->plat->init)
3474 priv->plat->init(pdev, priv->plat->bsp_priv);
3475
3476 return stmmac_resume(dev);
3477 --- a/include/linux/stmmac.h
3478 +++ b/include/linux/stmmac.h
3479 @@ -88,6 +88,9 @@ struct stmmac_mdio_bus_data {
3480
3481 struct stmmac_dma_cfg {
3482 int pbl;
3483 + int txpbl;
3484 + int rxpbl;
3485 + bool pblx8;
3486 int fixed_burst;
3487 int mixed_burst;
3488 bool aal;
3489 @@ -135,8 +138,6 @@ struct plat_stmmacenet_data {
3490 void (*bus_setup)(void __iomem *ioaddr);
3491 int (*init)(struct platform_device *pdev, void *priv);
3492 void (*exit)(struct platform_device *pdev, void *priv);
3493 - void (*suspend)(struct platform_device *pdev, void *priv);
3494 - void (*resume)(struct platform_device *pdev, void *priv);
3495 void *bsp_priv;
3496 struct stmmac_axi *axi;
3497 int has_gmac4;