kernel: bump 4.9 to 4.9.63
[openwrt/openwrt.git] / target / linux / sunxi / patches-4.9 / 0052-stmmac-form-4-12.patch
1 --- a/Documentation/devicetree/bindings/net/stmmac.txt
2 +++ b/Documentation/devicetree/bindings/net/stmmac.txt
3 @@ -7,9 +7,12 @@ Required properties:
4 - interrupt-parent: Should be the phandle for the interrupt controller
5 that services interrupts for this device
6 - interrupts: Should contain the STMMAC interrupts
7 -- interrupt-names: Should contain the interrupt names "macirq"
8 - "eth_wake_irq" if this interrupt is supported in the "interrupts"
9 - property
10 +- interrupt-names: Should contain a list of interrupt names corresponding to
11 + the interrupts in the interrupts property, if available.
12 + Valid interrupt names are:
13 + - "macirq" (combined signal for various interrupt events)
14 + - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection)
15 + - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state)
16 - phy-mode: See ethernet.txt file in the same directory.
17 - snps,reset-gpio gpio number for phy reset.
18 - snps,reset-active-low boolean flag to indicate if phy reset is active low.
19 @@ -28,9 +31,9 @@ Optional properties:
20 clocks may be specified in derived bindings.
21 - clock-names: One name for each entry in the clocks property, the
22 first one should be "stmmaceth" and the second one should be "pclk".
23 -- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
24 - available this clock is used for programming the Timestamp Addend Register.
25 - If not passed then the system clock will be used and this is fine on some
26 +- ptp_ref: this is the PTP reference clock; if PTP is available, this clock
27 +  is used for programming the Timestamp Addend Register. If not passed then
28 +  the system clock will be used and this is fine on some
29 platforms.
30 - tx-fifo-depth: See ethernet.txt file in the same directory
31 - rx-fifo-depth: See ethernet.txt file in the same directory
32 @@ -72,7 +75,45 @@ Optional properties:
33 - snps,mb: mixed-burst
34 - snps,rb: rebuild INCRx Burst
35 - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
36 -
37 +- Multiple RX Queues parameters: below is the list of all the parameters to
38 + configure the multiple RX queues:
39 + - snps,rx-queues-to-use: number of RX queues to be used in the driver
40 + - Choose one of these RX scheduling algorithms:
41 + - snps,rx-sched-sp: Strict priority
42 + - snps,rx-sched-wsp: Weighted Strict priority
43 + - For each RX queue
44 + - Choose one of these modes:
45 + - snps,dcb-algorithm: Queue to be enabled as DCB
46 + - snps,avb-algorithm: Queue to be enabled as AVB
47 + - snps,map-to-dma-channel: Channel to map
48 +  - Specify specific packet routing:
49 + - snps,route-avcp: AV Untagged Control packets
50 + - snps,route-ptp: PTP Packets
51 + - snps,route-dcbcp: DCB Control Packets
52 + - snps,route-up: Untagged Packets
53 + - snps,route-multi-broad: Multicast & Broadcast Packets
54 + - snps,priority: RX queue priority (Range: 0x0 to 0xF)
55 +- Multiple TX Queues parameters: below is the list of all the parameters to
56 + configure the multiple TX queues:
57 + - snps,tx-queues-to-use: number of TX queues to be used in the driver
58 + - Choose one of these TX scheduling algorithms:
59 + - snps,tx-sched-wrr: Weighted Round Robin
60 + - snps,tx-sched-wfq: Weighted Fair Queuing
61 + - snps,tx-sched-dwrr: Deficit Weighted Round Robin
62 + - snps,tx-sched-sp: Strict priority
63 + - For each TX queue
64 + - snps,weight: TX queue weight (if using a DCB weight algorithm)
65 + - Choose one of these modes:
66 + - snps,dcb-algorithm: TX queue will be working in DCB
67 + - snps,avb-algorithm: TX queue will be working in AVB
68 + [Attention] Queue 0 is reserved for legacy traffic
69 + and so no AVB is available in this queue.
70 +  - Configure the Credit Based Shaper (if AVB mode is selected):
71 +   - snps,send_slope: CBS send slope credit value
72 +   - snps,idle_slope: CBS idle slope credit value
73 +   - snps,high_credit: CBS hiCredit value
74 +   - snps,low_credit: CBS loCredit value
75 + - snps,priority: TX queue priority (Range: 0x0 to 0xF)
76 Examples:
77
78 stmmac_axi_setup: stmmac-axi-config {
79 @@ -81,12 +122,41 @@ Examples:
80 snps,blen = <256 128 64 32 0 0 0>;
81 };
82
83 + mtl_rx_setup: rx-queues-config {
84 + snps,rx-queues-to-use = <1>;
85 + snps,rx-sched-sp;
86 + queue0 {
87 + snps,dcb-algorithm;
88 + snps,map-to-dma-channel = <0x0>;
89 + snps,priority = <0x0>;
90 + };
91 + };
92 +
93 + mtl_tx_setup: tx-queues-config {
94 + snps,tx-queues-to-use = <2>;
95 + snps,tx-sched-wrr;
96 + queue0 {
97 + snps,weight = <0x10>;
98 + snps,dcb-algorithm;
99 + snps,priority = <0x0>;
100 + };
101 +
102 + queue1 {
103 + snps,avb-algorithm;
104 + snps,send_slope = <0x1000>;
105 + snps,idle_slope = <0x1000>;
106 + snps,high_credit = <0x3E800>;
107 + snps,low_credit = <0xFFC18000>;
108 + snps,priority = <0x1>;
109 + };
110 + };
111 +
112 gmac0: ethernet@e0800000 {
113 compatible = "st,spear600-gmac";
114 reg = <0xe0800000 0x8000>;
115 interrupt-parent = <&vic1>;
116 - interrupts = <24 23>;
117 - interrupt-names = "macirq", "eth_wake_irq";
118 + interrupts = <24 23 22>;
119 + interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
120 mac-address = [000000000000]; /* Filled in by U-Boot */
121 max-frame-size = <3800>;
122 phy-mode = "gmii";
123 @@ -104,4 +174,6 @@ Examples:
124 phy1: ethernet-phy@0 {
125 };
126 };
127 + snps,mtl-rx-config = <&mtl_rx_setup>;
128 + snps,mtl-tx-config = <&mtl_tx_setup>;
129 };
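To make the binding example above concrete: the queue1 AVB values are what eventually reach the MAC core through the config_cbs callback this patch adds to struct stmmac_ops. A minimal, hypothetical call sketch (the priv/hw plumbing and the helper name are assumptions for illustration, not part of the patch):

        /* Hypothetical helper: pushes the queue1 values from the DT example
         * above into the hardware via the new stmmac_ops::config_cbs hook.
         */
        static void example_setup_avb_queue1(struct stmmac_priv *priv)
        {
                u32 queue = 1;

                priv->hw->mac->config_cbs(priv->hw,
                                          0x1000,      /* snps,send_slope */
                                          0x1000,      /* snps,idle_slope */
                                          0x3E800,     /* snps,high_credit */
                                          0xFFC18000,  /* snps,low_credit */
                                          queue);
        }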
130 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
131 +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
132 @@ -37,6 +37,7 @@
133 #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
134 #define TSE_PCS_CONTROL_REG 0x00
135 #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
136 +#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
137 #define TSE_PCS_IF_MODE_REG 0x28
138 #define TSE_PCS_LINK_TIMER_0_REG 0x24
139 #define TSE_PCS_LINK_TIMER_1_REG 0x26
140 @@ -65,6 +66,7 @@
141 #define TSE_PCS_SW_RESET_TIMEOUT 100
142 #define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
143 #define TSE_PCS_USE_SGMII_ENA BIT(0)
144 +#define TSE_PCS_IF_USE_SGMII 0x03
145
146 #define SGMII_ADAPTER_CTRL_REG 0x00
147 #define SGMII_ADAPTER_DISABLE 0x0001
148 @@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, str
149 {
150 int ret = 0;
151
152 - writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
153 + writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
154 +
155 + writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
156
157 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
158 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
159 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
160 +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
161 @@ -26,12 +26,15 @@
162
163 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
164 {
165 - struct stmmac_priv *priv = (struct stmmac_priv *)p;
166 - unsigned int entry = priv->cur_tx;
167 - struct dma_desc *desc = priv->dma_tx + entry;
168 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
169 unsigned int nopaged_len = skb_headlen(skb);
170 + struct stmmac_priv *priv = tx_q->priv_data;
171 + unsigned int entry = tx_q->cur_tx;
172 unsigned int bmax, des2;
173 unsigned int i = 1, len;
174 + struct dma_desc *desc;
175 +
176 + desc = tx_q->dma_tx + entry;
177
178 if (priv->plat->enh_desc)
179 bmax = BUF_SIZE_8KiB;
180 @@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, str
181 desc->des2 = cpu_to_le32(des2);
182 if (dma_mapping_error(priv->device, des2))
183 return -1;
184 - priv->tx_skbuff_dma[entry].buf = des2;
185 - priv->tx_skbuff_dma[entry].len = bmax;
186 + tx_q->tx_skbuff_dma[entry].buf = des2;
187 + tx_q->tx_skbuff_dma[entry].len = bmax;
188 /* do not close the descriptor and do not set own bit */
189 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
190 - 0, false);
191 + 0, false, skb->len);
192
193 while (len != 0) {
194 - priv->tx_skbuff[entry] = NULL;
195 + tx_q->tx_skbuff[entry] = NULL;
196 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
197 - desc = priv->dma_tx + entry;
198 + desc = tx_q->dma_tx + entry;
199
200 if (len > bmax) {
201 des2 = dma_map_single(priv->device,
202 @@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, str
203 desc->des2 = cpu_to_le32(des2);
204 if (dma_mapping_error(priv->device, des2))
205 return -1;
206 - priv->tx_skbuff_dma[entry].buf = des2;
207 - priv->tx_skbuff_dma[entry].len = bmax;
208 + tx_q->tx_skbuff_dma[entry].buf = des2;
209 + tx_q->tx_skbuff_dma[entry].len = bmax;
210 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
211 STMMAC_CHAIN_MODE, 1,
212 - false);
213 + false, skb->len);
214 len -= bmax;
215 i++;
216 } else {
217 @@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, str
218 desc->des2 = cpu_to_le32(des2);
219 if (dma_mapping_error(priv->device, des2))
220 return -1;
221 - priv->tx_skbuff_dma[entry].buf = des2;
222 - priv->tx_skbuff_dma[entry].len = len;
223 + tx_q->tx_skbuff_dma[entry].buf = des2;
224 + tx_q->tx_skbuff_dma[entry].len = len;
225 /* last descriptor can be set now */
226 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
227 STMMAC_CHAIN_MODE, 1,
228 - true);
229 + true, skb->len);
230 len = 0;
231 }
232 }
233
234 - priv->cur_tx = entry;
235 + tx_q->cur_tx = entry;
236
237 return entry;
238 }
239 @@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *
240
241 static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
242 {
243 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
244 + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
245 + struct stmmac_priv *priv = rx_q->priv_data;
246
247 if (priv->hwts_rx_en && !priv->extend_desc)
248 /* NOTE: Device will overwrite des3 with timestamp value if
249 * 1588-2002 time stamping is enabled, hence reinitialize it
250 * to keep explicit chaining in the descriptor.
251 */
252 - p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
253 - (((priv->dirty_rx) + 1) %
254 + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
255 + (((rx_q->dirty_rx) + 1) %
256 DMA_RX_SIZE) *
257 sizeof(struct dma_desc)));
258 }
259
260 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
261 {
262 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
263 - unsigned int entry = priv->dirty_tx;
264 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
265 + struct stmmac_priv *priv = tx_q->priv_data;
266 + unsigned int entry = tx_q->dirty_tx;
267
268 - if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
269 + if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
270 priv->hwts_tx_en)
271 /* NOTE: Device will overwrite des3 with timestamp value if
272 * 1588-2002 time stamping is enabled, hence reinitialize it
273 * to keep explicit chaining in the descriptor.
274 */
275 - p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
276 - ((priv->dirty_tx + 1) % DMA_TX_SIZE))
277 + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
278 + ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
279 * sizeof(struct dma_desc)));
280 }
281
282 --- a/drivers/net/ethernet/stmicro/stmmac/common.h
283 +++ b/drivers/net/ethernet/stmicro/stmmac/common.h
284 @@ -246,6 +246,15 @@ struct stmmac_extra_stats {
285 #define STMMAC_TX_MAX_FRAMES 256
286 #define STMMAC_TX_FRAMES 64
287
288 +/* Packets types */
289 +enum packets_types {
290 + PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
291 + PACKET_PTPQ = 0x2, /* PTP Packets */
292 + PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
293 + PACKET_UPQ = 0x4, /* Untagged Packets */
294 + PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
295 +};
296 +
297 /* Rx IPC status */
298 enum rx_frame_status {
299 good_frame = 0x0,
300 @@ -324,6 +333,9 @@ struct dma_features {
301 unsigned int number_tx_queues;
302 /* Alternate (enhanced) DESC mode */
303 unsigned int enh_desc;
304 + /* TX and RX FIFO sizes */
305 + unsigned int tx_fifo_size;
306 + unsigned int rx_fifo_size;
307 };
308
309 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
310 @@ -361,7 +373,7 @@ struct stmmac_desc_ops {
311 /* Invoked by the xmit function to prepare the tx descriptor */
312 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
313 bool csum_flag, int mode, bool tx_own,
314 - bool ls);
315 + bool ls, unsigned int tot_pkt_len);
316 void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
317 int len2, bool tx_own, bool ls,
318 unsigned int tcphdrlen,
319 @@ -413,6 +425,14 @@ struct stmmac_dma_ops {
320 int (*reset)(void __iomem *ioaddr);
321 void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
322 u32 dma_tx, u32 dma_rx, int atds);
323 + void (*init_chan)(void __iomem *ioaddr,
324 + struct stmmac_dma_cfg *dma_cfg, u32 chan);
325 + void (*init_rx_chan)(void __iomem *ioaddr,
326 + struct stmmac_dma_cfg *dma_cfg,
327 + u32 dma_rx_phy, u32 chan);
328 + void (*init_tx_chan)(void __iomem *ioaddr,
329 + struct stmmac_dma_cfg *dma_cfg,
330 + u32 dma_tx_phy, u32 chan);
331 /* Configure the AXI Bus Mode Register */
332 void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
333 /* Dump DMA registers */
334 @@ -421,25 +441,28 @@ struct stmmac_dma_ops {
335 * An invalid value enables the store-and-forward mode */
336 void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
337 int rxfifosz);
338 + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
339 + int fifosz);
340 + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
341 /* To track extra statistic (if supported) */
342 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
343 void __iomem *ioaddr);
344 void (*enable_dma_transmission) (void __iomem *ioaddr);
345 - void (*enable_dma_irq) (void __iomem *ioaddr);
346 - void (*disable_dma_irq) (void __iomem *ioaddr);
347 - void (*start_tx) (void __iomem *ioaddr);
348 - void (*stop_tx) (void __iomem *ioaddr);
349 - void (*start_rx) (void __iomem *ioaddr);
350 - void (*stop_rx) (void __iomem *ioaddr);
351 + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
352 + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
353 + void (*start_tx)(void __iomem *ioaddr, u32 chan);
354 + void (*stop_tx)(void __iomem *ioaddr, u32 chan);
355 + void (*start_rx)(void __iomem *ioaddr, u32 chan);
356 + void (*stop_rx)(void __iomem *ioaddr, u32 chan);
357 int (*dma_interrupt) (void __iomem *ioaddr,
358 - struct stmmac_extra_stats *x);
359 + struct stmmac_extra_stats *x, u32 chan);
360 /* If supported then get the optional core features */
361 void (*get_hw_feature)(void __iomem *ioaddr,
362 struct dma_features *dma_cap);
363 /* Program the HW RX Watchdog */
364 - void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
365 - void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
366 - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
367 + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
368 + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
369 + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
370 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
371 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
372 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
373 @@ -451,20 +474,44 @@ struct mac_device_info;
374 struct stmmac_ops {
375 /* MAC core initialization */
376 void (*core_init)(struct mac_device_info *hw, int mtu);
377 + /* Enable the MAC RX/TX */
378 + void (*set_mac)(void __iomem *ioaddr, bool enable);
379 /* Enable and verify that the IPC module is supported */
380 int (*rx_ipc)(struct mac_device_info *hw);
381 /* Enable RX Queues */
382 - void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
383 + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
384 + /* RX Queues Priority */
385 + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
386 + /* TX Queues Priority */
387 + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
388 + /* RX Queues Routing */
389 + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
390 + u32 queue);
391 + /* Program RX Algorithms */
392 + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
393 + /* Program TX Algorithms */
394 + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
395 + /* Set MTL TX queues weight */
396 + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
397 + u32 weight, u32 queue);
398 + /* RX MTL queue to RX dma mapping */
399 + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
400 + /* Configure AV Algorithm */
401 + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
402 + u32 idle_slope, u32 high_credit, u32 low_credit,
403 + u32 queue);
404 /* Dump MAC registers */
405 void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
406 /* Handle extra events on specific interrupts hw dependent */
407 int (*host_irq_status)(struct mac_device_info *hw,
408 struct stmmac_extra_stats *x);
409 + /* Handle MTL interrupts */
410 + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
411 /* Multicast filter setting */
412 void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
413 /* Flow control setting */
414 void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
415 - unsigned int fc, unsigned int pause_time);
416 + unsigned int fc, unsigned int pause_time, u32 tx_cnt);
417 /* Set power management mode (e.g. magic frame) */
418 void (*pmt)(struct mac_device_info *hw, unsigned long mode);
419 /* Set/Get Unicast MAC addresses */
420 @@ -477,7 +524,8 @@ struct stmmac_ops {
421 void (*reset_eee_mode)(struct mac_device_info *hw);
422 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
423 void (*set_eee_pls)(struct mac_device_info *hw, int link);
424 - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
425 + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
426 + u32 rx_queues, u32 tx_queues);
427 /* PCS calls */
428 void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
429 bool loopback);
430 @@ -547,6 +595,11 @@ struct mac_device_info {
431 unsigned int ps;
432 };
433
434 +struct stmmac_rx_routing {
435 + u32 reg_mask;
436 + u32 reg_shift;
437 +};
438 +
439 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
440 int perfect_uc_entries,
441 int *synopsys_id);
442 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
443 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
444 @@ -14,16 +14,34 @@
445 #include <linux/clk.h>
446 #include <linux/clk-provider.h>
447 #include <linux/device.h>
448 +#include <linux/gpio/consumer.h>
449 #include <linux/ethtool.h>
450 #include <linux/io.h>
451 +#include <linux/iopoll.h>
452 #include <linux/ioport.h>
453 #include <linux/module.h>
454 +#include <linux/of_device.h>
455 #include <linux/of_net.h>
456 #include <linux/mfd/syscon.h>
457 #include <linux/platform_device.h>
458 +#include <linux/reset.h>
459 #include <linux/stmmac.h>
460
461 #include "stmmac_platform.h"
462 +#include "dwmac4.h"
463 +
464 +struct tegra_eqos {
465 + struct device *dev;
466 + void __iomem *regs;
467 +
468 + struct reset_control *rst;
469 + struct clk *clk_master;
470 + struct clk *clk_slave;
471 + struct clk *clk_tx;
472 + struct clk *clk_rx;
473 +
474 + struct gpio_desc *reset;
475 +};
476
477 static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
478 struct plat_stmmacenet_data *plat_dat)
479 @@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struc
480 return 0;
481 }
482
483 +static void *dwc_qos_probe(struct platform_device *pdev,
484 + struct plat_stmmacenet_data *plat_dat,
485 + struct stmmac_resources *stmmac_res)
486 +{
487 + int err;
488 +
489 + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
490 + if (IS_ERR(plat_dat->stmmac_clk)) {
491 + dev_err(&pdev->dev, "apb_pclk clock not found.\n");
492 + return ERR_CAST(plat_dat->stmmac_clk);
493 + }
494 +
495 + err = clk_prepare_enable(plat_dat->stmmac_clk);
496 + if (err < 0) {
497 + dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
498 + err);
499 + return ERR_PTR(err);
500 + }
501 +
502 + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
503 + if (IS_ERR(plat_dat->pclk)) {
504 + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
505 + err = PTR_ERR(plat_dat->pclk);
506 + goto disable;
507 + }
508 +
509 + err = clk_prepare_enable(plat_dat->pclk);
510 + if (err < 0) {
511 + dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
512 + err);
513 + goto disable;
514 + }
515 +
516 + return NULL;
517 +
518 +disable:
519 + clk_disable_unprepare(plat_dat->stmmac_clk);
520 + return ERR_PTR(err);
521 +}
522 +
523 +static int dwc_qos_remove(struct platform_device *pdev)
524 +{
525 + struct net_device *ndev = platform_get_drvdata(pdev);
526 + struct stmmac_priv *priv = netdev_priv(ndev);
527 +
528 + clk_disable_unprepare(priv->plat->pclk);
529 + clk_disable_unprepare(priv->plat->stmmac_clk);
530 +
531 + return 0;
532 +}
533 +
534 +#define SDMEMCOMPPADCTRL 0x8800
535 +#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
536 +
537 +#define AUTO_CAL_CONFIG 0x8804
538 +#define AUTO_CAL_CONFIG_START BIT(31)
539 +#define AUTO_CAL_CONFIG_ENABLE BIT(29)
540 +
541 +#define AUTO_CAL_STATUS 0x880c
542 +#define AUTO_CAL_STATUS_ACTIVE BIT(31)
543 +
544 +static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
545 +{
546 + struct tegra_eqos *eqos = priv;
547 + unsigned long rate = 125000000;
548 + bool needs_calibration = false;
549 + u32 value;
550 + int err;
551 +
552 + switch (speed) {
553 + case SPEED_1000:
554 + needs_calibration = true;
555 + rate = 125000000;
556 + break;
557 +
558 + case SPEED_100:
559 + needs_calibration = true;
560 + rate = 25000000;
561 + break;
562 +
563 + case SPEED_10:
564 + rate = 2500000;
565 + break;
566 +
567 + default:
568 + dev_err(eqos->dev, "invalid speed %u\n", speed);
569 + break;
570 + }
571 +
572 + if (needs_calibration) {
573 + /* calibrate */
574 + value = readl(eqos->regs + SDMEMCOMPPADCTRL);
575 + value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
576 + writel(value, eqos->regs + SDMEMCOMPPADCTRL);
577 +
578 + udelay(1);
579 +
580 + value = readl(eqos->regs + AUTO_CAL_CONFIG);
581 + value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
582 + writel(value, eqos->regs + AUTO_CAL_CONFIG);
583 +
584 + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
585 + value,
586 + value & AUTO_CAL_STATUS_ACTIVE,
587 + 1, 10);
588 + if (err < 0) {
589 + dev_err(eqos->dev, "calibration did not start\n");
590 + goto failed;
591 + }
592 +
593 + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
594 + value,
595 + (value & AUTO_CAL_STATUS_ACTIVE) == 0,
596 + 20, 200);
597 + if (err < 0) {
598 + dev_err(eqos->dev, "calibration didn't finish\n");
599 + goto failed;
600 + }
601 +
602 + failed:
603 + value = readl(eqos->regs + SDMEMCOMPPADCTRL);
604 + value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
605 + writel(value, eqos->regs + SDMEMCOMPPADCTRL);
606 + } else {
607 + value = readl(eqos->regs + AUTO_CAL_CONFIG);
608 + value &= ~AUTO_CAL_CONFIG_ENABLE;
609 + writel(value, eqos->regs + AUTO_CAL_CONFIG);
610 + }
611 +
612 + err = clk_set_rate(eqos->clk_tx, rate);
613 + if (err < 0)
614 + dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
615 +}
616 +
617 +static int tegra_eqos_init(struct platform_device *pdev, void *priv)
618 +{
619 + struct tegra_eqos *eqos = priv;
620 + unsigned long rate;
621 + u32 value;
622 +
623 + rate = clk_get_rate(eqos->clk_slave);
624 +
625 + value = (rate / 1000000) - 1;
626 + writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
627 +
628 + return 0;
629 +}
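tegra_eqos_init() programs GMAC_1US_TIC_COUNTER with the slave-bus clock rate in MHz minus one, so the MAC can derive a 1 microsecond reference tick. A worked example of the arithmetic (the 125 MHz rate is an assumed value, not taken from the patch):

        unsigned long rate = 125000000;    /* assumed clk_slave rate in Hz */
        u32 value = (rate / 1000000) - 1;  /* 125 - 1 = 124 written to the register */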
630 +
631 +static void *tegra_eqos_probe(struct platform_device *pdev,
632 + struct plat_stmmacenet_data *data,
633 + struct stmmac_resources *res)
634 +{
635 + struct tegra_eqos *eqos;
636 + int err;
637 +
638 + eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
639 + if (!eqos) {
640 + err = -ENOMEM;
641 + goto error;
642 + }
643 +
644 + eqos->dev = &pdev->dev;
645 + eqos->regs = res->addr;
646 +
647 + eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
648 + if (IS_ERR(eqos->clk_master)) {
649 + err = PTR_ERR(eqos->clk_master);
650 + goto error;
651 + }
652 +
653 + err = clk_prepare_enable(eqos->clk_master);
654 + if (err < 0)
655 + goto error;
656 +
657 + eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
658 + if (IS_ERR(eqos->clk_slave)) {
659 + err = PTR_ERR(eqos->clk_slave);
660 + goto disable_master;
661 + }
662 +
663 + data->stmmac_clk = eqos->clk_slave;
664 +
665 + err = clk_prepare_enable(eqos->clk_slave);
666 + if (err < 0)
667 + goto disable_master;
668 +
669 + eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
670 + if (IS_ERR(eqos->clk_rx)) {
671 + err = PTR_ERR(eqos->clk_rx);
672 + goto disable_slave;
673 + }
674 +
675 + err = clk_prepare_enable(eqos->clk_rx);
676 + if (err < 0)
677 + goto disable_slave;
678 +
679 + eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
680 + if (IS_ERR(eqos->clk_tx)) {
681 + err = PTR_ERR(eqos->clk_tx);
682 + goto disable_rx;
683 + }
684 +
685 + err = clk_prepare_enable(eqos->clk_tx);
686 + if (err < 0)
687 + goto disable_rx;
688 +
689 + eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
690 + if (IS_ERR(eqos->reset)) {
691 + err = PTR_ERR(eqos->reset);
692 + goto disable_tx;
693 + }
694 +
695 + usleep_range(2000, 4000);
696 + gpiod_set_value(eqos->reset, 0);
697 +
698 + eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
699 + if (IS_ERR(eqos->rst)) {
700 + err = PTR_ERR(eqos->rst);
701 + goto reset_phy;
702 + }
703 +
704 + err = reset_control_assert(eqos->rst);
705 + if (err < 0)
706 + goto reset_phy;
707 +
708 + usleep_range(2000, 4000);
709 +
710 + err = reset_control_deassert(eqos->rst);
711 + if (err < 0)
712 + goto reset_phy;
713 +
714 + usleep_range(2000, 4000);
715 +
716 + data->fix_mac_speed = tegra_eqos_fix_speed;
717 + data->init = tegra_eqos_init;
718 + data->bsp_priv = eqos;
719 +
720 + err = tegra_eqos_init(pdev, eqos);
721 + if (err < 0)
722 + goto reset;
723 +
724 +out:
725 + return eqos;
726 +
727 +reset:
728 + reset_control_assert(eqos->rst);
729 +reset_phy:
730 + gpiod_set_value(eqos->reset, 1);
731 +disable_tx:
732 + clk_disable_unprepare(eqos->clk_tx);
733 +disable_rx:
734 + clk_disable_unprepare(eqos->clk_rx);
735 +disable_slave:
736 + clk_disable_unprepare(eqos->clk_slave);
737 +disable_master:
738 + clk_disable_unprepare(eqos->clk_master);
739 +error:
740 + eqos = ERR_PTR(err);
741 + goto out;
742 +}
743 +
744 +static int tegra_eqos_remove(struct platform_device *pdev)
745 +{
746 + struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
747 +
748 + reset_control_assert(eqos->rst);
749 + gpiod_set_value(eqos->reset, 1);
750 + clk_disable_unprepare(eqos->clk_tx);
751 + clk_disable_unprepare(eqos->clk_rx);
752 + clk_disable_unprepare(eqos->clk_slave);
753 + clk_disable_unprepare(eqos->clk_master);
754 +
755 + return 0;
756 +}
757 +
758 +struct dwc_eth_dwmac_data {
759 + void *(*probe)(struct platform_device *pdev,
760 + struct plat_stmmacenet_data *data,
761 + struct stmmac_resources *res);
762 + int (*remove)(struct platform_device *pdev);
763 +};
764 +
765 +static const struct dwc_eth_dwmac_data dwc_qos_data = {
766 + .probe = dwc_qos_probe,
767 + .remove = dwc_qos_remove,
768 +};
769 +
770 +static const struct dwc_eth_dwmac_data tegra_eqos_data = {
771 + .probe = tegra_eqos_probe,
772 + .remove = tegra_eqos_remove,
773 +};
774 +
775 static int dwc_eth_dwmac_probe(struct platform_device *pdev)
776 {
777 + const struct dwc_eth_dwmac_data *data;
778 struct plat_stmmacenet_data *plat_dat;
779 struct stmmac_resources stmmac_res;
780 struct resource *res;
781 + void *priv;
782 int ret;
783
784 + data = of_device_get_match_data(&pdev->dev);
785 +
786 memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
787
788 /**
789 @@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct pl
790 if (IS_ERR(plat_dat))
791 return PTR_ERR(plat_dat);
792
793 - plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
794 - if (IS_ERR(plat_dat->stmmac_clk)) {
795 - dev_err(&pdev->dev, "apb_pclk clock not found.\n");
796 - ret = PTR_ERR(plat_dat->stmmac_clk);
797 - plat_dat->stmmac_clk = NULL;
798 - goto err_remove_config_dt;
799 + priv = data->probe(pdev, plat_dat, &stmmac_res);
800 + if (IS_ERR(priv)) {
801 + ret = PTR_ERR(priv);
802 + dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
803 + goto remove_config;
804 }
805 - clk_prepare_enable(plat_dat->stmmac_clk);
806 -
807 - plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
808 - if (IS_ERR(plat_dat->pclk)) {
809 - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
810 - ret = PTR_ERR(plat_dat->pclk);
811 - plat_dat->pclk = NULL;
812 - goto err_out_clk_dis_phy;
813 - }
814 - clk_prepare_enable(plat_dat->pclk);
815
816 ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
817 if (ret)
818 - goto err_out_clk_dis_aper;
819 + goto remove;
820
821 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
822 if (ret)
823 - goto err_out_clk_dis_aper;
824 + goto remove;
825
826 - return 0;
827 + return ret;
828
829 -err_out_clk_dis_aper:
830 - clk_disable_unprepare(plat_dat->pclk);
831 -err_out_clk_dis_phy:
832 - clk_disable_unprepare(plat_dat->stmmac_clk);
833 -err_remove_config_dt:
834 +remove:
835 + data->remove(pdev);
836 +remove_config:
837 stmmac_remove_config_dt(pdev, plat_dat);
838
839 return ret;
840 @@ -178,11 +479,29 @@ err_remove_config_dt:
841
842 static int dwc_eth_dwmac_remove(struct platform_device *pdev)
843 {
844 - return stmmac_pltfr_remove(pdev);
845 + struct net_device *ndev = platform_get_drvdata(pdev);
846 + struct stmmac_priv *priv = netdev_priv(ndev);
847 + const struct dwc_eth_dwmac_data *data;
848 + int err;
849 +
850 + data = of_device_get_match_data(&pdev->dev);
851 +
852 + err = stmmac_dvr_remove(&pdev->dev);
853 + if (err < 0)
854 + dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
855 +
856 + err = data->remove(pdev);
857 + if (err < 0)
858 + dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
859 +
860 + stmmac_remove_config_dt(pdev, priv->plat);
861 +
862 + return err;
863 }
864
865 static const struct of_device_id dwc_eth_dwmac_match[] = {
866 - { .compatible = "snps,dwc-qos-ethernet-4.10", },
867 + { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
868 + { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
869 { }
870 };
871 MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
872 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
873 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
874 @@ -74,6 +74,10 @@ struct rk_priv_data {
875 #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
876 #define GRF_CLR_BIT(nr) (BIT(nr+16))
877
878 +#define DELAY_ENABLE(soc, tx, rx) \
879 + (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
880 + ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
881 +
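DELAY_ENABLE() simply picks the per-SoC enable or disable GRF bit for each direction, depending on whether a delay value was requested. A worked expansion (the 0x30 delay value is only an illustration):

        /* DELAY_ENABLE(RK3288, 0x30, 0) evaluates to:
         *   RK3288_GMAC_TXCLK_DLY_ENABLE | RK3288_GMAC_RXCLK_DLY_DISABLE
         * i.e. the TX clock delay line is enabled, the RX one is bypassed.
         */
        u32 delay_bits = DELAY_ENABLE(RK3288, 0x30, 0);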
882 #define RK3228_GRF_MAC_CON0 0x0900
883 #define RK3228_GRF_MAC_CON1 0x0904
884
885 @@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct r
886 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
887 RK3228_GMAC_PHY_INTF_SEL_RGMII |
888 RK3228_GMAC_RMII_MODE_CLR |
889 - RK3228_GMAC_RXCLK_DLY_ENABLE |
890 - RK3228_GMAC_TXCLK_DLY_ENABLE);
891 + DELAY_ENABLE(RK3228, tx_delay, rx_delay));
892
893 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
894 RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
895 @@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct r
896 RK3288_GMAC_PHY_INTF_SEL_RGMII |
897 RK3288_GMAC_RMII_MODE_CLR);
898 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
899 - RK3288_GMAC_RXCLK_DLY_ENABLE |
900 - RK3288_GMAC_TXCLK_DLY_ENABLE |
901 + DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
902 RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
903 RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
904 }
905 @@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct r
906 RK3366_GMAC_PHY_INTF_SEL_RGMII |
907 RK3366_GMAC_RMII_MODE_CLR);
908 regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
909 - RK3366_GMAC_RXCLK_DLY_ENABLE |
910 - RK3366_GMAC_TXCLK_DLY_ENABLE |
911 + DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
912 RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
913 RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
914 }
915 @@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct r
916 RK3368_GMAC_PHY_INTF_SEL_RGMII |
917 RK3368_GMAC_RMII_MODE_CLR);
918 regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
919 - RK3368_GMAC_RXCLK_DLY_ENABLE |
920 - RK3368_GMAC_TXCLK_DLY_ENABLE |
921 + DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
922 RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
923 RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
924 }
925 @@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct r
926 RK3399_GMAC_PHY_INTF_SEL_RGMII |
927 RK3399_GMAC_RMII_MODE_CLR);
928 regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
929 - RK3399_GMAC_RXCLK_DLY_ENABLE |
930 - RK3399_GMAC_TXCLK_DLY_ENABLE |
931 + DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
932 RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
933 RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
934 }
935 @@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_pri
936 return ret;
937
938 /*rmii or rgmii*/
939 - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
940 + switch (bsp_priv->phy_iface) {
941 + case PHY_INTERFACE_MODE_RGMII:
942 dev_info(dev, "init for RGMII\n");
943 bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
944 bsp_priv->rx_delay);
945 - } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
946 + break;
947 + case PHY_INTERFACE_MODE_RGMII_ID:
948 + dev_info(dev, "init for RGMII_ID\n");
949 + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
950 + break;
951 + case PHY_INTERFACE_MODE_RGMII_RXID:
952 + dev_info(dev, "init for RGMII_RXID\n");
953 + bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
954 + break;
955 + case PHY_INTERFACE_MODE_RGMII_TXID:
956 + dev_info(dev, "init for RGMII_TXID\n");
957 + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
958 + break;
959 + case PHY_INTERFACE_MODE_RMII:
960 dev_info(dev, "init for RMII\n");
961 bsp_priv->ops->set_to_rmii(bsp_priv);
962 - } else {
963 + break;
964 + default:
965 dev_err(dev, "NO interface defined!\n");
966 }
967
968 @@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, uns
969 struct rk_priv_data *bsp_priv = priv;
970 struct device *dev = &bsp_priv->pdev->dev;
971
972 - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
973 + switch (bsp_priv->phy_iface) {
974 + case PHY_INTERFACE_MODE_RGMII:
975 + case PHY_INTERFACE_MODE_RGMII_ID:
976 + case PHY_INTERFACE_MODE_RGMII_RXID:
977 + case PHY_INTERFACE_MODE_RGMII_TXID:
978 bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
979 - else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
980 + break;
981 + case PHY_INTERFACE_MODE_RMII:
982 bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
983 - else
984 + break;
985 + default:
986 dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
987 + }
988 }
989
990 static int rk_gmac_probe(struct platform_device *pdev)
991 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
992 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
993 @@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct
994
995
996 static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
997 - unsigned int fc, unsigned int pause_time)
998 + unsigned int fc, unsigned int pause_time,
999 + u32 tx_cnt)
1000 {
1001 void __iomem *ioaddr = hw->pcsr;
1002 /* Set flow such that DZPQ in Mac Register 6 is 0,
1003 @@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __
1004 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
1005 }
1006
1007 -static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
1008 +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
1009 + u32 rx_queues, u32 tx_queues)
1010 {
1011 u32 value = readl(ioaddr + GMAC_DEBUG);
1012
1013 @@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem
1014
1015 static const struct stmmac_ops dwmac1000_ops = {
1016 .core_init = dwmac1000_core_init,
1017 + .set_mac = stmmac_set_mac,
1018 .rx_ipc = dwmac1000_rx_ipc_enable,
1019 .dump_regs = dwmac1000_dump_regs,
1020 .host_irq_status = dwmac1000_irq_status,
1021 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
1022 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
1023 @@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(voi
1024 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1025 }
1026
1027 -static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
1028 +static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
1029 + u32 number_chan)
1030 {
1031 writel(riwt, ioaddr + DMA_RX_WATCHDOG);
1032 }
1033 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1034 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1035 @@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct m
1036 }
1037
1038 static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
1039 - unsigned int fc, unsigned int pause_time)
1040 + unsigned int fc, unsigned int pause_time,
1041 + u32 tx_cnt)
1042 {
1043 void __iomem *ioaddr = hw->pcsr;
1044 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
1045 @@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_devi
1046
1047 static const struct stmmac_ops dwmac100_ops = {
1048 .core_init = dwmac100_core_init,
1049 + .set_mac = stmmac_set_mac,
1050 .rx_ipc = dwmac100_rx_ipc_enable,
1051 .dump_regs = dwmac100_dump_mac_regs,
1052 .host_irq_status = dwmac100_irq_status,
1053 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1054 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1055 @@ -22,9 +22,15 @@
1056 #define GMAC_HASH_TAB_32_63 0x00000014
1057 #define GMAC_RX_FLOW_CTRL 0x00000090
1058 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
1059 +#define GMAC_TXQ_PRTY_MAP0 0x98
1060 +#define GMAC_TXQ_PRTY_MAP1 0x9C
1061 #define GMAC_RXQ_CTRL0 0x000000a0
1062 +#define GMAC_RXQ_CTRL1 0x000000a4
1063 +#define GMAC_RXQ_CTRL2 0x000000a8
1064 +#define GMAC_RXQ_CTRL3 0x000000ac
1065 #define GMAC_INT_STATUS 0x000000b0
1066 #define GMAC_INT_EN 0x000000b4
1067 +#define GMAC_1US_TIC_COUNTER 0x000000dc
1068 #define GMAC_PCS_BASE 0x000000e0
1069 #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
1070 #define GMAC_PMT 0x000000c0
1071 @@ -38,6 +44,22 @@
1072 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
1073 #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
1074
1075 +/* RX Queues Routing */
1076 +#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
1077 +#define GMAC_RXQCTRL_AVCPQ_SHIFT 0
1078 +#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4)
1079 +#define GMAC_RXQCTRL_PTPQ_SHIFT 4
1080 +#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8)
1081 +#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8
1082 +#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12)
1083 +#define GMAC_RXQCTRL_UPQ_SHIFT 12
1084 +#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16)
1085 +#define GMAC_RXQCTRL_MCBCQ_SHIFT 16
1086 +#define GMAC_RXQCTRL_MCBCQEN BIT(20)
1087 +#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
1088 +#define GMAC_RXQCTRL_TACPQE BIT(21)
1089 +#define GMAC_RXQCTRL_TACPQE_SHIFT 21
1090 +
1091 /* MAC Packet Filtering */
1092 #define GMAC_PACKET_FILTER_PR BIT(0)
1093 #define GMAC_PACKET_FILTER_HMC BIT(2)
1094 @@ -53,6 +75,14 @@
1095 /* MAC Flow Control RX */
1096 #define GMAC_RX_FLOW_CTRL_RFE BIT(0)
1097
1098 +/* RX Queues Priorities */
1099 +#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
1100 +#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8)
1101 +
1102 +/* TX Queues Priorities */
1103 +#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
1104 +#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8)
1105 +
1106 /* MAC Flow Control TX */
1107 #define GMAC_TX_FLOW_CTRL_TFE BIT(1)
1108 #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
1109 @@ -148,6 +178,8 @@ enum power_event {
1110 /* MAC HW features1 bitmap */
1111 #define GMAC_HW_FEAT_AVSEL BIT(20)
1112 #define GMAC_HW_TSOEN BIT(18)
1113 +#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
1114 +#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
1115
1116 /* MAC HW features2 bitmap */
1117 #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
1118 @@ -161,8 +193,25 @@ enum power_event {
1119 #define GMAC_HI_REG_AE BIT(31)
1120
1121 /* MTL registers */
1122 +#define MTL_OPERATION_MODE 0x00000c00
1123 +#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
1124 +#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
1125 +#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
1126 +#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
1127 +#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
1128 +#define MTL_OPERATION_RAA BIT(2)
1129 +#define MTL_OPERATION_RAA_SP (0x0 << 2)
1130 +#define MTL_OPERATION_RAA_WSP (0x1 << 2)
1131 +
1132 #define MTL_INT_STATUS 0x00000c20
1133 -#define MTL_INT_Q0 BIT(0)
1134 +#define MTL_INT_QX(x) BIT(x)
1135 +
1136 +#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
1137 +#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
1138 +#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
1139 +#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
1140 +#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
1141 +#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
1142
1143 #define MTL_CHAN_BASE_ADDR 0x00000d00
1144 #define MTL_CHAN_BASE_OFFSET 0x40
1145 @@ -180,6 +229,7 @@ enum power_event {
1146 #define MTL_OP_MODE_TSF BIT(1)
1147
1148 #define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
1149 +#define MTL_OP_MODE_TQS_SHIFT 16
1150
1151 #define MTL_OP_MODE_TTC_MASK 0x70
1152 #define MTL_OP_MODE_TTC_SHIFT 4
1153 @@ -193,6 +243,17 @@ enum power_event {
1154 #define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
1155 #define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
1156
1157 +#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
1158 +#define MTL_OP_MODE_RQS_SHIFT 20
1159 +
1160 +#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
1161 +#define MTL_OP_MODE_RFD_SHIFT 14
1162 +
1163 +#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
1164 +#define MTL_OP_MODE_RFA_SHIFT 8
1165 +
1166 +#define MTL_OP_MODE_EHFC BIT(7)
1167 +
1168 #define MTL_OP_MODE_RTC_MASK 0x18
1169 #define MTL_OP_MODE_RTC_SHIFT 3
1170
1171 @@ -201,6 +262,46 @@ enum power_event {
1172 #define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
1173 #define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
1174
1175 +/* MTL ETS Control register */
1176 +#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
1177 +#define MTL_ETS_CTRL_BASE_OFFSET 0x40
1178 +#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
1179 + ((x) * MTL_ETS_CTRL_BASE_OFFSET))
1180 +
1181 +#define MTL_ETS_CTRL_CC BIT(3)
1182 +#define MTL_ETS_CTRL_AVALG BIT(2)
1183 +
1184 +/* MTL Queue Quantum Weight */
1185 +#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
1186 +#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
1187 +#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
1188 + ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
1189 +#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
1190 +
1191 +/* MTL sendSlopeCredit register */
1192 +#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
1193 +#define MTL_SEND_SLP_CRED_OFFSET 0x40
1194 +#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
1195 + ((x) * MTL_SEND_SLP_CRED_OFFSET))
1196 +
1197 +#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
1198 +
1199 +/* MTL hiCredit register */
1200 +#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
1201 +#define MTL_HIGH_CRED_OFFSET 0x40
1202 +#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
1203 + ((x) * MTL_HIGH_CRED_OFFSET))
1204 +
1205 +#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
1206 +
1207 +/* MTL loCredit register */
1208 +#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
1209 +#define MTL_LOW_CRED_OFFSET 0x40
1210 +#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
1211 + ((x) * MTL_LOW_CRED_OFFSET))
1212 +
1213 +#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
1214 +
1215 /* MTL debug */
1216 #define MTL_DEBUG_TXSTSFSTS BIT(5)
1217 #define MTL_DEBUG_TXFSTS BIT(4)
1218 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1219 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1220 @@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_
1221 writel(value, ioaddr + GMAC_INT_EN);
1222 }
1223
1224 -static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
1225 +static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
1226 + u8 mode, u32 queue)
1227 {
1228 void __iomem *ioaddr = hw->pcsr;
1229 u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
1230
1231 value &= GMAC_RX_QUEUE_CLEAR(queue);
1232 - value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
1233 + if (mode == MTL_QUEUE_AVB)
1234 + value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
1235 + else if (mode == MTL_QUEUE_DCB)
1236 + value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
1237
1238 writel(value, ioaddr + GMAC_RXQ_CTRL0);
1239 }
1240
1241 +static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
1242 + u32 prio, u32 queue)
1243 +{
1244 + void __iomem *ioaddr = hw->pcsr;
1245 + u32 base_register;
1246 + u32 value;
1247 +
1248 + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
1249 +
1250 + value = readl(ioaddr + base_register);
1251 +
1252 + value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
1253 + value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
1254 + GMAC_RXQCTRL_PSRQX_MASK(queue);
1255 + writel(value, ioaddr + base_register);
1256 +}
1257 +
1258 +static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
1259 + u32 prio, u32 queue)
1260 +{
1261 + void __iomem *ioaddr = hw->pcsr;
1262 + u32 base_register;
1263 + u32 value;
1264 +
1265 + base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
1266 +
1267 + value = readl(ioaddr + base_register);
1268 +
1269 + value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
1270 + value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
1271 + GMAC_TXQCTRL_PSTQX_MASK(queue);
1272 +
1273 + writel(value, ioaddr + base_register);
1274 +}
1275 +
1276 +static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
1277 + u8 packet, u32 queue)
1278 +{
1279 + void __iomem *ioaddr = hw->pcsr;
1280 + u32 value;
1281 +
1282 + const struct stmmac_rx_routing route_possibilities[] = {
1283 + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
1284 + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
1285 + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
1286 + { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
1287 + { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
1288 + };
1289 +
1290 + value = readl(ioaddr + GMAC_RXQ_CTRL1);
1291 +
1292 + /* routing configuration */
1293 + value &= ~route_possibilities[packet - 1].reg_mask;
1294 + value |= (queue << route_possibilities[packet-1].reg_shift) &
1295 + route_possibilities[packet - 1].reg_mask;
1296 +
1297 + /* some packets require extra ops */
1298 + if (packet == PACKET_AVCPQ) {
1299 + value &= ~GMAC_RXQCTRL_TACPQE;
1300 + value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
1301 + } else if (packet == PACKET_MCBCQ) {
1302 + value &= ~GMAC_RXQCTRL_MCBCQEN;
1303 + value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
1304 + }
1305 +
1306 + writel(value, ioaddr + GMAC_RXQ_CTRL1);
1307 +}
1308 +
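The routing helper is indexed by the packets_types enum added in common.h, hence the packet - 1 offset into route_possibilities[] (the enum starts at 0x1). A minimal, hypothetical call through the new stmmac_ops hook (the priv/hw plumbing is assumed):

        /* Hypothetical: steer all PTP packets to RX queue 2. */
        priv->hw->mac->rx_queue_routing(priv->hw, PACKET_PTPQ, 2);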
1309 +static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
1310 + u32 rx_alg)
1311 +{
1312 + void __iomem *ioaddr = hw->pcsr;
1313 + u32 value = readl(ioaddr + MTL_OPERATION_MODE);
1314 +
1315 + value &= ~MTL_OPERATION_RAA;
1316 + switch (rx_alg) {
1317 + case MTL_RX_ALGORITHM_SP:
1318 + value |= MTL_OPERATION_RAA_SP;
1319 + break;
1320 + case MTL_RX_ALGORITHM_WSP:
1321 + value |= MTL_OPERATION_RAA_WSP;
1322 + break;
1323 + default:
1324 + break;
1325 + }
1326 +
1327 + writel(value, ioaddr + MTL_OPERATION_MODE);
1328 +}
1329 +
1330 +static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
1331 + u32 tx_alg)
1332 +{
1333 + void __iomem *ioaddr = hw->pcsr;
1334 + u32 value = readl(ioaddr + MTL_OPERATION_MODE);
1335 +
1336 + value &= ~MTL_OPERATION_SCHALG_MASK;
1337 + switch (tx_alg) {
1338 + case MTL_TX_ALGORITHM_WRR:
1339 + value |= MTL_OPERATION_SCHALG_WRR;
1340 + break;
1341 + case MTL_TX_ALGORITHM_WFQ:
1342 + value |= MTL_OPERATION_SCHALG_WFQ;
1343 + break;
1344 + case MTL_TX_ALGORITHM_DWRR:
1345 + value |= MTL_OPERATION_SCHALG_DWRR;
1346 + break;
1347 + case MTL_TX_ALGORITHM_SP:
1348 + value |= MTL_OPERATION_SCHALG_SP;
1349 + break;
1350 + default:
1351 + break;
1352 + }
1353 +}
1354 +
1355 +static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
1356 + u32 weight, u32 queue)
1357 +{
1358 + void __iomem *ioaddr = hw->pcsr;
1359 + u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
1360 +
1361 + value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
1362 + value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
1363 + writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
1364 +}
1365 +
1366 +static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
1367 +{
1368 + void __iomem *ioaddr = hw->pcsr;
1369 + u32 value;
1370 +
1371 + if (queue < 4)
1372 + value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
1373 + else
1374 + value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
1375 +
1376 + if (queue == 0 || queue == 4) {
1377 + value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
1378 + value |= MTL_RXQ_DMA_Q04MDMACH(chan);
1379 + } else {
1380 + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
1381 + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
1382 + }
1383 +
1384 + if (queue < 4)
1385 + writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
1386 + else
1387 + writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
1388 +}
1389 +
1390 +static void dwmac4_config_cbs(struct mac_device_info *hw,
1391 + u32 send_slope, u32 idle_slope,
1392 + u32 high_credit, u32 low_credit, u32 queue)
1393 +{
1394 + void __iomem *ioaddr = hw->pcsr;
1395 + u32 value;
1396 +
1397 + pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
1398 + pr_debug("\tsend_slope: 0x%08x\n", send_slope);
1399 + pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
1400 + pr_debug("\thigh_credit: 0x%08x\n", high_credit);
1401 + pr_debug("\tlow_credit: 0x%08x\n", low_credit);
1402 +
1403 + /* enable AV algorithm */
1404 + value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
1405 + value |= MTL_ETS_CTRL_AVALG;
1406 + value |= MTL_ETS_CTRL_CC;
1407 + writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
1408 +
1409 + /* configure send slope */
1410 + value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
1411 + value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
1412 + value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
1413 + writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
1414 +
1415 + /* configure idle slope (same register as tx weight) */
1416 + dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
1417 +
1418 + /* configure high credit */
1419 + value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
1420 + value &= ~MTL_HIGH_CRED_HC_MASK;
1421 + value |= high_credit & MTL_HIGH_CRED_HC_MASK;
1422 + writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
1423 +
1424 +	/* configure low credit */
1425 + value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
1426 + value &= ~MTL_HIGH_CRED_LC_MASK;
1427 + value |= low_credit & MTL_HIGH_CRED_LC_MASK;
1428 + writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
1429 +}
1430 +
1431 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
1432 {
1433 void __iomem *ioaddr = hw->pcsr;
1434 @@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac
1435 }
1436
1437 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
1438 - unsigned int fc, unsigned int pause_time)
1439 + unsigned int fc, unsigned int pause_time,
1440 + u32 tx_cnt)
1441 {
1442 void __iomem *ioaddr = hw->pcsr;
1443 - u32 channel = STMMAC_CHAN0; /* FIXME */
1444 unsigned int flow = 0;
1445 + u32 queue = 0;
1446
1447 pr_debug("GMAC Flow-Control:\n");
1448 if (fc & FLOW_RX) {
1449 @@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_
1450 }
1451 if (fc & FLOW_TX) {
1452 pr_debug("\tTransmit Flow-Control ON\n");
1453 - flow |= GMAC_TX_FLOW_CTRL_TFE;
1454 - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
1455
1456 - if (duplex) {
1457 + if (duplex)
1458 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
1459 - flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
1460 - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
1461 +
1462 + for (queue = 0; queue < tx_cnt; queue++) {
1463 + flow |= GMAC_TX_FLOW_CTRL_TFE;
1464 +
1465 + if (duplex)
1466 + flow |=
1467 + (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
1468 +
1469 + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
1470 }
1471 }
1472 }
1473 @@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iome
1474 }
1475 }
1476
1477 +static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
1478 +{
1479 + void __iomem *ioaddr = hw->pcsr;
1480 + u32 mtl_int_qx_status;
1481 + int ret = 0;
1482 +
1483 + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
1484 +
1485 + /* Check MTL Interrupt */
1486 + if (mtl_int_qx_status & MTL_INT_QX(chan)) {
1487 + /* read Queue x Interrupt status */
1488 + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
1489 +
1490 + if (status & MTL_RX_OVERFLOW_INT) {
1491 + /* clear Interrupt */
1492 + writel(status | MTL_RX_OVERFLOW_INT,
1493 + ioaddr + MTL_CHAN_INT_CTRL(chan));
1494 + ret = CORE_IRQ_MTL_RX_OVERFLOW;
1495 + }
1496 + }
1497 +
1498 + return ret;
1499 +}
1500 +
1501 static int dwmac4_irq_status(struct mac_device_info *hw,
1502 struct stmmac_extra_stats *x)
1503 {
1504 void __iomem *ioaddr = hw->pcsr;
1505 - u32 mtl_int_qx_status;
1506 u32 intr_status;
1507 int ret = 0;
1508
1509 @@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_
1510 x->irq_receive_pmt_irq_n++;
1511 }
1512
1513 - mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
1514 - /* Check MTL Interrupt: Currently only one queue is used: Q0. */
1515 - if (mtl_int_qx_status & MTL_INT_Q0) {
1516 - /* read Queue 0 Interrupt status */
1517 - u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
1518 -
1519 - if (status & MTL_RX_OVERFLOW_INT) {
1520 - /* clear Interrupt */
1521 - writel(status | MTL_RX_OVERFLOW_INT,
1522 - ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
1523 - ret = CORE_IRQ_MTL_RX_OVERFLOW;
1524 - }
1525 - }
1526 -
1527 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
1528 if (intr_status & PCS_RGSMIIIS_IRQ)
1529 dwmac4_phystatus(ioaddr, x);
1530 @@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_
1531 return ret;
1532 }
1533
1534 -static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
1535 +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
1536 + u32 rx_queues, u32 tx_queues)
1537 {
1538 u32 value;
1539 + u32 queue;
1540
1541 - /* Currently only channel 0 is supported */
1542 - value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
1543 + for (queue = 0; queue < tx_queues; queue++) {
1544 + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
1545
1546 - if (value & MTL_DEBUG_TXSTSFSTS)
1547 - x->mtl_tx_status_fifo_full++;
1548 - if (value & MTL_DEBUG_TXFSTS)
1549 - x->mtl_tx_fifo_not_empty++;
1550 - if (value & MTL_DEBUG_TWCSTS)
1551 - x->mmtl_fifo_ctrl++;
1552 - if (value & MTL_DEBUG_TRCSTS_MASK) {
1553 - u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
1554 - >> MTL_DEBUG_TRCSTS_SHIFT;
1555 - if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
1556 - x->mtl_tx_fifo_read_ctrl_write++;
1557 - else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
1558 - x->mtl_tx_fifo_read_ctrl_wait++;
1559 - else if (trcsts == MTL_DEBUG_TRCSTS_READ)
1560 - x->mtl_tx_fifo_read_ctrl_read++;
1561 - else
1562 - x->mtl_tx_fifo_read_ctrl_idle++;
1563 + if (value & MTL_DEBUG_TXSTSFSTS)
1564 + x->mtl_tx_status_fifo_full++;
1565 + if (value & MTL_DEBUG_TXFSTS)
1566 + x->mtl_tx_fifo_not_empty++;
1567 + if (value & MTL_DEBUG_TWCSTS)
1568 + x->mmtl_fifo_ctrl++;
1569 + if (value & MTL_DEBUG_TRCSTS_MASK) {
1570 + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
1571 + >> MTL_DEBUG_TRCSTS_SHIFT;
1572 + if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
1573 + x->mtl_tx_fifo_read_ctrl_write++;
1574 + else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
1575 + x->mtl_tx_fifo_read_ctrl_wait++;
1576 + else if (trcsts == MTL_DEBUG_TRCSTS_READ)
1577 + x->mtl_tx_fifo_read_ctrl_read++;
1578 + else
1579 + x->mtl_tx_fifo_read_ctrl_idle++;
1580 + }
1581 + if (value & MTL_DEBUG_TXPAUSED)
1582 + x->mac_tx_in_pause++;
1583 }
1584 - if (value & MTL_DEBUG_TXPAUSED)
1585 - x->mac_tx_in_pause++;
1586
1587 - value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
1588 + for (queue = 0; queue < rx_queues; queue++) {
1589 + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
1590
1591 - if (value & MTL_DEBUG_RXFSTS_MASK) {
1592 - u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
1593 - >> MTL_DEBUG_RRCSTS_SHIFT;
1594 -
1595 - if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
1596 - x->mtl_rx_fifo_fill_level_full++;
1597 - else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
1598 - x->mtl_rx_fifo_fill_above_thresh++;
1599 - else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
1600 - x->mtl_rx_fifo_fill_below_thresh++;
1601 - else
1602 - x->mtl_rx_fifo_fill_level_empty++;
1603 - }
1604 - if (value & MTL_DEBUG_RRCSTS_MASK) {
1605 - u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
1606 - MTL_DEBUG_RRCSTS_SHIFT;
1607 -
1608 - if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
1609 - x->mtl_rx_fifo_read_ctrl_flush++;
1610 - else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
1611 - x->mtl_rx_fifo_read_ctrl_read_data++;
1612 - else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
1613 - x->mtl_rx_fifo_read_ctrl_status++;
1614 - else
1615 - x->mtl_rx_fifo_read_ctrl_idle++;
1616 + if (value & MTL_DEBUG_RXFSTS_MASK) {
1617 + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
1618 + >> MTL_DEBUG_RRCSTS_SHIFT;
1619 +
1620 + if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
1621 + x->mtl_rx_fifo_fill_level_full++;
1622 + else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
1623 + x->mtl_rx_fifo_fill_above_thresh++;
1624 + else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
1625 + x->mtl_rx_fifo_fill_below_thresh++;
1626 + else
1627 + x->mtl_rx_fifo_fill_level_empty++;
1628 + }
1629 + if (value & MTL_DEBUG_RRCSTS_MASK) {
1630 + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
1631 + MTL_DEBUG_RRCSTS_SHIFT;
1632 +
1633 + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
1634 + x->mtl_rx_fifo_read_ctrl_flush++;
1635 + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
1636 + x->mtl_rx_fifo_read_ctrl_read_data++;
1637 + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
1638 + x->mtl_rx_fifo_read_ctrl_status++;
1639 + else
1640 + x->mtl_rx_fifo_read_ctrl_idle++;
1641 + }
1642 + if (value & MTL_DEBUG_RWCSTS)
1643 + x->mtl_rx_fifo_ctrl_active++;
1644 }
1645 - if (value & MTL_DEBUG_RWCSTS)
1646 - x->mtl_rx_fifo_ctrl_active++;
1647
1648 /* GMAC debug */
1649 value = readl(ioaddr + GMAC_DEBUG);
1650 @@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *i
1651
1652 static const struct stmmac_ops dwmac4_ops = {
1653 .core_init = dwmac4_core_init,
1654 + .set_mac = stmmac_set_mac,
1655 .rx_ipc = dwmac4_rx_ipc_enable,
1656 .rx_queue_enable = dwmac4_rx_queue_enable,
1657 + .rx_queue_prio = dwmac4_rx_queue_priority,
1658 + .tx_queue_prio = dwmac4_tx_queue_priority,
1659 + .rx_queue_routing = dwmac4_tx_queue_routing,
1660 + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1661 + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1662 + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1663 + .map_mtl_to_dma = dwmac4_map_mtl_dma,
1664 + .config_cbs = dwmac4_config_cbs,
1665 .dump_regs = dwmac4_dump_regs,
1666 .host_irq_status = dwmac4_irq_status,
1667 + .host_mtl_irq_status = dwmac4_irq_mtl_status,
1668 + .flow_ctrl = dwmac4_flow_ctrl,
1669 + .pmt = dwmac4_pmt,
1670 + .set_umac_addr = dwmac4_set_umac_addr,
1671 + .get_umac_addr = dwmac4_get_umac_addr,
1672 + .set_eee_mode = dwmac4_set_eee_mode,
1673 + .reset_eee_mode = dwmac4_reset_eee_mode,
1674 + .set_eee_timer = dwmac4_set_eee_timer,
1675 + .set_eee_pls = dwmac4_set_eee_pls,
1676 + .pcs_ctrl_ane = dwmac4_ctrl_ane,
1677 + .pcs_rane = dwmac4_rane,
1678 + .pcs_get_adv_lp = dwmac4_get_adv_lp,
1679 + .debug = dwmac4_debug,
1680 + .set_filter = dwmac4_set_filter,
1681 +};
1682 +
1683 +static const struct stmmac_ops dwmac410_ops = {
1684 + .core_init = dwmac4_core_init,
1685 + .set_mac = stmmac_dwmac4_set_mac,
1686 + .rx_ipc = dwmac4_rx_ipc_enable,
1687 + .rx_queue_enable = dwmac4_rx_queue_enable,
1688 + .rx_queue_prio = dwmac4_rx_queue_priority,
1689 + .tx_queue_prio = dwmac4_tx_queue_priority,
1690 + .rx_queue_routing = dwmac4_tx_queue_routing,
1691 + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1692 + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1693 + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1694 + .map_mtl_to_dma = dwmac4_map_mtl_dma,
1695 + .config_cbs = dwmac4_config_cbs,
1696 + .dump_regs = dwmac4_dump_regs,
1697 + .host_irq_status = dwmac4_irq_status,
1698 + .host_mtl_irq_status = dwmac4_irq_mtl_status,
1699 .flow_ctrl = dwmac4_flow_ctrl,
1700 .pmt = dwmac4_pmt,
1701 .set_umac_addr = dwmac4_set_umac_addr,
1702 @@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(voi
1703 if (mac->multicast_filter_bins)
1704 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1705
1706 - mac->mac = &dwmac4_ops;
1707 -
1708 mac->link.port = GMAC_CONFIG_PS;
1709 mac->link.duplex = GMAC_CONFIG_DM;
1710 mac->link.speed = GMAC_CONFIG_FES;
1711 @@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(voi
1712 else
1713 mac->dma = &dwmac4_dma_ops;
1714
1715 + if (*synopsys_id >= DWMAC_CORE_4_00)
1716 + mac->mac = &dwmac410_ops;
1717 + else
1718 + mac->mac = &dwmac4_ops;
1719 +
1720 return mac;
1721 }
1722 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1723 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1724 @@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestam
1725 {
1726 /* Context type from W/B descriptor must be zero */
1727 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
1728 - return -EINVAL;
1729 + return 0;
1730
1731 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
1732 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
1733 - return 0;
1734 + return 1;
1735
1736 - return 1;
1737 + return 0;
1738 }
1739
1740 static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
1741 @@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestam
1742 }
1743 }
1744 exit:
1745 - return ret;
1746 + if (likely(ret == 0))
1747 + return 1;
1748 +
1749 + return 0;
1750 }
1751
1752 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1753 @@ -304,12 +307,13 @@ static void dwmac4_rd_init_tx_desc(struc
1754
1755 static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1756 bool csum_flag, int mode, bool tx_own,
1757 - bool ls)
1758 + bool ls, unsigned int tot_pkt_len)
1759 {
1760 unsigned int tdes3 = le32_to_cpu(p->des3);
1761
1762 p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
1763
1764 + tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
1765 if (is_fs)
1766 tdes3 |= TDES3_FIRST_DESCRIPTOR;
1767 else
1768 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1769 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1770 @@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem
1771 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1772 }
1773
1774 -static void dwmac4_dma_init_channel(void __iomem *ioaddr,
1775 - struct stmmac_dma_cfg *dma_cfg,
1776 - u32 dma_tx_phy, u32 dma_rx_phy,
1777 - u32 channel)
1778 +void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
1779 + struct stmmac_dma_cfg *dma_cfg,
1780 + u32 dma_rx_phy, u32 chan)
1781 {
1782 u32 value;
1783 - int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1784 - int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1785 + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1786
1787 - /* set PBL for each channels. Currently we affect same configuration
1788 - * on each channel
1789 - */
1790 - value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
1791 - if (dma_cfg->pblx8)
1792 - value = value | DMA_BUS_MODE_PBL;
1793 - writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
1794 + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
1795 + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1796 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
1797 +
1798 + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
1799 +}
1800
1801 - value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
1802 +void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
1803 + struct stmmac_dma_cfg *dma_cfg,
1804 + u32 dma_tx_phy, u32 chan)
1805 +{
1806 + u32 value;
1807 + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1808 +
1809 + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
1810 value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
1811 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
1812 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
1813
1814 - value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
1815 - value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1816 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
1817 + writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
1818 +}
1819
1820 - /* Mask interrupts by writing to CSR7 */
1821 - writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
1822 +void dwmac4_dma_init_channel(void __iomem *ioaddr,
1823 + struct stmmac_dma_cfg *dma_cfg, u32 chan)
1824 +{
1825 + u32 value;
1826 +
1827 + /* common channel control register config */
1828 + value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
1829 + if (dma_cfg->pblx8)
1830 + value = value | DMA_BUS_MODE_PBL;
1831 + writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
1832
1833 - writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
1834 - writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
1835 + /* Mask interrupts by writing to CSR7 */
1836 + writel(DMA_CHAN_INTR_DEFAULT_MASK,
1837 + ioaddr + DMA_CHAN_INTR_ENA(chan));
1838 }
1839
1840 static void dwmac4_dma_init(void __iomem *ioaddr,
1841 @@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem
1842 u32 dma_tx, u32 dma_rx, int atds)
1843 {
1844 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
1845 - int i;
1846
1847 /* Set the Fixed burst mode */
1848 if (dma_cfg->fixed_burst)
1849 @@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem
1850 value |= DMA_SYS_BUS_AAL;
1851
1852 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1853 -
1854 - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
1855 - dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
1856 }
1857
1858 static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
1859 @@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __
1860 _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
1861 }
1862
1863 -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
1864 +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
1865 {
1866 - int i;
1867 + u32 chan;
1868
1869 - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
1870 - writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
1871 + for (chan = 0; chan < number_chan; chan++)
1872 + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
1873 }
1874
1875 -static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
1876 - int rxmode, u32 channel)
1877 +static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
1878 + u32 channel, int fifosz)
1879 {
1880 - u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
1881 + unsigned int rqs = fifosz / 256 - 1;
1882 + u32 mtl_rx_op, mtl_rx_int;
1883
1884 - /* Following code only done for channel 0, other channels not yet
1885 - * supported.
1886 - */
1887 - mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
1888 + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
1889 +
1890 + if (mode == SF_DMA_MODE) {
1891 + pr_debug("GMAC: enable RX store and forward mode\n");
1892 + mtl_rx_op |= MTL_OP_MODE_RSF;
1893 + } else {
1894 + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
1895 + mtl_rx_op &= ~MTL_OP_MODE_RSF;
1896 + mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
1897 + if (mode <= 32)
1898 + mtl_rx_op |= MTL_OP_MODE_RTC_32;
1899 + else if (mode <= 64)
1900 + mtl_rx_op |= MTL_OP_MODE_RTC_64;
1901 + else if (mode <= 96)
1902 + mtl_rx_op |= MTL_OP_MODE_RTC_96;
1903 + else
1904 + mtl_rx_op |= MTL_OP_MODE_RTC_128;
1905 + }
1906 +
1907 + mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
1908 + mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
1909 +
1910 + /* enable flow control only if each channel gets 4 KiB or more FIFO */
1911 + if (fifosz >= 4096) {
1912 + unsigned int rfd, rfa;
1913 +
1914 + mtl_rx_op |= MTL_OP_MODE_EHFC;
1915 +
1916 + /* Set Threshold for Activating Flow Control to min 2 frames,
1917 + * i.e. 1500 * 2 = 3000 bytes.
1918 + *
1919 + * Set Threshold for Deactivating Flow Control to min 1 frame,
1920 + * i.e. 1500 bytes.
1921 + */
1922 + switch (fifosz) {
1923 + case 4096:
1924 + /* This violates the above formula because of FIFO size
1925 + * limit therefore overflow may occur in spite of this.
1926 + */
1927 + rfd = 0x03; /* Full-2.5K */
1928 + rfa = 0x01; /* Full-1.5K */
1929 + break;
1930 +
1931 + case 8192:
1932 + rfd = 0x06; /* Full-4K */
1933 + rfa = 0x0a; /* Full-6K */
1934 + break;
1935 +
1936 + case 16384:
1937 + rfd = 0x06; /* Full-4K */
1938 + rfa = 0x12; /* Full-10K */
1939 + break;
1940 +
1941 + default:
1942 + rfd = 0x06; /* Full-4K */
1943 + rfa = 0x1e; /* Full-16K */
1944 + break;
1945 + }
1946
1947 - if (txmode == SF_DMA_MODE) {
1948 + mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
1949 + mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
1950 +
1951 + mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
1952 + mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
1953 + }
1954 +
1955 + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
1956 +
1957 + /* Enable MTL RX overflow */
1958 + mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
1959 + writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
1960 + ioaddr + MTL_CHAN_INT_CTRL(channel));
1961 +}
1962 +
1963 +static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
1964 + u32 channel)
1965 +{
1966 + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
1967 +
1968 + if (mode == SF_DMA_MODE) {
1969 pr_debug("GMAC: enable TX store and forward mode\n");
1970 /* Transmit COE type 2 cannot be done in cut-through mode. */
1971 mtl_tx_op |= MTL_OP_MODE_TSF;
1972 } else {
1973 - pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
1974 + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
1975 mtl_tx_op &= ~MTL_OP_MODE_TSF;
1976 mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
1977 /* Set the transmit threshold */
1978 - if (txmode <= 32)
1979 + if (mode <= 32)
1980 mtl_tx_op |= MTL_OP_MODE_TTC_32;
1981 - else if (txmode <= 64)
1982 + else if (mode <= 64)
1983 mtl_tx_op |= MTL_OP_MODE_TTC_64;
1984 - else if (txmode <= 96)
1985 + else if (mode <= 96)
1986 mtl_tx_op |= MTL_OP_MODE_TTC_96;
1987 - else if (txmode <= 128)
1988 + else if (mode <= 128)
1989 mtl_tx_op |= MTL_OP_MODE_TTC_128;
1990 - else if (txmode <= 192)
1991 + else if (mode <= 192)
1992 mtl_tx_op |= MTL_OP_MODE_TTC_192;
1993 - else if (txmode <= 256)
1994 + else if (mode <= 256)
1995 mtl_tx_op |= MTL_OP_MODE_TTC_256;
1996 - else if (txmode <= 384)
1997 + else if (mode <= 384)
1998 mtl_tx_op |= MTL_OP_MODE_TTC_384;
1999 else
2000 mtl_tx_op |= MTL_OP_MODE_TTC_512;
2001 @@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void
2002 */
2003 mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
2004 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
2005 -
2006 - mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
2007 -
2008 - if (rxmode == SF_DMA_MODE) {
2009 - pr_debug("GMAC: enable RX store and forward mode\n");
2010 - mtl_rx_op |= MTL_OP_MODE_RSF;
2011 - } else {
2012 - pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
2013 - mtl_rx_op &= ~MTL_OP_MODE_RSF;
2014 - mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
2015 - if (rxmode <= 32)
2016 - mtl_rx_op |= MTL_OP_MODE_RTC_32;
2017 - else if (rxmode <= 64)
2018 - mtl_rx_op |= MTL_OP_MODE_RTC_64;
2019 - else if (rxmode <= 96)
2020 - mtl_rx_op |= MTL_OP_MODE_RTC_96;
2021 - else
2022 - mtl_rx_op |= MTL_OP_MODE_RTC_128;
2023 - }
2024 -
2025 - writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
2026 -
2027 - /* Enable MTL RX overflow */
2028 - mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
2029 - writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
2030 - ioaddr + MTL_CHAN_INT_CTRL(channel));
2031 -}
2032 -
2033 -static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
2034 - int rxmode, int rxfifosz)
2035 -{
2036 - /* Only Channel 0 is actually configured and used */
2037 - dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
2038 }
2039
2040 static void dwmac4_get_hw_feature(void __iomem *ioaddr,
2041 @@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void _
2042 hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
2043 dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
2044 dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
2045 + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
2046 + * shifting and store the sizes in bytes.
2047 + */
2048 + dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
2049 + dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
2050 /* MAC HW feature2 */
2051 hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
2052 /* TX and RX number of channels */
2053 @@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iom
2054 const struct stmmac_dma_ops dwmac4_dma_ops = {
2055 .reset = dwmac4_dma_reset,
2056 .init = dwmac4_dma_init,
2057 + .init_chan = dwmac4_dma_init_channel,
2058 + .init_rx_chan = dwmac4_dma_init_rx_chan,
2059 + .init_tx_chan = dwmac4_dma_init_tx_chan,
2060 .axi = dwmac4_dma_axi,
2061 .dump_regs = dwmac4_dump_dma_regs,
2062 - .dma_mode = dwmac4_dma_operation_mode,
2063 + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
2064 + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
2065 .enable_dma_irq = dwmac4_enable_dma_irq,
2066 .disable_dma_irq = dwmac4_disable_dma_irq,
2067 .start_tx = dwmac4_dma_start_tx,
2068 @@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_o
2069 const struct stmmac_dma_ops dwmac410_dma_ops = {
2070 .reset = dwmac4_dma_reset,
2071 .init = dwmac4_dma_init,
2072 + .init_chan = dwmac4_dma_init_channel,
2073 + .init_rx_chan = dwmac4_dma_init_rx_chan,
2074 + .init_tx_chan = dwmac4_dma_init_tx_chan,
2075 .axi = dwmac4_dma_axi,
2076 .dump_regs = dwmac4_dump_dma_regs,
2077 - .dma_mode = dwmac4_dma_operation_mode,
2078 + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
2079 + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
2080 .enable_dma_irq = dwmac410_enable_dma_irq,
2081 .disable_dma_irq = dwmac4_disable_dma_irq,
2082 .start_tx = dwmac4_dma_start_tx,
2083 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
2084 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
2085 @@ -185,17 +185,17 @@
2086
2087 int dwmac4_dma_reset(void __iomem *ioaddr);
2088 void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
2089 -void dwmac4_enable_dma_irq(void __iomem *ioaddr);
2090 -void dwmac410_enable_dma_irq(void __iomem *ioaddr);
2091 -void dwmac4_disable_dma_irq(void __iomem *ioaddr);
2092 -void dwmac4_dma_start_tx(void __iomem *ioaddr);
2093 -void dwmac4_dma_stop_tx(void __iomem *ioaddr);
2094 -void dwmac4_dma_start_rx(void __iomem *ioaddr);
2095 -void dwmac4_dma_stop_rx(void __iomem *ioaddr);
2096 +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2097 +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2098 +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
2099 +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
2100 +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
2101 +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
2102 +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
2103 int dwmac4_dma_interrupt(void __iomem *ioaddr,
2104 - struct stmmac_extra_stats *x);
2105 -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
2106 -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
2107 + struct stmmac_extra_stats *x, u32 chan);
2108 +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
2109 +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
2110 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
2111 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
2112
2113 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
2114 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
2115 @@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioadd
2116
2117 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
2118 {
2119 - writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
2120 + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
2121 }
2122
2123 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
2124 {
2125 - writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
2126 + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
2127 }
2128
2129 -void dwmac4_dma_start_tx(void __iomem *ioaddr)
2130 +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
2131 {
2132 - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2133 + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
2134
2135 value |= DMA_CONTROL_ST;
2136 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2137 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
2138
2139 value = readl(ioaddr + GMAC_CONFIG);
2140 value |= GMAC_CONFIG_TE;
2141 writel(value, ioaddr + GMAC_CONFIG);
2142 }
2143
2144 -void dwmac4_dma_stop_tx(void __iomem *ioaddr)
2145 +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
2146 {
2147 - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2148 + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
2149
2150 value &= ~DMA_CONTROL_ST;
2151 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2152 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
2153
2154 value = readl(ioaddr + GMAC_CONFIG);
2155 value &= ~GMAC_CONFIG_TE;
2156 writel(value, ioaddr + GMAC_CONFIG);
2157 }
2158
2159 -void dwmac4_dma_start_rx(void __iomem *ioaddr)
2160 +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
2161 {
2162 - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2163 + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
2164
2165 value |= DMA_CONTROL_SR;
2166
2167 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2168 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
2169
2170 value = readl(ioaddr + GMAC_CONFIG);
2171 value |= GMAC_CONFIG_RE;
2172 writel(value, ioaddr + GMAC_CONFIG);
2173 }
2174
2175 -void dwmac4_dma_stop_rx(void __iomem *ioaddr)
2176 +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
2177 {
2178 - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2179 + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
2180
2181 value &= ~DMA_CONTROL_SR;
2182 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2183 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
2184
2185 value = readl(ioaddr + GMAC_CONFIG);
2186 value &= ~GMAC_CONFIG_RE;
2187 writel(value, ioaddr + GMAC_CONFIG);
2188 }
2189
2190 -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
2191 +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
2192 {
2193 - writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
2194 + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
2195 }
2196
2197 -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
2198 +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
2199 {
2200 - writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
2201 + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
2202 }
2203
2204 -void dwmac4_enable_dma_irq(void __iomem *ioaddr)
2205 +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2206 {
2207 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
2208 - DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2209 + DMA_CHAN_INTR_ENA(chan));
2210 }
2211
2212 -void dwmac410_enable_dma_irq(void __iomem *ioaddr)
2213 +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2214 {
2215 writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
2216 - ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2217 + ioaddr + DMA_CHAN_INTR_ENA(chan));
2218 }
2219
2220 -void dwmac4_disable_dma_irq(void __iomem *ioaddr)
2221 +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
2222 {
2223 - writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2224 + writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
2225 }
2226
2227 int dwmac4_dma_interrupt(void __iomem *ioaddr,
2228 - struct stmmac_extra_stats *x)
2229 + struct stmmac_extra_stats *x, u32 chan)
2230 {
2231 int ret = 0;
2232
2233 - u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
2234 + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
2235
2236 /* ABNORMAL interrupts */
2237 if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
2238 @@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *i
2239 if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
2240 u32 value;
2241
2242 - value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2243 + value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
2244 /* to schedule NAPI on real RIE event. */
2245 if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
2246 x->rx_normal_irq_n++;
2247 @@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *i
2248 * status [21-0] expect reserved bits [5-3]
2249 */
2250 writel((intr_status & 0x3fffc7),
2251 - ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
2252 + ioaddr + DMA_CHAN_STATUS(chan));
2253
2254 return ret;
2255 }
2256 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
2257 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
2258 @@ -137,13 +137,14 @@
2259 #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
2260
2261 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
2262 -void dwmac_enable_dma_irq(void __iomem *ioaddr);
2263 -void dwmac_disable_dma_irq(void __iomem *ioaddr);
2264 -void dwmac_dma_start_tx(void __iomem *ioaddr);
2265 -void dwmac_dma_stop_tx(void __iomem *ioaddr);
2266 -void dwmac_dma_start_rx(void __iomem *ioaddr);
2267 -void dwmac_dma_stop_rx(void __iomem *ioaddr);
2268 -int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
2269 +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2270 +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
2271 +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
2272 +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
2273 +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
2274 +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
2275 +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
2276 + u32 chan);
2277 int dwmac_dma_reset(void __iomem *ioaddr);
2278
2279 #endif /* __DWMAC_DMA_H__ */
2280 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
2281 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
2282 @@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void
2283 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
2284 }
2285
2286 -void dwmac_enable_dma_irq(void __iomem *ioaddr)
2287 +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2288 {
2289 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
2290 }
2291
2292 -void dwmac_disable_dma_irq(void __iomem *ioaddr)
2293 +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
2294 {
2295 writel(0, ioaddr + DMA_INTR_ENA);
2296 }
2297
2298 -void dwmac_dma_start_tx(void __iomem *ioaddr)
2299 +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
2300 {
2301 u32 value = readl(ioaddr + DMA_CONTROL);
2302 value |= DMA_CONTROL_ST;
2303 writel(value, ioaddr + DMA_CONTROL);
2304 }
2305
2306 -void dwmac_dma_stop_tx(void __iomem *ioaddr)
2307 +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
2308 {
2309 u32 value = readl(ioaddr + DMA_CONTROL);
2310 value &= ~DMA_CONTROL_ST;
2311 writel(value, ioaddr + DMA_CONTROL);
2312 }
2313
2314 -void dwmac_dma_start_rx(void __iomem *ioaddr)
2315 +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
2316 {
2317 u32 value = readl(ioaddr + DMA_CONTROL);
2318 value |= DMA_CONTROL_SR;
2319 writel(value, ioaddr + DMA_CONTROL);
2320 }
2321
2322 -void dwmac_dma_stop_rx(void __iomem *ioaddr)
2323 +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
2324 {
2325 u32 value = readl(ioaddr + DMA_CONTROL);
2326 value &= ~DMA_CONTROL_SR;
2327 @@ -156,7 +156,7 @@ static void show_rx_process_state(unsign
2328 #endif
2329
2330 int dwmac_dma_interrupt(void __iomem *ioaddr,
2331 - struct stmmac_extra_stats *x)
2332 + struct stmmac_extra_stats *x, u32 chan)
2333 {
2334 int ret = 0;
2335 /* read the status register (CSR5) */
2336 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2337 +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2338 @@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(str
2339
2340 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
2341 bool csum_flag, int mode, bool tx_own,
2342 - bool ls)
2343 + bool ls, unsigned int tot_pkt_len)
2344 {
2345 unsigned int tdes0 = le32_to_cpu(p->des0);
2346
2347 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2348 +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2349 @@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct
2350
2351 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
2352 bool csum_flag, int mode, bool tx_own,
2353 - bool ls)
2354 + bool ls, unsigned int tot_pkt_len)
2355 {
2356 unsigned int tdes1 = le32_to_cpu(p->des1);
2357
2358 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2359 +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2360 @@ -26,16 +26,17 @@
2361
2362 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
2363 {
2364 - struct stmmac_priv *priv = (struct stmmac_priv *)p;
2365 - unsigned int entry = priv->cur_tx;
2366 - struct dma_desc *desc;
2367 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
2368 unsigned int nopaged_len = skb_headlen(skb);
2369 + struct stmmac_priv *priv = tx_q->priv_data;
2370 + unsigned int entry = tx_q->cur_tx;
2371 unsigned int bmax, len, des2;
2372 + struct dma_desc *desc;
2373
2374 if (priv->extend_desc)
2375 - desc = (struct dma_desc *)(priv->dma_etx + entry);
2376 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2377 else
2378 - desc = priv->dma_tx + entry;
2379 + desc = tx_q->dma_tx + entry;
2380
2381 if (priv->plat->enh_desc)
2382 bmax = BUF_SIZE_8KiB;
2383 @@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, str
2384 if (dma_mapping_error(priv->device, des2))
2385 return -1;
2386
2387 - priv->tx_skbuff_dma[entry].buf = des2;
2388 - priv->tx_skbuff_dma[entry].len = bmax;
2389 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2390 + tx_q->tx_skbuff_dma[entry].buf = des2;
2391 + tx_q->tx_skbuff_dma[entry].len = bmax;
2392 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2393
2394 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2395 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
2396 - STMMAC_RING_MODE, 0, false);
2397 - priv->tx_skbuff[entry] = NULL;
2398 + STMMAC_RING_MODE, 0,
2399 + false, skb->len);
2400 + tx_q->tx_skbuff[entry] = NULL;
2401 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2402
2403 if (priv->extend_desc)
2404 - desc = (struct dma_desc *)(priv->dma_etx + entry);
2405 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2406 else
2407 - desc = priv->dma_tx + entry;
2408 + desc = tx_q->dma_tx + entry;
2409
2410 des2 = dma_map_single(priv->device, skb->data + bmax, len,
2411 DMA_TO_DEVICE);
2412 desc->des2 = cpu_to_le32(des2);
2413 if (dma_mapping_error(priv->device, des2))
2414 return -1;
2415 - priv->tx_skbuff_dma[entry].buf = des2;
2416 - priv->tx_skbuff_dma[entry].len = len;
2417 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2418 + tx_q->tx_skbuff_dma[entry].buf = des2;
2419 + tx_q->tx_skbuff_dma[entry].len = len;
2420 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2421
2422 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2423 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
2424 - STMMAC_RING_MODE, 1, true);
2425 + STMMAC_RING_MODE, 1,
2426 + true, skb->len);
2427 } else {
2428 des2 = dma_map_single(priv->device, skb->data,
2429 nopaged_len, DMA_TO_DEVICE);
2430 desc->des2 = cpu_to_le32(des2);
2431 if (dma_mapping_error(priv->device, des2))
2432 return -1;
2433 - priv->tx_skbuff_dma[entry].buf = des2;
2434 - priv->tx_skbuff_dma[entry].len = nopaged_len;
2435 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2436 + tx_q->tx_skbuff_dma[entry].buf = des2;
2437 + tx_q->tx_skbuff_dma[entry].len = nopaged_len;
2438 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2439 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2440 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
2441 - STMMAC_RING_MODE, 0, true);
2442 + STMMAC_RING_MODE, 0,
2443 + true, skb->len);
2444 }
2445
2446 - priv->cur_tx = entry;
2447 + tx_q->cur_tx = entry;
2448
2449 return entry;
2450 }
2451 @@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma
2452
2453 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
2454 {
2455 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
2456 - unsigned int entry = priv->dirty_tx;
2457 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
2458 + struct stmmac_priv *priv = tx_q->priv_data;
2459 + unsigned int entry = tx_q->dirty_tx;
2460
2461 /* des3 is only used for jumbo frames tx or time stamping */
2462 - if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
2463 - (priv->tx_skbuff_dma[entry].last_segment &&
2464 + if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
2465 + (tx_q->tx_skbuff_dma[entry].last_segment &&
2466 !priv->extend_desc && priv->hwts_tx_en)))
2467 p->des3 = 0;
2468 }
2469 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2470 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2471 @@ -46,38 +46,51 @@ struct stmmac_tx_info {
2472 bool is_jumbo;
2473 };
2474
2475 -struct stmmac_priv {
2476 - /* Frequently used values are kept adjacent for cache effect */
2477 +/* Frequently used values are kept adjacent for cache effect */
2478 +struct stmmac_tx_queue {
2479 + u32 queue_index;
2480 + struct stmmac_priv *priv_data;
2481 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
2482 struct dma_desc *dma_tx;
2483 struct sk_buff **tx_skbuff;
2484 + struct stmmac_tx_info *tx_skbuff_dma;
2485 unsigned int cur_tx;
2486 unsigned int dirty_tx;
2487 + dma_addr_t dma_tx_phy;
2488 + u32 tx_tail_addr;
2489 +};
2490 +
2491 +struct stmmac_rx_queue {
2492 + u32 queue_index;
2493 + struct stmmac_priv *priv_data;
2494 + struct dma_extended_desc *dma_erx;
2495 + struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
2496 + struct sk_buff **rx_skbuff;
2497 + dma_addr_t *rx_skbuff_dma;
2498 + unsigned int cur_rx;
2499 + unsigned int dirty_rx;
2500 + u32 rx_zeroc_thresh;
2501 + dma_addr_t dma_rx_phy;
2502 + u32 rx_tail_addr;
2503 + struct napi_struct napi ____cacheline_aligned_in_smp;
2504 +};
2505 +
2506 +struct stmmac_priv {
2507 + /* Frequently used values are kept adjacent for cache effect */
2508 u32 tx_count_frames;
2509 u32 tx_coal_frames;
2510 u32 tx_coal_timer;
2511 - struct stmmac_tx_info *tx_skbuff_dma;
2512 - dma_addr_t dma_tx_phy;
2513 +
2514 int tx_coalesce;
2515 int hwts_tx_en;
2516 bool tx_path_in_lpi_mode;
2517 struct timer_list txtimer;
2518 bool tso;
2519
2520 - struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
2521 - struct dma_extended_desc *dma_erx;
2522 - struct sk_buff **rx_skbuff;
2523 - unsigned int cur_rx;
2524 - unsigned int dirty_rx;
2525 unsigned int dma_buf_sz;
2526 unsigned int rx_copybreak;
2527 - unsigned int rx_zeroc_thresh;
2528 u32 rx_riwt;
2529 int hwts_rx_en;
2530 - dma_addr_t *rx_skbuff_dma;
2531 - dma_addr_t dma_rx_phy;
2532 -
2533 - struct napi_struct napi ____cacheline_aligned_in_smp;
2534
2535 void __iomem *ioaddr;
2536 struct net_device *dev;
2537 @@ -85,6 +98,12 @@ struct stmmac_priv {
2538 struct mac_device_info *hw;
2539 spinlock_t lock;
2540
2541 + /* RX Queue */
2542 + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
2543 +
2544 + /* TX Queue */
2545 + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
2546 +
2547 int oldlink;
2548 int speed;
2549 int oldduplex;
2550 @@ -119,8 +138,6 @@ struct stmmac_priv {
2551 spinlock_t ptp_lock;
2552 void __iomem *mmcaddr;
2553 void __iomem *ptpaddr;
2554 - u32 rx_tail_addr;
2555 - u32 tx_tail_addr;
2556 u32 mss;
2557
2558 #ifdef CONFIG_DEBUG_FS
2559 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2560 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2561 @@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device
2562 struct ethtool_pauseparam *pause)
2563 {
2564 struct stmmac_priv *priv = netdev_priv(netdev);
2565 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2566 struct phy_device *phy = netdev->phydev;
2567 int new_pause = FLOW_OFF;
2568
2569 @@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device
2570 }
2571
2572 priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
2573 - priv->pause);
2574 + priv->pause, tx_cnt);
2575 return 0;
2576 }
2577
2578 @@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(str
2579 struct ethtool_stats *dummy, u64 *data)
2580 {
2581 struct stmmac_priv *priv = netdev_priv(dev);
2582 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
2583 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
2584 int i, j = 0;
2585
2586 /* Update the DMA HW counters for dwmac10/100 */
2587 @@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(str
2588 if ((priv->hw->mac->debug) &&
2589 (priv->synopsys_id >= DWMAC_CORE_3_50))
2590 priv->hw->mac->debug(priv->ioaddr,
2591 - (void *)&priv->xstats);
2592 + (void *)&priv->xstats,
2593 + rx_queues_count, tx_queues_count);
2594 }
2595 for (i = 0; i < STMMAC_STATS_LEN; i++) {
2596 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
2597 @@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct ne
2598 struct ethtool_coalesce *ec)
2599 {
2600 struct stmmac_priv *priv = netdev_priv(dev);
2601 + u32 rx_cnt = priv->plat->rx_queues_to_use;
2602 unsigned int rx_riwt;
2603
2604 /* Check not supported parameters */
2605 @@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct ne
2606 priv->tx_coal_frames = ec->tx_max_coalesced_frames;
2607 priv->tx_coal_timer = ec->tx_coalesce_usecs;
2608 priv->rx_riwt = rx_riwt;
2609 - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
2610 + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
2611
2612 return 0;
2613 }
2614 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2615 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2616 @@ -139,6 +139,64 @@ static void stmmac_verify_args(void)
2617 }
2618
2619 /**
2620 + * stmmac_disable_all_queues - Disable all queues
2621 + * @priv: driver private structure
2622 + */
2623 +static void stmmac_disable_all_queues(struct stmmac_priv *priv)
2624 +{
2625 + u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2626 + u32 queue;
2627 +
2628 + for (queue = 0; queue < rx_queues_cnt; queue++) {
2629 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2630 +
2631 + napi_disable(&rx_q->napi);
2632 + }
2633 +}
2634 +
2635 +/**
2636 + * stmmac_enable_all_queues - Enable all queues
2637 + * @priv: driver private structure
2638 + */
2639 +static void stmmac_enable_all_queues(struct stmmac_priv *priv)
2640 +{
2641 + u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2642 + u32 queue;
2643 +
2644 + for (queue = 0; queue < rx_queues_cnt; queue++) {
2645 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2646 +
2647 + napi_enable(&rx_q->napi);
2648 + }
2649 +}
2650 +
2651 +/**
2652 + * stmmac_stop_all_queues - Stop all queues
2653 + * @priv: driver private structure
2654 + */
2655 +static void stmmac_stop_all_queues(struct stmmac_priv *priv)
2656 +{
2657 + u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2658 + u32 queue;
2659 +
2660 + for (queue = 0; queue < tx_queues_cnt; queue++)
2661 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2662 +}
2663 +
2664 +/**
2665 + * stmmac_start_all_queues - Start all queues
2666 + * @priv: driver private structure
2667 + */
2668 +static void stmmac_start_all_queues(struct stmmac_priv *priv)
2669 +{
2670 + u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2671 + u32 queue;
2672 +
2673 + for (queue = 0; queue < tx_queues_cnt; queue++)
2674 + netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
2675 +}
2676 +
2677 +/**
2678 * stmmac_clk_csr_set - dynamically set the MDC clock
2679 * @priv: driver private structure
2680 * Description: this is to dynamically set the MDC clock according to the csr
2681 @@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf
2682 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2683 }
2684
2685 -static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
2686 +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2687 {
2688 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2689 u32 avail;
2690
2691 - if (priv->dirty_tx > priv->cur_tx)
2692 - avail = priv->dirty_tx - priv->cur_tx - 1;
2693 + if (tx_q->dirty_tx > tx_q->cur_tx)
2694 + avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
2695 else
2696 - avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
2697 + avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
2698
2699 return avail;
2700 }
2701
2702 -static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
2703 +/**
2704 + * stmmac_rx_dirty - Get RX queue dirty
2705 + * @priv: driver private structure
2706 + * @queue: RX queue index
2707 + */
2708 +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
2709 {
2710 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2711 u32 dirty;
2712
2713 - if (priv->dirty_rx <= priv->cur_rx)
2714 - dirty = priv->cur_rx - priv->dirty_rx;
2715 + if (rx_q->dirty_rx <= rx_q->cur_rx)
2716 + dirty = rx_q->cur_rx - rx_q->dirty_rx;
2717 else
2718 - dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
2719 + dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
2720
2721 return dirty;
2722 }
2723 @@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_spe
2724 */
2725 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
2726 {
2727 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2728 + u32 queue;
2729 +
2730 + /* check if all TX queues have the work finished */
2731 + for (queue = 0; queue < tx_cnt; queue++) {
2732 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2733 +
2734 + if (tx_q->dirty_tx != tx_q->cur_tx)
2735 + return; /* still unfinished work */
2736 + }
2737 +
2738 /* Check and enter in LPI mode */
2739 - if ((priv->dirty_tx == priv->cur_tx) &&
2740 - (priv->tx_path_in_lpi_mode == false))
2741 + if (!priv->tx_path_in_lpi_mode)
2742 priv->hw->mac->set_eee_mode(priv->hw,
2743 priv->plat->en_tx_lpi_clockgating);
2744 }
2745 @@ -359,14 +434,14 @@ static void stmmac_get_tx_hwtstamp(struc
2746 return;
2747
2748 /* check tx tstamp status */
2749 - if (!priv->hw->desc->get_tx_timestamp_status(p)) {
2750 + if (priv->hw->desc->get_tx_timestamp_status(p)) {
2751 /* get the valid tstamp */
2752 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
2753
2754 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2755 shhwtstamp.hwtstamp = ns_to_ktime(ns);
2756
2757 - netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
2758 + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
2759 /* pass tstamp to stack */
2760 skb_tstamp_tx(skb, &shhwtstamp);
2761 }
2762 @@ -393,19 +468,19 @@ static void stmmac_get_rx_hwtstamp(struc
2763 return;
2764
2765 /* Check if timestamp is available */
2766 - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
2767 + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
2768 /* For GMAC4, the valid timestamp is from CTX next desc. */
2769 if (priv->plat->has_gmac4)
2770 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
2771 else
2772 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
2773
2774 - netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
2775 + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
2776 shhwtstamp = skb_hwtstamps(skb);
2777 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2778 shhwtstamp->hwtstamp = ns_to_ktime(ns);
2779 } else {
2780 - netdev_err(priv->dev, "cannot get RX hw timestamp\n");
2781 + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
2782 }
2783 }
2784
2785 @@ -471,7 +546,10 @@ static int stmmac_hwtstamp_ioctl(struct
2786 /* PTP v1, UDP, any kind of event packet */
2787 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2788 /* take time stamp for all event messages */
2789 - snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
2790 + if (priv->plat->has_gmac4)
2791 + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
2792 + else
2793 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
2794
2795 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
2796 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
2797 @@ -503,7 +581,10 @@ static int stmmac_hwtstamp_ioctl(struct
2798 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
2799 ptp_v2 = PTP_TCR_TSVER2ENA;
2800 /* take time stamp for all event messages */
2801 - snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
2802 + if (priv->plat->has_gmac4)
2803 + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
2804 + else
2805 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
2806
2807 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
2808 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
2809 @@ -537,7 +618,10 @@ static int stmmac_hwtstamp_ioctl(struct
2810 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2811 ptp_v2 = PTP_TCR_TSVER2ENA;
2812 /* take time stamp for all event messages */
2813 - snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
2814 + if (priv->plat->has_gmac4)
2815 + snap_type_sel = PTP_GMAC4_TCR_SNAPTYPSEL_1;
2816 + else
2817 + snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
2818
2819 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
2820 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
2821 @@ -673,6 +757,19 @@ static void stmmac_release_ptp(struct st
2822 }
2823
2824 /**
2825 + * stmmac_mac_flow_ctrl - Configure flow control in all queues
2826 + * @priv: driver private structure
2827 + * Description: It is used for configuring the flow control in all queues
2828 + */
2829 +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
2830 +{
2831 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2832 +
2833 + priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
2834 + priv->pause, tx_cnt);
2835 +}
2836 +
2837 +/**
2838 * stmmac_adjust_link - adjusts the link parameters
2839 * @dev: net device structure
2840 * Description: this is the helper called by the physical abstraction layer
2841 @@ -687,7 +784,6 @@ static void stmmac_adjust_link(struct ne
2842 struct phy_device *phydev = dev->phydev;
2843 unsigned long flags;
2844 int new_state = 0;
2845 - unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
2846
2847 if (!phydev)
2848 return;
2849 @@ -709,8 +805,7 @@ static void stmmac_adjust_link(struct ne
2850 }
2851 /* Flow Control operation */
2852 if (phydev->pause)
2853 - priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
2854 - fc, pause_time);
2855 + stmmac_mac_flow_ctrl(priv, phydev->duplex);
2856
2857 if (phydev->speed != priv->speed) {
2858 new_state = 1;
2859 @@ -878,22 +973,56 @@ static int stmmac_init_phy(struct net_de
2860 return 0;
2861 }
2862
2863 -static void stmmac_display_rings(struct stmmac_priv *priv)
2864 +static void stmmac_display_rx_rings(struct stmmac_priv *priv)
2865 {
2866 - void *head_rx, *head_tx;
2867 + u32 rx_cnt = priv->plat->rx_queues_to_use;
2868 + void *head_rx;
2869 + u32 queue;
2870
2871 - if (priv->extend_desc) {
2872 - head_rx = (void *)priv->dma_erx;
2873 - head_tx = (void *)priv->dma_etx;
2874 - } else {
2875 - head_rx = (void *)priv->dma_rx;
2876 - head_tx = (void *)priv->dma_tx;
2877 + /* Display RX rings */
2878 + for (queue = 0; queue < rx_cnt; queue++) {
2879 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2880 +
2881 + pr_info("\tRX Queue %u rings\n", queue);
2882 +
2883 + if (priv->extend_desc)
2884 + head_rx = (void *)rx_q->dma_erx;
2885 + else
2886 + head_rx = (void *)rx_q->dma_rx;
2887 +
2888 + /* Display RX ring */
2889 + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
2890 + }
2891 +}
2892 +
2893 +static void stmmac_display_tx_rings(struct stmmac_priv *priv)
2894 +{
2895 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2896 + void *head_tx;
2897 + u32 queue;
2898 +
2899 + /* Display TX rings */
2900 + for (queue = 0; queue < tx_cnt; queue++) {
2901 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2902 +
2903 + pr_info("\tTX Queue %d rings\n", queue);
2904 +
2905 + if (priv->extend_desc)
2906 + head_tx = (void *)tx_q->dma_etx;
2907 + else
2908 + head_tx = (void *)tx_q->dma_tx;
2909 +
2910 + priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
2911 }
2912 +}
2913
2914 - /* Display Rx ring */
2915 - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
2916 - /* Display Tx ring */
2917 - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
2918 +static void stmmac_display_rings(struct stmmac_priv *priv)
2919 +{
2920 + /* Display RX ring */
2921 + stmmac_display_rx_rings(priv);
2922 +
2923 + /* Display TX ring */
2924 + stmmac_display_tx_rings(priv);
2925 }
2926
2927 static int stmmac_set_bfsize(int mtu, int bufsize)
2928 @@ -913,48 +1042,88 @@ static int stmmac_set_bfsize(int mtu, in
2929 }
2930
2931 /**
2932 - * stmmac_clear_descriptors - clear descriptors
2933 + * stmmac_clear_rx_descriptors - clear RX descriptors
2934 * @priv: driver private structure
2935 - * Description: this function is called to clear the tx and rx descriptors
2936 + * @queue: RX queue index
2937 + * Description: this function is called to clear the RX descriptors
2938 * in case of both basic and extended descriptors are used.
2939 */
2940 -static void stmmac_clear_descriptors(struct stmmac_priv *priv)
2941 +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
2942 {
2943 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2944 int i;
2945
2946 - /* Clear the Rx/Tx descriptors */
2947 + /* Clear the RX descriptors */
2948 for (i = 0; i < DMA_RX_SIZE; i++)
2949 if (priv->extend_desc)
2950 - priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
2951 + priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
2952 priv->use_riwt, priv->mode,
2953 (i == DMA_RX_SIZE - 1));
2954 else
2955 - priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
2956 + priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
2957 priv->use_riwt, priv->mode,
2958 (i == DMA_RX_SIZE - 1));
2959 +}
2960 +
2961 +/**
2962 + * stmmac_clear_tx_descriptors - clear tx descriptors
2963 + * @priv: driver private structure
2964 + * @queue: TX queue index.
2965 + * Description: this function is called to clear the TX descriptors
2966 + * in case of both basic and extended descriptors are used.
2967 + */
2968 +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
2969 +{
2970 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2971 + int i;
2972 +
2973 + /* Clear the TX descriptors */
2974 for (i = 0; i < DMA_TX_SIZE; i++)
2975 if (priv->extend_desc)
2976 - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
2977 + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
2978 priv->mode,
2979 (i == DMA_TX_SIZE - 1));
2980 else
2981 - priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
2982 + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
2983 priv->mode,
2984 (i == DMA_TX_SIZE - 1));
2985 }
2986
2987 /**
2988 + * stmmac_clear_descriptors - clear descriptors
2989 + * @priv: driver private structure
2990 + * Description: this function is called to clear the TX and RX descriptors
2991 + * in case of both basic and extended descriptors are used.
2992 + */
2993 +static void stmmac_clear_descriptors(struct stmmac_priv *priv)
2994 +{
2995 + u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
2996 + u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
2997 + u32 queue;
2998 +
2999 + /* Clear the RX descriptors */
3000 + for (queue = 0; queue < rx_queue_cnt; queue++)
3001 + stmmac_clear_rx_descriptors(priv, queue);
3002 +
3003 + /* Clear the TX descriptors */
3004 + for (queue = 0; queue < tx_queue_cnt; queue++)
3005 + stmmac_clear_tx_descriptors(priv, queue);
3006 +}
3007 +
3008 +/**
3009 * stmmac_init_rx_buffers - init the RX descriptor buffer.
3010 * @priv: driver private structure
3011 * @p: descriptor pointer
3012 * @i: descriptor index
3013 - * @flags: gfp flag.
3014 + * @flags: gfp flag
3015 + * @queue: RX queue index
3016 * Description: this function is called to allocate a receive buffer, perform
3017 * the DMA mapping and init the descriptor.
3018 */
3019 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
3020 - int i, gfp_t flags)
3021 + int i, gfp_t flags, u32 queue)
3022 {
3023 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3024 struct sk_buff *skb;
3025
3026 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
3027 @@ -963,20 +1132,20 @@ static int stmmac_init_rx_buffers(struct
3028 "%s: Rx init fails; skb is NULL\n", __func__);
3029 return -ENOMEM;
3030 }
3031 - priv->rx_skbuff[i] = skb;
3032 - priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
3033 + rx_q->rx_skbuff[i] = skb;
3034 + rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
3035 priv->dma_buf_sz,
3036 DMA_FROM_DEVICE);
3037 - if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
3038 + if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
3039 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
3040 dev_kfree_skb_any(skb);
3041 return -EINVAL;
3042 }
3043
3044 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3045 - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
3046 + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
3047 else
3048 - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
3049 + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
3050
3051 if ((priv->hw->mode->init_desc3) &&
3052 (priv->dma_buf_sz == BUF_SIZE_16KiB))
3053 @@ -985,30 +1154,71 @@ static int stmmac_init_rx_buffers(struct
3054 return 0;
3055 }
3056
3057 -static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
3058 +/**
3059 + * stmmac_free_rx_buffer - free RX dma buffers
3060 + * @priv: private structure
3061 + * @queue: RX queue index
3062 + * @i: buffer index.
3063 + */
3064 +static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
3065 {
3066 - if (priv->rx_skbuff[i]) {
3067 - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
3068 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3069 +
3070 + if (rx_q->rx_skbuff[i]) {
3071 + dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
3072 priv->dma_buf_sz, DMA_FROM_DEVICE);
3073 - dev_kfree_skb_any(priv->rx_skbuff[i]);
3074 + dev_kfree_skb_any(rx_q->rx_skbuff[i]);
3075 }
3076 - priv->rx_skbuff[i] = NULL;
3077 + rx_q->rx_skbuff[i] = NULL;
3078 }
3079
3080 /**
3081 - * init_dma_desc_rings - init the RX/TX descriptor rings
3082 + * stmmac_free_tx_buffer - free TX dma buffers
3083 + * @priv: private structure
3084 + * @queue: TX queue index
3085 + * @i: buffer index.
3086 + */
3087 +static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
3088 +{
3089 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3090 +
3091 + if (tx_q->tx_skbuff_dma[i].buf) {
3092 + if (tx_q->tx_skbuff_dma[i].map_as_page)
3093 + dma_unmap_page(priv->device,
3094 + tx_q->tx_skbuff_dma[i].buf,
3095 + tx_q->tx_skbuff_dma[i].len,
3096 + DMA_TO_DEVICE);
3097 + else
3098 + dma_unmap_single(priv->device,
3099 + tx_q->tx_skbuff_dma[i].buf,
3100 + tx_q->tx_skbuff_dma[i].len,
3101 + DMA_TO_DEVICE);
3102 + }
3103 +
3104 + if (tx_q->tx_skbuff[i]) {
3105 + dev_kfree_skb_any(tx_q->tx_skbuff[i]);
3106 + tx_q->tx_skbuff[i] = NULL;
3107 + tx_q->tx_skbuff_dma[i].buf = 0;
3108 + tx_q->tx_skbuff_dma[i].map_as_page = false;
3109 + }
3110 +}
3111 +
3112 +/**
3113 + * init_dma_rx_desc_rings - init the RX descriptor rings
3114 * @dev: net device structure
3115 * @flags: gfp flag.
3116 - * Description: this function initializes the DMA RX/TX descriptors
3117 + * Description: this function initializes the DMA RX descriptors
3118 * and allocates the socket buffers. It supports the chained and ring
3119 * modes.
3120 */
3121 -static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
3122 +static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
3123 {
3124 - int i;
3125 struct stmmac_priv *priv = netdev_priv(dev);
3126 + u32 rx_count = priv->plat->rx_queues_to_use;
3127 unsigned int bfsize = 0;
3128 int ret = -ENOMEM;
3129 + int queue;
3130 + int i;
3131
3132 if (priv->hw->mode->set_16kib_bfsize)
3133 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
3134 @@ -1018,257 +1228,516 @@ static int init_dma_desc_rings(struct ne
3135
3136 priv->dma_buf_sz = bfsize;
3137
3138 - netif_dbg(priv, probe, priv->dev,
3139 - "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
3140 - __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
3141 -
3142 /* RX INITIALIZATION */
3143 netif_dbg(priv, probe, priv->dev,
3144 "SKB addresses:\nskb\t\tskb data\tdma data\n");
3145
3146 - for (i = 0; i < DMA_RX_SIZE; i++) {
3147 - struct dma_desc *p;
3148 - if (priv->extend_desc)
3149 - p = &((priv->dma_erx + i)->basic);
3150 - else
3151 - p = priv->dma_rx + i;
3152 + for (queue = 0; queue < rx_count; queue++) {
3153 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3154 +
3155 + netif_dbg(priv, probe, priv->dev,
3156 + "(%s) dma_rx_phy=0x%08x\n", __func__,
3157 + (u32)rx_q->dma_rx_phy);
3158 +
3159 + for (i = 0; i < DMA_RX_SIZE; i++) {
3160 + struct dma_desc *p;
3161 +
3162 + if (priv->extend_desc)
3163 + p = &((rx_q->dma_erx + i)->basic);
3164 + else
3165 + p = rx_q->dma_rx + i;
3166
3167 - ret = stmmac_init_rx_buffers(priv, p, i, flags);
3168 - if (ret)
3169 - goto err_init_rx_buffers;
3170 + ret = stmmac_init_rx_buffers(priv, p, i, flags,
3171 + queue);
3172 + if (ret)
3173 + goto err_init_rx_buffers;
3174 +
3175 + netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
3176 + rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
3177 + (unsigned int)rx_q->rx_skbuff_dma[i]);
3178 + }
3179 +
3180 + rx_q->cur_rx = 0;
3181 + rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
3182 +
3183 + stmmac_clear_rx_descriptors(priv, queue);
3184
3185 - netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
3186 - priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
3187 - (unsigned int)priv->rx_skbuff_dma[i]);
3188 + /* Setup the chained descriptor addresses */
3189 + if (priv->mode == STMMAC_CHAIN_MODE) {
3190 + if (priv->extend_desc)
3191 + priv->hw->mode->init(rx_q->dma_erx,
3192 + rx_q->dma_rx_phy,
3193 + DMA_RX_SIZE, 1);
3194 + else
3195 + priv->hw->mode->init(rx_q->dma_rx,
3196 + rx_q->dma_rx_phy,
3197 + DMA_RX_SIZE, 0);
3198 + }
3199 }
3200 - priv->cur_rx = 0;
3201 - priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
3202 +
3203 buf_sz = bfsize;
3204
3205 - /* Setup the chained descriptor addresses */
3206 - if (priv->mode == STMMAC_CHAIN_MODE) {
3207 - if (priv->extend_desc) {
3208 - priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
3209 - DMA_RX_SIZE, 1);
3210 - priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
3211 - DMA_TX_SIZE, 1);
3212 - } else {
3213 - priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
3214 - DMA_RX_SIZE, 0);
3215 - priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
3216 - DMA_TX_SIZE, 0);
3217 - }
3218 + return 0;
3219 +
3220 +err_init_rx_buffers:
3221 + while (queue >= 0) {
3222 + while (--i >= 0)
3223 + stmmac_free_rx_buffer(priv, queue, i);
3224 +
3225 + if (queue == 0)
3226 + break;
3227 +
3228 + i = DMA_RX_SIZE;
3229 + queue--;
3230 }
3231
3232 - /* TX INITIALIZATION */
3233 - for (i = 0; i < DMA_TX_SIZE; i++) {
3234 - struct dma_desc *p;
3235 - if (priv->extend_desc)
3236 - p = &((priv->dma_etx + i)->basic);
3237 - else
3238 - p = priv->dma_tx + i;
3239 + return ret;
3240 +}
3241
3242 - if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3243 - p->des0 = 0;
3244 - p->des1 = 0;
3245 - p->des2 = 0;
3246 - p->des3 = 0;
3247 - } else {
3248 - p->des2 = 0;
3249 +/**
3250 + * init_dma_tx_desc_rings - init the TX descriptor rings
3251 + * @dev: net device structure.
3252 + * Description: this function initializes the DMA TX descriptors
3253 + * and clears the TX buffer book-keeping. It supports the chained and
3254 + * ring modes.
3255 + */
3256 +static int init_dma_tx_desc_rings(struct net_device *dev)
3257 +{
3258 + struct stmmac_priv *priv = netdev_priv(dev);
3259 + u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
3260 + u32 queue;
3261 + int i;
3262 +
3263 + for (queue = 0; queue < tx_queue_cnt; queue++) {
3264 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3265 +
3266 + netif_dbg(priv, probe, priv->dev,
3267 + "(%s) dma_tx_phy=0x%08x\n", __func__,
3268 + (u32)tx_q->dma_tx_phy);
3269 +
3270 + /* Setup the chained descriptor addresses */
3271 + if (priv->mode == STMMAC_CHAIN_MODE) {
3272 + if (priv->extend_desc)
3273 + priv->hw->mode->init(tx_q->dma_etx,
3274 + tx_q->dma_tx_phy,
3275 + DMA_TX_SIZE, 1);
3276 + else
3277 + priv->hw->mode->init(tx_q->dma_tx,
3278 + tx_q->dma_tx_phy,
3279 + DMA_TX_SIZE, 0);
3280 }
3281
3282 - priv->tx_skbuff_dma[i].buf = 0;
3283 - priv->tx_skbuff_dma[i].map_as_page = false;
3284 - priv->tx_skbuff_dma[i].len = 0;
3285 - priv->tx_skbuff_dma[i].last_segment = false;
3286 - priv->tx_skbuff[i] = NULL;
3287 + for (i = 0; i < DMA_TX_SIZE; i++) {
3288 + struct dma_desc *p;
3289 + if (priv->extend_desc)
3290 + p = &((tx_q->dma_etx + i)->basic);
3291 + else
3292 + p = tx_q->dma_tx + i;
3293 +
3294 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3295 + p->des0 = 0;
3296 + p->des1 = 0;
3297 + p->des2 = 0;
3298 + p->des3 = 0;
3299 + } else {
3300 + p->des2 = 0;
3301 + }
3302 +
3303 + tx_q->tx_skbuff_dma[i].buf = 0;
3304 + tx_q->tx_skbuff_dma[i].map_as_page = false;
3305 + tx_q->tx_skbuff_dma[i].len = 0;
3306 + tx_q->tx_skbuff_dma[i].last_segment = false;
3307 + tx_q->tx_skbuff[i] = NULL;
3308 + }
3309 +
3310 + tx_q->dirty_tx = 0;
3311 + tx_q->cur_tx = 0;
3312 +
3313 + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
3314 }
3315
3316 - priv->dirty_tx = 0;
3317 - priv->cur_tx = 0;
3318 - netdev_reset_queue(priv->dev);
3319 + return 0;
3320 +}
3321 +
3322 +/**
3323 + * init_dma_desc_rings - init the RX/TX descriptor rings
3324 + * @dev: net device structure
3325 + * @flags: gfp flag.
3326 + * Description: this function initializes the DMA RX/TX descriptors
3327 + * and allocates the socket buffers. It supports the chained and ring
3328 + * modes.
3329 + */
3330 +static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
3331 +{
3332 + struct stmmac_priv *priv = netdev_priv(dev);
3333 + int ret;
3334 +
3335 + ret = init_dma_rx_desc_rings(dev, flags);
3336 + if (ret)
3337 + return ret;
3338 +
3339 + ret = init_dma_tx_desc_rings(dev);
3340
3341 stmmac_clear_descriptors(priv);
3342
3343 if (netif_msg_hw(priv))
3344 stmmac_display_rings(priv);
3345
3346 - return 0;
3347 -err_init_rx_buffers:
3348 - while (--i >= 0)
3349 - stmmac_free_rx_buffers(priv, i);
3350 return ret;
3351 }
3352
3353 -static void dma_free_rx_skbufs(struct stmmac_priv *priv)
3354 +/**
3355 + * dma_free_rx_skbufs - free RX dma buffers
3356 + * @priv: private structure
3357 + * @queue: RX queue index
3358 + */
3359 +static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
3360 {
3361 int i;
3362
3363 for (i = 0; i < DMA_RX_SIZE; i++)
3364 - stmmac_free_rx_buffers(priv, i);
3365 + stmmac_free_rx_buffer(priv, queue, i);
3366 }
3367
3368 -static void dma_free_tx_skbufs(struct stmmac_priv *priv)
3369 +/**
3370 + * dma_free_tx_skbufs - free TX dma buffers
3371 + * @priv: private structure
3372 + * @queue: TX queue index
3373 + */
3374 +static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
3375 {
3376 int i;
3377
3378 - for (i = 0; i < DMA_TX_SIZE; i++) {
3379 - if (priv->tx_skbuff_dma[i].buf) {
3380 - if (priv->tx_skbuff_dma[i].map_as_page)
3381 - dma_unmap_page(priv->device,
3382 - priv->tx_skbuff_dma[i].buf,
3383 - priv->tx_skbuff_dma[i].len,
3384 - DMA_TO_DEVICE);
3385 - else
3386 - dma_unmap_single(priv->device,
3387 - priv->tx_skbuff_dma[i].buf,
3388 - priv->tx_skbuff_dma[i].len,
3389 - DMA_TO_DEVICE);
3390 + for (i = 0; i < DMA_TX_SIZE; i++)
3391 + stmmac_free_tx_buffer(priv, queue, i);
3392 +}
3393 +
3394 +/**
3395 + * free_dma_rx_desc_resources - free RX dma desc resources
3396 + * @priv: private structure
3397 + */
3398 +static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
3399 +{
3400 + u32 rx_count = priv->plat->rx_queues_to_use;
3401 + u32 queue;
3402 +
3403 + /* Free RX queue resources */
3404 + for (queue = 0; queue < rx_count; queue++) {
3405 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3406 +
3407 + /* Release the DMA RX socket buffers */
3408 + dma_free_rx_skbufs(priv, queue);
3409 +
3410 + /* Free DMA regions of consistent memory previously allocated */
3411 + if (!priv->extend_desc)
3412 + dma_free_coherent(priv->device,
3413 + DMA_RX_SIZE * sizeof(struct dma_desc),
3414 + rx_q->dma_rx, rx_q->dma_rx_phy);
3415 + else
3416 + dma_free_coherent(priv->device, DMA_RX_SIZE *
3417 + sizeof(struct dma_extended_desc),
3418 + rx_q->dma_erx, rx_q->dma_rx_phy);
3419 +
3420 + kfree(rx_q->rx_skbuff_dma);
3421 + kfree(rx_q->rx_skbuff);
3422 + }
3423 +}
3424 +
3425 +/**
3426 + * free_dma_tx_desc_resources - free TX dma desc resources
3427 + * @priv: private structure
3428 + */
3429 +static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
3430 +{
3431 + u32 tx_count = priv->plat->tx_queues_to_use;
3432 + u32 queue = 0;
3433 +
3434 + /* Free TX queue resources */
3435 + for (queue = 0; queue < tx_count; queue++) {
3436 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3437 +
3438 + /* Release the DMA TX socket buffers */
3439 + dma_free_tx_skbufs(priv, queue);
3440 +
3441 + /* Free DMA regions of consistent memory previously allocated */
3442 + if (!priv->extend_desc)
3443 + dma_free_coherent(priv->device,
3444 + DMA_TX_SIZE * sizeof(struct dma_desc),
3445 + tx_q->dma_tx, tx_q->dma_tx_phy);
3446 + else
3447 + dma_free_coherent(priv->device, DMA_TX_SIZE *
3448 + sizeof(struct dma_extended_desc),
3449 + tx_q->dma_etx, tx_q->dma_tx_phy);
3450 +
3451 + kfree(tx_q->tx_skbuff_dma);
3452 + kfree(tx_q->tx_skbuff);
3453 + }
3454 +}
3455 +
3456 +/**
3457 + * alloc_dma_rx_desc_resources - alloc RX resources.
3458 + * @priv: private structure
3459 + * Description: according to which descriptor can be used (extended or
3460 + * basic) this function allocates the resources for the RX path. It
3461 + * pre-allocates the RX socket buffers in order to allow the zero-copy
3462 + * mechanism.
3463 + */
3464 +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
3465 +{
3466 + u32 rx_count = priv->plat->rx_queues_to_use;
3467 + int ret = -ENOMEM;
3468 + u32 queue;
3469 +
3470 + /* RX queues buffers and DMA */
3471 + for (queue = 0; queue < rx_count; queue++) {
3472 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3473 +
3474 + rx_q->queue_index = queue;
3475 + rx_q->priv_data = priv;
3476 +
3477 + rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
3478 + sizeof(dma_addr_t),
3479 + GFP_KERNEL);
3480 + if (!rx_q->rx_skbuff_dma)
3481 + return -ENOMEM;
3482 +
3483 + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
3484 + sizeof(struct sk_buff *),
3485 + GFP_KERNEL);
3486 + if (!rx_q->rx_skbuff)
3487 + goto err_dma;
3488 +
3489 + if (priv->extend_desc) {
3490 + rx_q->dma_erx = dma_zalloc_coherent(priv->device,
3491 + DMA_RX_SIZE *
3492 + sizeof(struct
3493 + dma_extended_desc),
3494 + &rx_q->dma_rx_phy,
3495 + GFP_KERNEL);
3496 + if (!rx_q->dma_erx)
3497 + goto err_dma;
3498 +
3499 + } else {
3500 + rx_q->dma_rx = dma_zalloc_coherent(priv->device,
3501 + DMA_RX_SIZE *
3502 + sizeof(struct
3503 + dma_desc),
3504 + &rx_q->dma_rx_phy,
3505 + GFP_KERNEL);
3506 + if (!rx_q->dma_rx)
3507 + goto err_dma;
3508 + }
3509 + }
3510 +
3511 + return 0;
3512 +
3513 +err_dma:
3514 + free_dma_rx_desc_resources(priv);
3515 +
3516 + return ret;
3517 +}
3518 +
3519 +/**
3520 + * alloc_dma_tx_desc_resources - alloc TX resources.
3521 + * @priv: private structure
3522 + * Description: according to which descriptor can be used (extended or
3523 + * basic) this function allocates the resources for the TX path: the
3524 + * descriptor rings and the per-queue TX buffer book-keeping arrays
3525 + * (tx_skbuff and tx_skbuff_dma).
3526 + */
3527 +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
3528 +{
3529 + u32 tx_count = priv->plat->tx_queues_to_use;
3530 + int ret = -ENOMEM;
3531 + u32 queue;
3532 +
3533 + /* TX queues buffers and DMA */
3534 + for (queue = 0; queue < tx_count; queue++) {
3535 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3536 +
3537 + tx_q->queue_index = queue;
3538 + tx_q->priv_data = priv;
3539 +
3540 + tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
3541 + sizeof(*tx_q->tx_skbuff_dma),
3542 + GFP_KERNEL);
3543 + if (!tx_q->tx_skbuff_dma)
3544 + return -ENOMEM;
3545 +
3546 + tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
3547 + sizeof(struct sk_buff *),
3548 + GFP_KERNEL);
3549 + if (!tx_q->tx_skbuff)
3550 + goto err_dma_buffers;
3551 +
3552 + if (priv->extend_desc) {
3553 + tx_q->dma_etx = dma_zalloc_coherent(priv->device,
3554 + DMA_TX_SIZE *
3555 + sizeof(struct
3556 + dma_extended_desc),
3557 + &tx_q->dma_tx_phy,
3558 + GFP_KERNEL);
3559 + if (!tx_q->dma_etx)
3560 + goto err_dma_buffers;
3561 + } else {
3562 + tx_q->dma_tx = dma_zalloc_coherent(priv->device,
3563 + DMA_TX_SIZE *
3564 + sizeof(struct
3565 + dma_desc),
3566 + &tx_q->dma_tx_phy,
3567 + GFP_KERNEL);
3568 + if (!tx_q->dma_tx)
3569 + goto err_dma_buffers;
3570 }
3571 + }
3572 +
3573 + return 0;
3574 +
3575 +err_dma_buffers:
3576 + free_dma_tx_desc_resources(priv);
3577 +
3578 + return ret;
3579 +}
3580 +
3581 +/**
3582 + * alloc_dma_desc_resources - alloc TX/RX resources.
3583 + * @priv: private structure
3584 + * Description: according to which descriptor can be used (extended or
3585 + * basic) this function allocates the resources for the TX and RX paths.
3586 + * In case of reception, for example, it pre-allocates the RX socket
3587 + * buffers in order to allow the zero-copy mechanism.
3588 + */
3589 +static int alloc_dma_desc_resources(struct stmmac_priv *priv)
3590 +{
3591 + /* RX Allocation */
3592 + int ret = alloc_dma_rx_desc_resources(priv);
3593 +
3594 + if (ret)
3595 + return ret;
3596 +
3597 + ret = alloc_dma_tx_desc_resources(priv);
3598 +
3599 + return ret;
3600 +}
3601 +
3602 +/**
3603 + * free_dma_desc_resources - free dma desc resources
3604 + * @priv: private structure
3605 + */
3606 +static void free_dma_desc_resources(struct stmmac_priv *priv)
3607 +{
3608 + /* Release the DMA RX socket buffers */
3609 + free_dma_rx_desc_resources(priv);
3610 +
3611 + /* Release the DMA TX socket buffers */
3612 + free_dma_tx_desc_resources(priv);
3613 +}
3614 +
3615 +/**
3616 + * stmmac_mac_enable_rx_queues - Enable MAC rx queues
3617 + * @priv: driver private structure
3618 + * Description: It is used for enabling the rx queues in the MAC
3619 + */
3620 +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
3621 +{
3622 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
3623 + int queue;
3624 + u8 mode;
3625
3626 - if (priv->tx_skbuff[i]) {
3627 - dev_kfree_skb_any(priv->tx_skbuff[i]);
3628 - priv->tx_skbuff[i] = NULL;
3629 - priv->tx_skbuff_dma[i].buf = 0;
3630 - priv->tx_skbuff_dma[i].map_as_page = false;
3631 - }
3632 + for (queue = 0; queue < rx_queues_count; queue++) {
3633 + mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
3634 + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
3635 }
3636 }
3637
3638 /**
3639 - * alloc_dma_desc_resources - alloc TX/RX resources.
3640 - * @priv: private structure
3641 - * Description: according to which descriptor can be used (extend or basic)
3642 - * this function allocates the resources for TX and RX paths. In case of
3643 - * reception, for example, it pre-allocated the RX socket buffer in order to
3644 - * allow zero-copy mechanism.
3645 + * stmmac_start_rx_dma - start RX DMA channel
3646 + * @priv: driver private structure
3647 + * @chan: RX channel index
3648 + * Description:
3649 + * This starts an RX DMA channel
3650 */
3651 -static int alloc_dma_desc_resources(struct stmmac_priv *priv)
3652 +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
3653 {
3654 - int ret = -ENOMEM;
3655 -
3656 - priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
3657 - GFP_KERNEL);
3658 - if (!priv->rx_skbuff_dma)
3659 - return -ENOMEM;
3660 -
3661 - priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
3662 - GFP_KERNEL);
3663 - if (!priv->rx_skbuff)
3664 - goto err_rx_skbuff;
3665 -
3666 - priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
3667 - sizeof(*priv->tx_skbuff_dma),
3668 - GFP_KERNEL);
3669 - if (!priv->tx_skbuff_dma)
3670 - goto err_tx_skbuff_dma;
3671 -
3672 - priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
3673 - GFP_KERNEL);
3674 - if (!priv->tx_skbuff)
3675 - goto err_tx_skbuff;
3676 -
3677 - if (priv->extend_desc) {
3678 - priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
3679 - sizeof(struct
3680 - dma_extended_desc),
3681 - &priv->dma_rx_phy,
3682 - GFP_KERNEL);
3683 - if (!priv->dma_erx)
3684 - goto err_dma;
3685 -
3686 - priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
3687 - sizeof(struct
3688 - dma_extended_desc),
3689 - &priv->dma_tx_phy,
3690 - GFP_KERNEL);
3691 - if (!priv->dma_etx) {
3692 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3693 - sizeof(struct dma_extended_desc),
3694 - priv->dma_erx, priv->dma_rx_phy);
3695 - goto err_dma;
3696 - }
3697 - } else {
3698 - priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
3699 - sizeof(struct dma_desc),
3700 - &priv->dma_rx_phy,
3701 - GFP_KERNEL);
3702 - if (!priv->dma_rx)
3703 - goto err_dma;
3704 + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
3705 + priv->hw->dma->start_rx(priv->ioaddr, chan);
3706 +}
3707
3708 - priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
3709 - sizeof(struct dma_desc),
3710 - &priv->dma_tx_phy,
3711 - GFP_KERNEL);
3712 - if (!priv->dma_tx) {
3713 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3714 - sizeof(struct dma_desc),
3715 - priv->dma_rx, priv->dma_rx_phy);
3716 - goto err_dma;
3717 - }
3718 - }
3719 +/**
3720 + * stmmac_start_tx_dma - start TX DMA channel
3721 + * @priv: driver private structure
3722 + * @chan: TX channel index
3723 + * Description:
3724 + * This starts a TX DMA channel
3725 + */
3726 +static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
3727 +{
3728 + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
3729 + priv->hw->dma->start_tx(priv->ioaddr, chan);
3730 +}
3731
3732 - return 0;
3733 +/**
3734 + * stmmac_stop_rx_dma - stop RX DMA channel
3735 + * @priv: driver private structure
3736 + * @chan: RX channel index
3737 + * Description:
3738 + * This stops an RX DMA channel
3739 + */
3740 +static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
3741 +{
3742 + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
3743 + priv->hw->dma->stop_rx(priv->ioaddr, chan);
3744 +}
3745
3746 -err_dma:
3747 - kfree(priv->tx_skbuff);
3748 -err_tx_skbuff:
3749 - kfree(priv->tx_skbuff_dma);
3750 -err_tx_skbuff_dma:
3751 - kfree(priv->rx_skbuff);
3752 -err_rx_skbuff:
3753 - kfree(priv->rx_skbuff_dma);
3754 - return ret;
3755 +/**
3756 + * stmmac_stop_tx_dma - stop TX DMA channel
3757 + * @priv: driver private structure
3758 + * @chan: TX channel index
3759 + * Description:
3760 + * This stops a TX DMA channel
3761 + */
3762 +static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
3763 +{
3764 + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
3765 + priv->hw->dma->stop_tx(priv->ioaddr, chan);
3766 }
3767
3768 -static void free_dma_desc_resources(struct stmmac_priv *priv)
3769 +/**
3770 + * stmmac_start_all_dma - start all RX and TX DMA channels
3771 + * @priv: driver private structure
3772 + * Description:
3773 + * This starts all the RX and TX DMA channels
3774 + */
3775 +static void stmmac_start_all_dma(struct stmmac_priv *priv)
3776 {
3777 - /* Release the DMA TX/RX socket buffers */
3778 - dma_free_rx_skbufs(priv);
3779 - dma_free_tx_skbufs(priv);
3780 -
3781 - /* Free DMA regions of consistent memory previously allocated */
3782 - if (!priv->extend_desc) {
3783 - dma_free_coherent(priv->device,
3784 - DMA_TX_SIZE * sizeof(struct dma_desc),
3785 - priv->dma_tx, priv->dma_tx_phy);
3786 - dma_free_coherent(priv->device,
3787 - DMA_RX_SIZE * sizeof(struct dma_desc),
3788 - priv->dma_rx, priv->dma_rx_phy);
3789 - } else {
3790 - dma_free_coherent(priv->device, DMA_TX_SIZE *
3791 - sizeof(struct dma_extended_desc),
3792 - priv->dma_etx, priv->dma_tx_phy);
3793 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3794 - sizeof(struct dma_extended_desc),
3795 - priv->dma_erx, priv->dma_rx_phy);
3796 - }
3797 - kfree(priv->rx_skbuff_dma);
3798 - kfree(priv->rx_skbuff);
3799 - kfree(priv->tx_skbuff_dma);
3800 - kfree(priv->tx_skbuff);
3801 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3802 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3803 + u32 chan = 0;
3804 +
3805 + for (chan = 0; chan < rx_channels_count; chan++)
3806 + stmmac_start_rx_dma(priv, chan);
3807 +
3808 + for (chan = 0; chan < tx_channels_count; chan++)
3809 + stmmac_start_tx_dma(priv, chan);
3810 }
3811
3812 /**
3813 - * stmmac_mac_enable_rx_queues - Enable MAC rx queues
3814 - * @priv: driver private structure
3815 - * Description: It is used for enabling the rx queues in the MAC
3816 + * stmmac_stop_all_dma - stop all RX and TX DMA channels
3817 + * @priv: driver private structure
3818 + * Description:
3819 + * This stops the RX and TX DMA channels
3820 */
3821 -static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
3822 +static void stmmac_stop_all_dma(struct stmmac_priv *priv)
3823 {
3824 - int rx_count = priv->dma_cap.number_rx_queues;
3825 - int queue = 0;
3826 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3827 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3828 + u32 chan = 0;
3829
3830 - /* If GMAC does not have multiple queues, then this is not necessary*/
3831 - if (rx_count == 1)
3832 - return;
3833 + for (chan = 0; chan < rx_channels_count; chan++)
3834 + stmmac_stop_rx_dma(priv, chan);
3835
3836 - /**
3837 - * If the core is synthesized with multiple rx queues / multiple
3838 - * dma channels, then rx queues will be disabled by default.
3839 - * For now only rx queue 0 is enabled.
3840 - */
3841 - priv->hw->mac->rx_queue_enable(priv->hw, queue);
3842 + for (chan = 0; chan < tx_channels_count; chan++)
3843 + stmmac_stop_tx_dma(priv, chan);
3844 }
3845
3846 /**
3847 @@ -1279,11 +1748,20 @@ static void stmmac_mac_enable_rx_queues(
3848 */
3849 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
3850 {
3851 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3852 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3853 int rxfifosz = priv->plat->rx_fifo_size;
3854 -
3855 - if (priv->plat->force_thresh_dma_mode)
3856 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
3857 - else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
3858 + u32 txmode = 0;
3859 + u32 rxmode = 0;
3860 + u32 chan = 0;
3861 +
3862 + if (rxfifosz == 0)
3863 + rxfifosz = priv->dma_cap.rx_fifo_size;
3864 +
3865 + if (priv->plat->force_thresh_dma_mode) {
3866 + txmode = tc;
3867 + rxmode = tc;
3868 + } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
3869 /*
3870 * In case of GMAC, SF mode can be enabled
3871 * to perform the TX COE in HW. This depends on:
3872 @@ -1291,37 +1769,53 @@ static void stmmac_dma_operation_mode(st
3873 * 2) There is no bugged Jumbo frame support
3874 * that needs to not insert csum in the TDES.
3875 */
3876 - priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
3877 - rxfifosz);
3878 + txmode = SF_DMA_MODE;
3879 + rxmode = SF_DMA_MODE;
3880 priv->xstats.threshold = SF_DMA_MODE;
3881 - } else
3882 - priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
3883 + } else {
3884 + txmode = tc;
3885 + rxmode = SF_DMA_MODE;
3886 + }
3887 +
3888 + /* configure all channels */
3889 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3890 + for (chan = 0; chan < rx_channels_count; chan++)
3891 + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
3892 + rxfifosz);
3893 +
3894 + for (chan = 0; chan < tx_channels_count; chan++)
3895 + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
3896 + } else {
3897 + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
3898 rxfifosz);
3899 + }
3900 }
3901
3902 /**
3903 * stmmac_tx_clean - to manage the transmission completion
3904 * @priv: driver private structure
3905 + * @queue: TX queue index
3906 * Description: it reclaims the transmit resources after transmission completes.
3907 */
3908 -static void stmmac_tx_clean(struct stmmac_priv *priv)
3909 +static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
3910 {
3911 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3912 unsigned int bytes_compl = 0, pkts_compl = 0;
3913 - unsigned int entry = priv->dirty_tx;
3914 + unsigned int entry = tx_q->dirty_tx;
3915
3916 netif_tx_lock(priv->dev);
3917
3918 priv->xstats.tx_clean++;
3919
3920 - while (entry != priv->cur_tx) {
3921 - struct sk_buff *skb = priv->tx_skbuff[entry];
3922 + while (entry != tx_q->cur_tx) {
3923 + struct sk_buff *skb = tx_q->tx_skbuff[entry];
3924 struct dma_desc *p;
3925 int status;
3926
3927 if (priv->extend_desc)
3928 - p = (struct dma_desc *)(priv->dma_etx + entry);
3929 + p = (struct dma_desc *)(tx_q->dma_etx + entry);
3930 else
3931 - p = priv->dma_tx + entry;
3932 + p = tx_q->dma_tx + entry;
3933
3934 status = priv->hw->desc->tx_status(&priv->dev->stats,
3935 &priv->xstats, p,
3936 @@ -1342,48 +1836,51 @@ static void stmmac_tx_clean(struct stmma
3937 stmmac_get_tx_hwtstamp(priv, p, skb);
3938 }
3939
3940 - if (likely(priv->tx_skbuff_dma[entry].buf)) {
3941 - if (priv->tx_skbuff_dma[entry].map_as_page)
3942 + if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
3943 + if (tx_q->tx_skbuff_dma[entry].map_as_page)
3944 dma_unmap_page(priv->device,
3945 - priv->tx_skbuff_dma[entry].buf,
3946 - priv->tx_skbuff_dma[entry].len,
3947 + tx_q->tx_skbuff_dma[entry].buf,
3948 + tx_q->tx_skbuff_dma[entry].len,
3949 DMA_TO_DEVICE);
3950 else
3951 dma_unmap_single(priv->device,
3952 - priv->tx_skbuff_dma[entry].buf,
3953 - priv->tx_skbuff_dma[entry].len,
3954 + tx_q->tx_skbuff_dma[entry].buf,
3955 + tx_q->tx_skbuff_dma[entry].len,
3956 DMA_TO_DEVICE);
3957 - priv->tx_skbuff_dma[entry].buf = 0;
3958 - priv->tx_skbuff_dma[entry].len = 0;
3959 - priv->tx_skbuff_dma[entry].map_as_page = false;
3960 + tx_q->tx_skbuff_dma[entry].buf = 0;
3961 + tx_q->tx_skbuff_dma[entry].len = 0;
3962 + tx_q->tx_skbuff_dma[entry].map_as_page = false;
3963 }
3964
3965 if (priv->hw->mode->clean_desc3)
3966 - priv->hw->mode->clean_desc3(priv, p);
3967 + priv->hw->mode->clean_desc3(tx_q, p);
3968
3969 - priv->tx_skbuff_dma[entry].last_segment = false;
3970 - priv->tx_skbuff_dma[entry].is_jumbo = false;
3971 + tx_q->tx_skbuff_dma[entry].last_segment = false;
3972 + tx_q->tx_skbuff_dma[entry].is_jumbo = false;
3973
3974 if (likely(skb != NULL)) {
3975 pkts_compl++;
3976 bytes_compl += skb->len;
3977 dev_consume_skb_any(skb);
3978 - priv->tx_skbuff[entry] = NULL;
3979 + tx_q->tx_skbuff[entry] = NULL;
3980 }
3981
3982 priv->hw->desc->release_tx_desc(p, priv->mode);
3983
3984 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3985 }
3986 - priv->dirty_tx = entry;
3987 + tx_q->dirty_tx = entry;
3988 +
3989 + netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
3990 + pkts_compl, bytes_compl);
3991
3992 - netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
3993 + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
3994 + queue))) &&
3995 + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
3996
3997 - if (unlikely(netif_queue_stopped(priv->dev) &&
3998 - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
3999 netif_dbg(priv, tx_done, priv->dev,
4000 "%s: restart transmit\n", __func__);
4001 - netif_wake_queue(priv->dev);
4002 + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
4003 }
4004
4005 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
4006 @@ -1393,45 +1890,76 @@ static void stmmac_tx_clean(struct stmma
4007 netif_tx_unlock(priv->dev);
4008 }
4009
4010 -static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
4011 +static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
4012 {
4013 - priv->hw->dma->enable_dma_irq(priv->ioaddr);
4014 + priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
4015 }
4016
4017 -static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
4018 +static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
4019 {
4020 - priv->hw->dma->disable_dma_irq(priv->ioaddr);
4021 + priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
4022 }
4023
4024 /**
4025 * stmmac_tx_err - to manage the tx error
4026 * @priv: driver private structure
4027 + * @chan: channel index
4028 * Description: it cleans the descriptors and restarts the transmission
4029 * in case of transmission errors.
4030 */
4031 -static void stmmac_tx_err(struct stmmac_priv *priv)
4032 +static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
4033 {
4034 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
4035 int i;
4036 - netif_stop_queue(priv->dev);
4037
4038 - priv->hw->dma->stop_tx(priv->ioaddr);
4039 - dma_free_tx_skbufs(priv);
4040 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
4041 +
4042 + stmmac_stop_tx_dma(priv, chan);
4043 + dma_free_tx_skbufs(priv, chan);
4044 for (i = 0; i < DMA_TX_SIZE; i++)
4045 if (priv->extend_desc)
4046 - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
4047 + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
4048 priv->mode,
4049 (i == DMA_TX_SIZE - 1));
4050 else
4051 - priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
4052 + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
4053 priv->mode,
4054 (i == DMA_TX_SIZE - 1));
4055 - priv->dirty_tx = 0;
4056 - priv->cur_tx = 0;
4057 - netdev_reset_queue(priv->dev);
4058 - priv->hw->dma->start_tx(priv->ioaddr);
4059 + tx_q->dirty_tx = 0;
4060 + tx_q->cur_tx = 0;
4061 + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
4062 + stmmac_start_tx_dma(priv, chan);
4063
4064 priv->dev->stats.tx_errors++;
4065 - netif_wake_queue(priv->dev);
4066 + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
4067 +}
4068 +
4069 +/**
4070 + * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
4071 + * @priv: driver private structure
4072 + * @txmode: TX operating mode
4073 + * @rxmode: RX operating mode
4074 + * @chan: channel index
4075 + * Description: it is used for configuring the DMA operation mode at
4076 + * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
4077 + * mode.
4078 + */
4079 +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
4080 + u32 rxmode, u32 chan)
4081 +{
4082 + int rxfifosz = priv->plat->rx_fifo_size;
4083 +
4084 + if (rxfifosz == 0)
4085 + rxfifosz = priv->dma_cap.rx_fifo_size;
4086 +
4087 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4088 + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
4089 + rxfifosz);
4090 + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
4091 + } else {
4092 + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
4093 + rxfifosz);
4094 + }
4095 }
4096
4097 /**
4098 @@ -1443,31 +1971,43 @@ static void stmmac_tx_err(struct stmmac_
4099 */
4100 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
4101 {
4102 + u32 tx_channel_count = priv->plat->tx_queues_to_use;
4103 int status;
4104 - int rxfifosz = priv->plat->rx_fifo_size;
4105 + u32 chan;
4106 +
4107 + for (chan = 0; chan < tx_channel_count; chan++) {
4108 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
4109
4110 - status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
4111 - if (likely((status & handle_rx)) || (status & handle_tx)) {
4112 - if (likely(napi_schedule_prep(&priv->napi))) {
4113 - stmmac_disable_dma_irq(priv);
4114 - __napi_schedule(&priv->napi);
4115 + status = priv->hw->dma->dma_interrupt(priv->ioaddr,
4116 + &priv->xstats, chan);
4117 + if (likely((status & handle_rx)) || (status & handle_tx)) {
4118 + if (likely(napi_schedule_prep(&rx_q->napi))) {
4119 + stmmac_disable_dma_irq(priv, chan);
4120 + __napi_schedule(&rx_q->napi);
4121 + }
4122 }
4123 - }
4124 - if (unlikely(status & tx_hard_error_bump_tc)) {
4125 - /* Try to bump up the dma threshold on this failure */
4126 - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4127 - (tc <= 256)) {
4128 - tc += 64;
4129 - if (priv->plat->force_thresh_dma_mode)
4130 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
4131 - rxfifosz);
4132 - else
4133 - priv->hw->dma->dma_mode(priv->ioaddr, tc,
4134 - SF_DMA_MODE, rxfifosz);
4135 - priv->xstats.threshold = tc;
4136 +
4137 + if (unlikely(status & tx_hard_error_bump_tc)) {
4138 + /* Try to bump up the dma threshold on this failure */
4139 + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4140 + (tc <= 256)) {
4141 + tc += 64;
4142 + if (priv->plat->force_thresh_dma_mode)
4143 + stmmac_set_dma_operation_mode(priv,
4144 + tc,
4145 + tc,
4146 + chan);
4147 + else
4148 + stmmac_set_dma_operation_mode(priv,
4149 + tc,
4150 + SF_DMA_MODE,
4151 + chan);
4152 + priv->xstats.threshold = tc;
4153 + }
4154 + } else if (unlikely(status == tx_hard_error)) {
4155 + stmmac_tx_err(priv, chan);
4156 }
4157 - } else if (unlikely(status == tx_hard_error))
4158 - stmmac_tx_err(priv);
4159 + }
4160 }
4161
4162 /**
4163 @@ -1574,6 +2114,13 @@ static void stmmac_check_ether_addr(stru
4164 */
4165 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
4166 {
4167 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
4168 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
4169 + struct stmmac_rx_queue *rx_q;
4170 + struct stmmac_tx_queue *tx_q;
4171 + u32 dummy_dma_rx_phy = 0;
4172 + u32 dummy_dma_tx_phy = 0;
4173 + u32 chan = 0;
4174 int atds = 0;
4175 int ret = 0;
4176
4177 @@ -1591,19 +2138,49 @@ static int stmmac_init_dma_engine(struct
4178 return ret;
4179 }
4180
4181 - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4182 - priv->dma_tx_phy, priv->dma_rx_phy, atds);
4183 -
4184 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4185 - priv->rx_tail_addr = priv->dma_rx_phy +
4186 - (DMA_RX_SIZE * sizeof(struct dma_desc));
4187 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
4188 - STMMAC_CHAN0);
4189 -
4190 - priv->tx_tail_addr = priv->dma_tx_phy +
4191 - (DMA_TX_SIZE * sizeof(struct dma_desc));
4192 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4193 - STMMAC_CHAN0);
4194 + /* DMA Configuration */
4195 + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4196 + dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
4197 +
4198 + /* DMA RX Channel Configuration */
4199 + for (chan = 0; chan < rx_channels_count; chan++) {
4200 + rx_q = &priv->rx_queue[chan];
4201 +
4202 + priv->hw->dma->init_rx_chan(priv->ioaddr,
4203 + priv->plat->dma_cfg,
4204 + rx_q->dma_rx_phy, chan);
4205 +
4206 + rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4207 + (DMA_RX_SIZE * sizeof(struct dma_desc));
4208 + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
4209 + rx_q->rx_tail_addr,
4210 + chan);
4211 + }
4212 +
4213 + /* DMA TX Channel Configuration */
4214 + for (chan = 0; chan < tx_channels_count; chan++) {
4215 + tx_q = &priv->tx_queue[chan];
4216 +
4217 + priv->hw->dma->init_chan(priv->ioaddr,
4218 + priv->plat->dma_cfg,
4219 + chan);
4220 +
4221 + priv->hw->dma->init_tx_chan(priv->ioaddr,
4222 + priv->plat->dma_cfg,
4223 + tx_q->dma_tx_phy, chan);
4224 +
4225 + tx_q->tx_tail_addr = tx_q->dma_tx_phy +
4226 + (DMA_TX_SIZE * sizeof(struct dma_desc));
4227 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
4228 + tx_q->tx_tail_addr,
4229 + chan);
4230 + }
4231 + } else {
4232 + rx_q = &priv->rx_queue[chan];
4233 + tx_q = &priv->tx_queue[chan];
4234 + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4235 + tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
4236 }
4237
4238 if (priv->plat->axi && priv->hw->dma->axi)
4239 @@ -1621,8 +2198,12 @@ static int stmmac_init_dma_engine(struct
4240 static void stmmac_tx_timer(unsigned long data)
4241 {
4242 struct stmmac_priv *priv = (struct stmmac_priv *)data;
4243 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4244 + u32 queue;
4245
4246 - stmmac_tx_clean(priv);
4247 + /* let's scan all the tx queues */
4248 + for (queue = 0; queue < tx_queues_count; queue++)
4249 + stmmac_tx_clean(priv, queue);
4250 }
4251
4252 /**
4253 @@ -1644,6 +2225,196 @@ static void stmmac_init_tx_coalesce(stru
4254 add_timer(&priv->txtimer);
4255 }
4256
4257 +static void stmmac_set_rings_length(struct stmmac_priv *priv)
4258 +{
4259 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
4260 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
4261 + u32 chan;
4262 +
4263 + /* set TX ring length */
4264 + if (priv->hw->dma->set_tx_ring_len) {
4265 + for (chan = 0; chan < tx_channels_count; chan++)
4266 + priv->hw->dma->set_tx_ring_len(priv->ioaddr,
4267 + (DMA_TX_SIZE - 1), chan);
4268 + }
4269 +
4270 + /* set RX ring length */
4271 + if (priv->hw->dma->set_rx_ring_len) {
4272 + for (chan = 0; chan < rx_channels_count; chan++)
4273 + priv->hw->dma->set_rx_ring_len(priv->ioaddr,
4274 + (DMA_RX_SIZE - 1), chan);
4275 + }
4276 +}
4277 +
4278 +/**
4279 + * stmmac_set_tx_queue_weight - Set TX queue weight
4280 + * @priv: driver private structure
4281 + * Description: It is used for setting the TX queue weights
4282 + */
4283 +static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
4284 +{
4285 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4286 + u32 weight;
4287 + u32 queue;
4288 +
4289 + for (queue = 0; queue < tx_queues_count; queue++) {
4290 + weight = priv->plat->tx_queues_cfg[queue].weight;
4291 + priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
4292 + }
4293 +}
4294 +
4295 +/**
4296 + * stmmac_configure_cbs - Configure CBS in TX queue
4297 + * @priv: driver private structure
4298 + * Description: It is used for configuring CBS in AVB TX queues
4299 + */
4300 +static void stmmac_configure_cbs(struct stmmac_priv *priv)
4301 +{
4302 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4303 + u32 mode_to_use;
4304 + u32 queue;
4305 +
4306 + /* queue 0 is reserved for legacy traffic */
4307 + for (queue = 1; queue < tx_queues_count; queue++) {
4308 + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
4309 + if (mode_to_use == MTL_QUEUE_DCB)
4310 + continue;
4311 +
4312 + priv->hw->mac->config_cbs(priv->hw,
4313 + priv->plat->tx_queues_cfg[queue].send_slope,
4314 + priv->plat->tx_queues_cfg[queue].idle_slope,
4315 + priv->plat->tx_queues_cfg[queue].high_credit,
4316 + priv->plat->tx_queues_cfg[queue].low_credit,
4317 + queue);
4318 + }
4319 +}
4320 +
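A minimal glue-driver sketch of how the CBS parameters consumed by stmmac_configure_cbs() above might be populated. Only the plat->tx_queues_cfg[] field names are taken from the code above; the MTL_QUEUE_AVB constant, the header path and all numeric values are assumptions/placeholders, not part of this patch.

#include <linux/stmmac.h>

static void example_setup_avb_txq(struct plat_stmmacenet_data *plat)
{
	/* TX queue 1 runs as AVB with credit-based shaping (hypothetical values) */
	plat->tx_queues_to_use = 2;
	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	plat->tx_queues_cfg[1].send_slope = 0x1000;
	plat->tx_queues_cfg[1].idle_slope = 0x1000;
	plat->tx_queues_cfg[1].high_credit = 0x3e800;
	plat->tx_queues_cfg[1].low_credit = 0xffc18000;
}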
4321 +/**
4322 + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
4323 + * @priv: driver private structure
4324 + * Description: It is used for mapping RX queues to RX dma channels
4325 + */
4326 +static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
4327 +{
4328 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4329 + u32 queue;
4330 + u32 chan;
4331 +
4332 + for (queue = 0; queue < rx_queues_count; queue++) {
4333 + chan = priv->plat->rx_queues_cfg[queue].chan;
4334 + priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
4335 + }
4336 +}
4337 +
4338 +/* see the RX queue configuration sketch after the priority helper below */
4338 +/**
4339 + * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
4340 + * @priv: driver private structure
4341 + * Description: It is used for configuring the RX Queue Priority
4342 + */
4343 +static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
4344 +{
4345 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4346 + u32 queue;
4347 + u32 prio;
4348 +
4349 + for (queue = 0; queue < rx_queues_count; queue++) {
4350 + if (!priv->plat->rx_queues_cfg[queue].use_prio)
4351 + continue;
4352 +
4353 + prio = priv->plat->rx_queues_cfg[queue].prio;
4354 + priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
4355 + }
4356 +}
4357 +
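Similarly, a minimal sketch of the per-queue RX fields read by stmmac_rx_queue_dma_chan_map() and stmmac_mac_config_rx_queues_prio() above. The field names come from the code; the values, the MTL_QUEUE_DCB choice and the header path are placeholders/assumptions.

#include <linux/stmmac.h>

static void example_setup_rxq(struct plat_stmmacenet_data *plat)
{
	/* Steer RX queue 1 to DMA channel 1 and give it priority 2 (hypothetical) */
	plat->rx_queues_to_use = 2;
	plat->rx_queues_cfg[1].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[1].chan = 1;
	plat->rx_queues_cfg[1].use_prio = true;
	plat->rx_queues_cfg[1].prio = 2;
}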
4358 +/**
4359 + * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
4360 + * @priv: driver private structure
4361 + * Description: It is used for configuring the TX Queue Priority
4362 + */
4363 +static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
4364 +{
4365 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4366 + u32 queue;
4367 + u32 prio;
4368 +
4369 + for (queue = 0; queue < tx_queues_count; queue++) {
4370 + if (!priv->plat->tx_queues_cfg[queue].use_prio)
4371 + continue;
4372 +
4373 + prio = priv->plat->tx_queues_cfg[queue].prio;
4374 + priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
4375 + }
4376 +}
4377 +
4378 +/**
4379 + * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
4380 + * @priv: driver private structure
4381 + * Description: It is used for configuring the RX queue routing
4382 + */
4383 +static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
4384 +{
4385 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4386 + u32 queue;
4387 + u8 packet;
4388 +
4389 + for (queue = 0; queue < rx_queues_count; queue++) {
4390 + /* no specific packet type routing specified for the queue */
4391 + if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
4392 + continue;
4393 +
4394 + packet = priv->plat->rx_queues_cfg[queue].pkt_route;
4395 + priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
4396 + }
4397 +}
4398 +
4399 +/**
4400 + * stmmac_mtl_configuration - Configure MTL
4401 + * @priv: driver private structure
4402 + * Description: It is used for configuring the MTL
4403 + */
4404 +static void stmmac_mtl_configuration(struct stmmac_priv *priv)
4405 +{
4406 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4407 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4408 +
4409 + if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
4410 + stmmac_set_tx_queue_weight(priv);
4411 +
4412 + /* Configure MTL RX algorithms */
4413 + if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
4414 + priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
4415 + priv->plat->rx_sched_algorithm);
4416 +
4417 + /* Configure MTL TX algorithms */
4418 + if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
4419 + priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
4420 + priv->plat->tx_sched_algorithm);
4421 +
4422 + /* Configure CBS in AVB TX queues */
4423 + if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
4424 + stmmac_configure_cbs(priv);
4425 +
4426 + /* Map RX MTL to DMA channels */
4427 + if (priv->hw->mac->map_mtl_to_dma)
4428 + stmmac_rx_queue_dma_chan_map(priv);
4429 +
4430 + /* Enable MAC RX Queues */
4431 + if (priv->hw->mac->rx_queue_enable)
4432 + stmmac_mac_enable_rx_queues(priv);
4433 +
4434 + /* Set RX priorities */
4435 + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
4436 + stmmac_mac_config_rx_queues_prio(priv);
4437 +
4438 + /* Set TX priorities */
4439 + if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
4440 + stmmac_mac_config_tx_queues_prio(priv);
4441 +
4442 + /* Set RX routing */
4443 + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
4444 + stmmac_mac_config_rx_queues_routing(priv);
4445 +}
4446 +
4447 /**
4448 * stmmac_hw_setup - setup mac in a usable state.
4449 * @dev : pointer to the device structure.
4450 @@ -1659,6 +2430,9 @@ static void stmmac_init_tx_coalesce(stru
4451 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
4452 {
4453 struct stmmac_priv *priv = netdev_priv(dev);
4454 + u32 rx_cnt = priv->plat->rx_queues_to_use;
4455 + u32 tx_cnt = priv->plat->tx_queues_to_use;
4456 + u32 chan;
4457 int ret;
4458
4459 /* DMA initialization and SW reset */
4460 @@ -1688,9 +2462,9 @@ static int stmmac_hw_setup(struct net_de
4461 /* Initialize the MAC Core */
4462 priv->hw->mac->core_init(priv->hw, dev->mtu);
4463
4464 - /* Initialize MAC RX Queues */
4465 - if (priv->hw->mac->rx_queue_enable)
4466 - stmmac_mac_enable_rx_queues(priv);
4467 + /* Initialize MTL*/
4468 + if (priv->synopsys_id >= DWMAC_CORE_4_00)
4469 + stmmac_mtl_configuration(priv);
4470
4471 ret = priv->hw->mac->rx_ipc(priv->hw);
4472 if (!ret) {
4473 @@ -1700,10 +2474,7 @@ static int stmmac_hw_setup(struct net_de
4474 }
4475
4476 /* Enable the MAC Rx/Tx */
4477 - if (priv->synopsys_id >= DWMAC_CORE_4_00)
4478 - stmmac_dwmac4_set_mac(priv->ioaddr, true);
4479 - else
4480 - stmmac_set_mac(priv->ioaddr, true);
4481 + priv->hw->mac->set_mac(priv->ioaddr, true);
4482
4483 /* Set the HW DMA mode and the COE */
4484 stmmac_dma_operation_mode(priv);
4485 @@ -1711,6 +2482,10 @@ static int stmmac_hw_setup(struct net_de
4486 stmmac_mmc_setup(priv);
4487
4488 if (init_ptp) {
4489 + ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
4490 + if (ret < 0)
4491 + netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
4492 +
4493 ret = stmmac_init_ptp(priv);
4494 if (ret == -EOPNOTSUPP)
4495 netdev_warn(priv->dev, "PTP not supported by HW\n");
4496 @@ -1725,35 +2500,37 @@ static int stmmac_hw_setup(struct net_de
4497 __func__);
4498 #endif
4499 /* Start the ball rolling... */
4500 - netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
4501 - priv->hw->dma->start_tx(priv->ioaddr);
4502 - priv->hw->dma->start_rx(priv->ioaddr);
4503 + stmmac_start_all_dma(priv);
4504
4505 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
4506
4507 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
4508 priv->rx_riwt = MAX_DMA_RIWT;
4509 - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
4510 + priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
4511 }
4512
4513 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
4514 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
4515
4516 - /* set TX ring length */
4517 - if (priv->hw->dma->set_tx_ring_len)
4518 - priv->hw->dma->set_tx_ring_len(priv->ioaddr,
4519 - (DMA_TX_SIZE - 1));
4520 - /* set RX ring length */
4521 - if (priv->hw->dma->set_rx_ring_len)
4522 - priv->hw->dma->set_rx_ring_len(priv->ioaddr,
4523 - (DMA_RX_SIZE - 1));
4524 + /* set TX and RX rings length */
4525 + stmmac_set_rings_length(priv);
4526 +
4527 /* Enable TSO */
4528 - if (priv->tso)
4529 - priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
4530 + if (priv->tso) {
4531 + for (chan = 0; chan < tx_cnt; chan++)
4532 + priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
4533 + }
4534
4535 return 0;
4536 }
4537
4538 +static void stmmac_hw_teardown(struct net_device *dev)
4539 +{
4540 + struct stmmac_priv *priv = netdev_priv(dev);
4541 +
4542 + clk_disable_unprepare(priv->plat->clk_ptp_ref);
4543 +}
4544 +
4545 /**
4546 * stmmac_open - open entry point of the driver
4547 * @dev : pointer to the device structure.
4548 @@ -1821,7 +2598,7 @@ static int stmmac_open(struct net_device
4549 netdev_err(priv->dev,
4550 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
4551 __func__, dev->irq, ret);
4552 - goto init_error;
4553 + goto irq_error;
4554 }
4555
4556 /* Request the Wake IRQ in case of another line is used for WoL */
4557 @@ -1848,8 +2625,8 @@ static int stmmac_open(struct net_device
4558 }
4559 }
4560
4561 - napi_enable(&priv->napi);
4562 - netif_start_queue(dev);
4563 + stmmac_enable_all_queues(priv);
4564 + stmmac_start_all_queues(priv);
4565
4566 return 0;
4567
4568 @@ -1858,7 +2635,12 @@ lpiirq_error:
4569 free_irq(priv->wol_irq, dev);
4570 wolirq_error:
4571 free_irq(dev->irq, dev);
4572 +irq_error:
4573 + if (dev->phydev)
4574 + phy_stop(dev->phydev);
4575
4576 + del_timer_sync(&priv->txtimer);
4577 + stmmac_hw_teardown(dev);
4578 init_error:
4579 free_dma_desc_resources(priv);
4580 dma_desc_error:
4581 @@ -1887,9 +2669,9 @@ static int stmmac_release(struct net_dev
4582 phy_disconnect(dev->phydev);
4583 }
4584
4585 - netif_stop_queue(dev);
4586 + stmmac_stop_all_queues(priv);
4587
4588 - napi_disable(&priv->napi);
4589 + stmmac_disable_all_queues(priv);
4590
4591 del_timer_sync(&priv->txtimer);
4592
4593 @@ -1901,14 +2683,13 @@ static int stmmac_release(struct net_dev
4594 free_irq(priv->lpi_irq, dev);
4595
4596 /* Stop TX/RX DMA and clear the descriptors */
4597 - priv->hw->dma->stop_tx(priv->ioaddr);
4598 - priv->hw->dma->stop_rx(priv->ioaddr);
4599 + stmmac_stop_all_dma(priv);
4600
4601 /* Release and free the Rx/Tx resources */
4602 free_dma_desc_resources(priv);
4603
4604 /* Disable the MAC Rx/Tx */
4605 - stmmac_set_mac(priv->ioaddr, false);
4606 + priv->hw->mac->set_mac(priv->ioaddr, false);
4607
4608 netif_carrier_off(dev);
4609
4610 @@ -1927,22 +2708,24 @@ static int stmmac_release(struct net_dev
4611 * @des: buffer start address
4612 * @total_len: total length to fill in descriptors
3613 * @last_segment: condition for the last descriptor
4614 + * @queue: TX queue index
4615 * Description:
3616 * This function fills the descriptor and requests new descriptors according
3617 * to the buffer length to fill
4618 */
4619 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
4620 - int total_len, bool last_segment)
4621 + int total_len, bool last_segment, u32 queue)
4622 {
4623 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4624 struct dma_desc *desc;
4625 - int tmp_len;
4626 u32 buff_size;
4627 + int tmp_len;
4628
4629 tmp_len = total_len;
4630
4631 while (tmp_len > 0) {
4632 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4633 - desc = priv->dma_tx + priv->cur_tx;
4634 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4635 + desc = tx_q->dma_tx + tx_q->cur_tx;
4636
4637 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
4638 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4639 @@ -1950,7 +2733,7 @@ static void stmmac_tso_allocator(struct
4640
4641 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
4642 0, 1,
4643 - (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
4644 + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4645 0, 0);
4646
4647 tmp_len -= TSO_MAX_BUFF_SIZE;
4648 @@ -1986,23 +2769,28 @@ static void stmmac_tso_allocator(struct
4649 */
4650 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4651 {
4652 - u32 pay_len, mss;
4653 - int tmp_pay_len = 0;
4654 + struct dma_desc *desc, *first, *mss_desc = NULL;
4655 struct stmmac_priv *priv = netdev_priv(dev);
4656 int nfrags = skb_shinfo(skb)->nr_frags;
4657 + u32 queue = skb_get_queue_mapping(skb);
4658 unsigned int first_entry, des;
4659 - struct dma_desc *desc, *first, *mss_desc = NULL;
4660 + struct stmmac_tx_queue *tx_q;
4661 + int tmp_pay_len = 0;
4662 + u32 pay_len, mss;
4663 u8 proto_hdr_len;
4664 int i;
4665
4666 + tx_q = &priv->tx_queue[queue];
4667 +
4668 /* Compute header lengths */
4669 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4670
4671 /* Desc availability based on threshold should be enough safe */
4672 - if (unlikely(stmmac_tx_avail(priv) <
4673 + if (unlikely(stmmac_tx_avail(priv, queue) <
4674 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4675 - if (!netif_queue_stopped(dev)) {
4676 - netif_stop_queue(dev);
4677 + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4678 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4679 + queue));
4680 /* This is a hard error, log it. */
4681 netdev_err(priv->dev,
4682 "%s: Tx Ring full when queue awake\n",
4683 @@ -2017,10 +2805,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
4684
4685 /* set new MSS value if needed */
4686 if (mss != priv->mss) {
4687 - mss_desc = priv->dma_tx + priv->cur_tx;
4688 + mss_desc = tx_q->dma_tx + tx_q->cur_tx;
4689 priv->hw->desc->set_mss(mss_desc, mss);
4690 priv->mss = mss;
4691 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4692 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4693 }
4694
4695 if (netif_msg_tx_queued(priv)) {
4696 @@ -2030,9 +2818,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
4697 skb->data_len);
4698 }
4699
4700 - first_entry = priv->cur_tx;
4701 + first_entry = tx_q->cur_tx;
4702
4703 - desc = priv->dma_tx + first_entry;
4704 + desc = tx_q->dma_tx + first_entry;
4705 first = desc;
4706
4707 /* first descriptor: fill Headers on Buf1 */
4708 @@ -2041,9 +2829,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
4709 if (dma_mapping_error(priv->device, des))
4710 goto dma_map_err;
4711
4712 - priv->tx_skbuff_dma[first_entry].buf = des;
4713 - priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4714 - priv->tx_skbuff[first_entry] = skb;
4715 + tx_q->tx_skbuff_dma[first_entry].buf = des;
4716 + tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4717
4718 first->des0 = cpu_to_le32(des);
4719
4720 @@ -2054,7 +2841,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
4721 /* If needed take extra descriptors to fill the remaining payload */
4722 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4723
4724 - stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
4725 + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4726
4727 /* Prepare fragments */
4728 for (i = 0; i < nfrags; i++) {
4729 @@ -2063,24 +2850,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
4730 des = skb_frag_dma_map(priv->device, frag, 0,
4731 skb_frag_size(frag),
4732 DMA_TO_DEVICE);
4733 + if (dma_mapping_error(priv->device, des))
4734 + goto dma_map_err;
4735
4736 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4737 - (i == nfrags - 1));
4738 + (i == nfrags - 1), queue);
4739
4740 - priv->tx_skbuff_dma[priv->cur_tx].buf = des;
4741 - priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
4742 - priv->tx_skbuff[priv->cur_tx] = NULL;
4743 - priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
4744 + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4745 + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4746 + tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
4747 + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4748 }
4749
4750 - priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
4751 + tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4752 +
4753 + /* Only the last descriptor gets to point to the skb. */
4754 + tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4755
4756 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4757 + /* We've used all descriptors we need for this skb, however,
4758 + * advance cur_tx so that it references a fresh descriptor.
4759 + * ndo_start_xmit will fill this descriptor the next time it's
4760 + * called and stmmac_tx_clean may clean up to this descriptor.
4761 + */
4762 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4763
4764 - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
4765 + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4766 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4767 __func__);
4768 - netif_stop_queue(dev);
4769 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4770 }
4771
4772 dev->stats.tx_bytes += skb->len;
4773 @@ -2112,7 +2909,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
4774 priv->hw->desc->prepare_tso_tx_desc(first, 1,
4775 proto_hdr_len,
4776 pay_len,
4777 - 1, priv->tx_skbuff_dma[first_entry].last_segment,
4778 + 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4779 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
4780
4781 /* If context desc is used to change MSS */
4782 @@ -2127,20 +2924,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
4783
4784 if (netif_msg_pktdata(priv)) {
4785 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4786 - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
4787 - priv->cur_tx, first, nfrags);
4788 + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4789 + tx_q->cur_tx, first, nfrags);
4790
4791 - priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
4792 + priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
4793 0);
4794
4795 pr_info(">>> frame to be transmitted: ");
4796 print_pkt(skb->data, skb_headlen(skb));
4797 }
4798
4799 - netdev_sent_queue(dev, skb->len);
4800 + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4801
4802 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4803 - STMMAC_CHAN0);
4804 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
4805 + queue);
4806
4807 return NETDEV_TX_OK;
4808
4809 @@ -2164,21 +2961,27 @@ static netdev_tx_t stmmac_xmit(struct sk
4810 struct stmmac_priv *priv = netdev_priv(dev);
4811 unsigned int nopaged_len = skb_headlen(skb);
4812 int i, csum_insertion = 0, is_jumbo = 0;
4813 + u32 queue = skb_get_queue_mapping(skb);
4814 int nfrags = skb_shinfo(skb)->nr_frags;
4815 - unsigned int entry, first_entry;
4816 + int entry;
4817 + unsigned int first_entry;
4818 struct dma_desc *desc, *first;
4819 + struct stmmac_tx_queue *tx_q;
4820 unsigned int enh_desc;
4821 unsigned int des;
4822
4823 + tx_q = &priv->tx_queue[queue];
4824 +
4825 /* Manage oversized TCP frames for GMAC4 device */
4826 if (skb_is_gso(skb) && priv->tso) {
4827 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4828 return stmmac_tso_xmit(skb, dev);
4829 }
4830
4831 - if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
4832 - if (!netif_queue_stopped(dev)) {
4833 - netif_stop_queue(dev);
4834 + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4835 + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4836 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4837 + queue));
4838 /* This is a hard error, log it. */
4839 netdev_err(priv->dev,
4840 "%s: Tx Ring full when queue awake\n",
4841 @@ -2190,20 +2993,18 @@ static netdev_tx_t stmmac_xmit(struct sk
4842 if (priv->tx_path_in_lpi_mode)
4843 stmmac_disable_eee_mode(priv);
4844
4845 - entry = priv->cur_tx;
4846 + entry = tx_q->cur_tx;
4847 first_entry = entry;
4848
4849 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4850
4851 if (likely(priv->extend_desc))
4852 - desc = (struct dma_desc *)(priv->dma_etx + entry);
4853 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4854 else
4855 - desc = priv->dma_tx + entry;
4856 + desc = tx_q->dma_tx + entry;
4857
4858 first = desc;
4859
4860 - priv->tx_skbuff[first_entry] = skb;
4861 -
4862 enh_desc = priv->plat->enh_desc;
4863 /* To program the descriptors according to the size of the frame */
4864 if (enh_desc)
4865 @@ -2211,7 +3012,7 @@ static netdev_tx_t stmmac_xmit(struct sk
4866
4867 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
4868 DWMAC_CORE_4_00)) {
4869 - entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
4870 + entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
4871 if (unlikely(entry < 0))
4872 goto dma_map_err;
4873 }
4874 @@ -2224,48 +3025,56 @@ static netdev_tx_t stmmac_xmit(struct sk
4875 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4876
4877 if (likely(priv->extend_desc))
4878 - desc = (struct dma_desc *)(priv->dma_etx + entry);
4879 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4880 else
4881 - desc = priv->dma_tx + entry;
4882 + desc = tx_q->dma_tx + entry;
4883
4884 des = skb_frag_dma_map(priv->device, frag, 0, len,
4885 DMA_TO_DEVICE);
4886 if (dma_mapping_error(priv->device, des))
4887 goto dma_map_err; /* should reuse desc w/o issues */
4888
4889 - priv->tx_skbuff[entry] = NULL;
4890 + tx_q->tx_skbuff[entry] = NULL;
4891
4892 - priv->tx_skbuff_dma[entry].buf = des;
4893 + tx_q->tx_skbuff_dma[entry].buf = des;
4894 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
4895 desc->des0 = cpu_to_le32(des);
4896 else
4897 desc->des2 = cpu_to_le32(des);
4898
4899 - priv->tx_skbuff_dma[entry].map_as_page = true;
4900 - priv->tx_skbuff_dma[entry].len = len;
4901 - priv->tx_skbuff_dma[entry].last_segment = last_segment;
4902 + tx_q->tx_skbuff_dma[entry].map_as_page = true;
4903 + tx_q->tx_skbuff_dma[entry].len = len;
4904 + tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4905
4906 /* Prepare the descriptor and set the own bit too */
4907 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
4908 - priv->mode, 1, last_segment);
4909 + priv->mode, 1, last_segment,
4910 + skb->len);
4911 }
4912
4913 - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4914 + /* Only the last descriptor gets to point to the skb. */
4915 + tx_q->tx_skbuff[entry] = skb;
4916
4917 - priv->cur_tx = entry;
4918 + /* We've used all descriptors we need for this skb, however,
4919 + * advance cur_tx so that it references a fresh descriptor.
4920 + * ndo_start_xmit will fill this descriptor the next time it's
4921 + * called and stmmac_tx_clean may clean up to this descriptor.
4922 + */
4923 + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4924 + tx_q->cur_tx = entry;
4925
4926 if (netif_msg_pktdata(priv)) {
4927 void *tx_head;
4928
4929 netdev_dbg(priv->dev,
4930 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4931 - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
4932 + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4933 entry, first, nfrags);
4934
4935 if (priv->extend_desc)
4936 - tx_head = (void *)priv->dma_etx;
4937 + tx_head = (void *)tx_q->dma_etx;
4938 else
4939 - tx_head = (void *)priv->dma_tx;
4940 + tx_head = (void *)tx_q->dma_tx;
4941
4942 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
4943
4944 @@ -2273,10 +3082,10 @@ static netdev_tx_t stmmac_xmit(struct sk
4945 print_pkt(skb->data, skb->len);
4946 }
4947
4948 - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
4949 + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4950 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4951 __func__);
4952 - netif_stop_queue(dev);
4953 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4954 }
4955
4956 dev->stats.tx_bytes += skb->len;
4957 @@ -2311,14 +3120,14 @@ static netdev_tx_t stmmac_xmit(struct sk
4958 if (dma_mapping_error(priv->device, des))
4959 goto dma_map_err;
4960
4961 - priv->tx_skbuff_dma[first_entry].buf = des;
4962 + tx_q->tx_skbuff_dma[first_entry].buf = des;
4963 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
4964 first->des0 = cpu_to_le32(des);
4965 else
4966 first->des2 = cpu_to_le32(des);
4967
4968 - priv->tx_skbuff_dma[first_entry].len = nopaged_len;
4969 - priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
4970 + tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4971 + tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4972
4973 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4974 priv->hwts_tx_en)) {
4975 @@ -2330,7 +3139,7 @@ static netdev_tx_t stmmac_xmit(struct sk
4976 /* Prepare the first descriptor setting the OWN bit too */
4977 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
4978 csum_insertion, priv->mode, 1,
4979 - last_segment);
4980 + last_segment, skb->len);
4981
4982 /* The own bit must be the latest setting done when prepare the
4983 * descriptor and then barrier is needed to make sure that
4984 @@ -2339,13 +3148,13 @@ static netdev_tx_t stmmac_xmit(struct sk
4985 dma_wmb();
4986 }
4987
4988 - netdev_sent_queue(dev, skb->len);
4989 + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4990
4991 if (priv->synopsys_id < DWMAC_CORE_4_00)
4992 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
4993 else
4994 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4995 - STMMAC_CHAN0);
4996 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
4997 + queue);
4998
4999 return NETDEV_TX_OK;
5000
5001 @@ -2373,9 +3182,9 @@ static void stmmac_rx_vlan(struct net_de
5002 }
5003
5004
5005 -static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
5006 +static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
5007 {
5008 - if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
5009 + if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
5010 return 0;
5011
5012 return 1;
5013 @@ -2384,30 +3193,33 @@ static inline int stmmac_rx_threshold_co
5014 /**
5015 * stmmac_rx_refill - refill used skb preallocated buffers
5016 * @priv: driver private structure
5017 + * @queue: RX queue index
5018 * Description : this is to reallocate the skb for the reception process
5019 * that is based on zero-copy.
5020 */
5021 -static inline void stmmac_rx_refill(struct stmmac_priv *priv)
5022 +static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
5023 {
5024 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5025 + int dirty = stmmac_rx_dirty(priv, queue);
5026 + unsigned int entry = rx_q->dirty_rx;
5027 +
5028 int bfsize = priv->dma_buf_sz;
5029 - unsigned int entry = priv->dirty_rx;
5030 - int dirty = stmmac_rx_dirty(priv);
5031
5032 while (dirty-- > 0) {
5033 struct dma_desc *p;
5034
5035 if (priv->extend_desc)
5036 - p = (struct dma_desc *)(priv->dma_erx + entry);
5037 + p = (struct dma_desc *)(rx_q->dma_erx + entry);
5038 else
5039 - p = priv->dma_rx + entry;
5040 + p = rx_q->dma_rx + entry;
5041
5042 - if (likely(priv->rx_skbuff[entry] == NULL)) {
5043 + if (likely(!rx_q->rx_skbuff[entry])) {
5044 struct sk_buff *skb;
5045
5046 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
5047 if (unlikely(!skb)) {
5048 /* so for a while no zero-copy! */
5049 - priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
5050 + rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
5051 if (unlikely(net_ratelimit()))
5052 dev_err(priv->device,
5053 "fail to alloc skb entry %d\n",
5054 @@ -2415,28 +3227,28 @@ static inline void stmmac_rx_refill(stru
5055 break;
5056 }
5057
5058 - priv->rx_skbuff[entry] = skb;
5059 - priv->rx_skbuff_dma[entry] =
5060 + rx_q->rx_skbuff[entry] = skb;
5061 + rx_q->rx_skbuff_dma[entry] =
5062 dma_map_single(priv->device, skb->data, bfsize,
5063 DMA_FROM_DEVICE);
5064 if (dma_mapping_error(priv->device,
5065 - priv->rx_skbuff_dma[entry])) {
5066 + rx_q->rx_skbuff_dma[entry])) {
5067 netdev_err(priv->dev, "Rx DMA map failed\n");
5068 dev_kfree_skb(skb);
5069 break;
5070 }
5071
5072 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
5073 - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
5074 + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
5075 p->des1 = 0;
5076 } else {
5077 - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
5078 + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
5079 }
5080 if (priv->hw->mode->refill_desc3)
5081 - priv->hw->mode->refill_desc3(priv, p);
5082 + priv->hw->mode->refill_desc3(rx_q, p);
5083
5084 - if (priv->rx_zeroc_thresh > 0)
5085 - priv->rx_zeroc_thresh--;
5086 + if (rx_q->rx_zeroc_thresh > 0)
5087 + rx_q->rx_zeroc_thresh--;
5088
5089 netif_dbg(priv, rx_status, priv->dev,
5090 "refill entry #%d\n", entry);
5091 @@ -2452,31 +3264,33 @@ static inline void stmmac_rx_refill(stru
5092
5093 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
5094 }
5095 - priv->dirty_rx = entry;
5096 + rx_q->dirty_rx = entry;
5097 }
5098
5099 /**
5100 * stmmac_rx - manage the receive process
5101 * @priv: driver private structure
5102 - * @limit: napi bugget.
5103 + * @limit: napi budget
5104 + * @queue: RX queue index.
5105 * Description : this the function called by the napi poll method.
5106 * It gets all the frames inside the ring.
5107 */
5108 -static int stmmac_rx(struct stmmac_priv *priv, int limit)
5109 +static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5110 {
5111 - unsigned int entry = priv->cur_rx;
5112 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5113 + unsigned int entry = rx_q->cur_rx;
5114 + int coe = priv->hw->rx_csum;
5115 unsigned int next_entry;
5116 unsigned int count = 0;
5117 - int coe = priv->hw->rx_csum;
5118
5119 if (netif_msg_rx_status(priv)) {
5120 void *rx_head;
5121
5122 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5123 if (priv->extend_desc)
5124 - rx_head = (void *)priv->dma_erx;
5125 + rx_head = (void *)rx_q->dma_erx;
5126 else
5127 - rx_head = (void *)priv->dma_rx;
5128 + rx_head = (void *)rx_q->dma_rx;
5129
5130 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
5131 }
5132 @@ -2486,9 +3300,9 @@ static int stmmac_rx(struct stmmac_priv
5133 struct dma_desc *np;
5134
5135 if (priv->extend_desc)
5136 - p = (struct dma_desc *)(priv->dma_erx + entry);
5137 + p = (struct dma_desc *)(rx_q->dma_erx + entry);
5138 else
5139 - p = priv->dma_rx + entry;
5140 + p = rx_q->dma_rx + entry;
5141
5142 /* read the status of the incoming frame */
5143 status = priv->hw->desc->rx_status(&priv->dev->stats,
5144 @@ -2499,20 +3313,20 @@ static int stmmac_rx(struct stmmac_priv
5145
5146 count++;
5147
5148 - priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
5149 - next_entry = priv->cur_rx;
5150 + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
5151 + next_entry = rx_q->cur_rx;
5152
5153 if (priv->extend_desc)
5154 - np = (struct dma_desc *)(priv->dma_erx + next_entry);
5155 + np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5156 else
5157 - np = priv->dma_rx + next_entry;
5158 + np = rx_q->dma_rx + next_entry;
5159
5160 prefetch(np);
5161
5162 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
5163 priv->hw->desc->rx_extended_status(&priv->dev->stats,
5164 &priv->xstats,
5165 - priv->dma_erx +
5166 + rx_q->dma_erx +
5167 entry);
5168 if (unlikely(status == discard_frame)) {
5169 priv->dev->stats.rx_errors++;
5170 @@ -2522,9 +3336,9 @@ static int stmmac_rx(struct stmmac_priv
5171 * them in stmmac_rx_refill() function so that
5172 * device can reuse it.
5173 */
5174 - priv->rx_skbuff[entry] = NULL;
5175 + rx_q->rx_skbuff[entry] = NULL;
5176 dma_unmap_single(priv->device,
5177 - priv->rx_skbuff_dma[entry],
5178 + rx_q->rx_skbuff_dma[entry],
5179 priv->dma_buf_sz,
5180 DMA_FROM_DEVICE);
5181 }
5182 @@ -2572,7 +3386,7 @@ static int stmmac_rx(struct stmmac_priv
5183 */
5184 if (unlikely(!priv->plat->has_gmac4 &&
5185 ((frame_len < priv->rx_copybreak) ||
5186 - stmmac_rx_threshold_count(priv)))) {
5187 + stmmac_rx_threshold_count(rx_q)))) {
5188 skb = netdev_alloc_skb_ip_align(priv->dev,
5189 frame_len);
5190 if (unlikely(!skb)) {
5191 @@ -2584,21 +3398,21 @@ static int stmmac_rx(struct stmmac_priv
5192 }
5193
5194 dma_sync_single_for_cpu(priv->device,
5195 - priv->rx_skbuff_dma
5196 + rx_q->rx_skbuff_dma
5197 [entry], frame_len,
5198 DMA_FROM_DEVICE);
5199 skb_copy_to_linear_data(skb,
5200 - priv->
5201 + rx_q->
5202 rx_skbuff[entry]->data,
5203 frame_len);
5204
5205 skb_put(skb, frame_len);
5206 dma_sync_single_for_device(priv->device,
5207 - priv->rx_skbuff_dma
5208 + rx_q->rx_skbuff_dma
5209 [entry], frame_len,
5210 DMA_FROM_DEVICE);
5211 } else {
5212 - skb = priv->rx_skbuff[entry];
5213 + skb = rx_q->rx_skbuff[entry];
5214 if (unlikely(!skb)) {
5215 netdev_err(priv->dev,
5216 "%s: Inconsistent Rx chain\n",
5217 @@ -2607,12 +3421,12 @@ static int stmmac_rx(struct stmmac_priv
5218 break;
5219 }
5220 prefetch(skb->data - NET_IP_ALIGN);
5221 - priv->rx_skbuff[entry] = NULL;
5222 - priv->rx_zeroc_thresh++;
5223 + rx_q->rx_skbuff[entry] = NULL;
5224 + rx_q->rx_zeroc_thresh++;
5225
5226 skb_put(skb, frame_len);
5227 dma_unmap_single(priv->device,
5228 - priv->rx_skbuff_dma[entry],
5229 + rx_q->rx_skbuff_dma[entry],
5230 priv->dma_buf_sz,
5231 DMA_FROM_DEVICE);
5232 }
5233 @@ -2634,7 +3448,7 @@ static int stmmac_rx(struct stmmac_priv
5234 else
5235 skb->ip_summed = CHECKSUM_UNNECESSARY;
5236
5237 - napi_gro_receive(&priv->napi, skb);
5238 + napi_gro_receive(&rx_q->napi, skb);
5239
5240 priv->dev->stats.rx_packets++;
5241 priv->dev->stats.rx_bytes += frame_len;
5242 @@ -2642,7 +3456,7 @@ static int stmmac_rx(struct stmmac_priv
5243 entry = next_entry;
5244 }
5245
5246 - stmmac_rx_refill(priv);
5247 + stmmac_rx_refill(priv, queue);
5248
5249 priv->xstats.rx_pkt_n += count;
5250
5251 @@ -2659,16 +3473,24 @@ static int stmmac_rx(struct stmmac_priv
5252 */
5253 static int stmmac_poll(struct napi_struct *napi, int budget)
5254 {
5255 - struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
5256 + struct stmmac_rx_queue *rx_q =
5257 + container_of(napi, struct stmmac_rx_queue, napi);
5258 + struct stmmac_priv *priv = rx_q->priv_data;
5259 + u32 tx_count = priv->plat->tx_queues_to_use;
5260 + u32 chan = rx_q->queue_index;
5261 int work_done = 0;
5262 + u32 queue;
5263
5264 priv->xstats.napi_poll++;
5265 - stmmac_tx_clean(priv);
5266
5267 - work_done = stmmac_rx(priv, budget);
5268 + /* check all the queues */
5269 + for (queue = 0; queue < tx_count; queue++)
5270 + stmmac_tx_clean(priv, queue);
5271 +
5272 + work_done = stmmac_rx(priv, budget, rx_q->queue_index);
5273 if (work_done < budget) {
5274 napi_complete_done(napi, work_done);
5275 - stmmac_enable_dma_irq(priv);
5276 + stmmac_enable_dma_irq(priv, chan);
5277 }
5278 return work_done;
5279 }
5280 @@ -2684,9 +3506,12 @@ static int stmmac_poll(struct napi_struc
5281 static void stmmac_tx_timeout(struct net_device *dev)
5282 {
5283 struct stmmac_priv *priv = netdev_priv(dev);
5284 + u32 tx_count = priv->plat->tx_queues_to_use;
5285 + u32 chan;
5286
5287 /* Clear Tx resources and restart transmitting again */
5288 - stmmac_tx_err(priv);
5289 + for (chan = 0; chan < tx_count; chan++)
5290 + stmmac_tx_err(priv, chan);
5291 }
5292
5293 /**
5294 @@ -2809,6 +3634,12 @@ static irqreturn_t stmmac_interrupt(int
5295 {
5296 struct net_device *dev = (struct net_device *)dev_id;
5297 struct stmmac_priv *priv = netdev_priv(dev);
5298 + u32 rx_cnt = priv->plat->rx_queues_to_use;
5299 + u32 tx_cnt = priv->plat->tx_queues_to_use;
5300 + u32 queues_count;
5301 + u32 queue;
5302 +
5303 + queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5304
5305 if (priv->irq_wake)
5306 pm_wakeup_event(priv->device, 0);
5307 @@ -2822,16 +3653,30 @@ static irqreturn_t stmmac_interrupt(int
5308 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
5309 int status = priv->hw->mac->host_irq_status(priv->hw,
5310 &priv->xstats);
5311 +
5312 if (unlikely(status)) {
5313 /* For LPI we need to save the tx status */
5314 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5315 priv->tx_path_in_lpi_mode = true;
5316 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5317 priv->tx_path_in_lpi_mode = false;
5318 - if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
5319 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
5320 - priv->rx_tail_addr,
5321 - STMMAC_CHAN0);
5322 + }
5323 +
5324 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
5325 + for (queue = 0; queue < queues_count; queue++) {
5326 + struct stmmac_rx_queue *rx_q =
5327 + &priv->rx_queue[queue];
5328 +
5329 + status |=
5330 + priv->hw->mac->host_mtl_irq_status(priv->hw,
5331 + queue);
5332 +
5333 + if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
5334 + priv->hw->dma->set_rx_tail_ptr)
5335 + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
5336 + rx_q->rx_tail_addr,
5337 + queue);
5338 + }
5339 }
5340
5341 /* PCS link status */
5342 @@ -2916,7 +3761,7 @@ static void sysfs_display_ring(void *hea
5343 ep++;
5344 } else {
5345 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
5346 - i, (unsigned int)virt_to_phys(ep),
5347 + i, (unsigned int)virt_to_phys(p),
5348 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5349 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5350 p++;
5351 @@ -2929,17 +3774,40 @@ static int stmmac_sysfs_ring_read(struct
5352 {
5353 struct net_device *dev = seq->private;
5354 struct stmmac_priv *priv = netdev_priv(dev);
5355 + u32 rx_count = priv->plat->rx_queues_to_use;
5356 + u32 tx_count = priv->plat->tx_queues_to_use;
5357 + u32 queue;
5358
5359 - if (priv->extend_desc) {
5360 - seq_printf(seq, "Extended RX descriptor ring:\n");
5361 - sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
5362 - seq_printf(seq, "Extended TX descriptor ring:\n");
5363 - sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
5364 - } else {
5365 - seq_printf(seq, "RX descriptor ring:\n");
5366 - sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
5367 - seq_printf(seq, "TX descriptor ring:\n");
5368 - sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
5369 + for (queue = 0; queue < rx_count; queue++) {
5370 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5371 +
5372 + seq_printf(seq, "RX Queue %d:\n", queue);
5373 +
5374 + if (priv->extend_desc) {
5375 + seq_printf(seq, "Extended descriptor ring:\n");
5376 + sysfs_display_ring((void *)rx_q->dma_erx,
5377 + DMA_RX_SIZE, 1, seq);
5378 + } else {
5379 + seq_printf(seq, "Descriptor ring:\n");
5380 + sysfs_display_ring((void *)rx_q->dma_rx,
5381 + DMA_RX_SIZE, 0, seq);
5382 + }
5383 + }
5384 +
5385 + for (queue = 0; queue < tx_count; queue++) {
5386 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5387 +
5388 + seq_printf(seq, "TX Queue %d:\n", queue);
5389 +
5390 + if (priv->extend_desc) {
5391 + seq_printf(seq, "Extended descriptor ring:\n");
5392 + sysfs_display_ring((void *)tx_q->dma_etx,
5393 + DMA_TX_SIZE, 1, seq);
5394 + } else {
5395 + seq_printf(seq, "Descriptor ring:\n");
5396 + sysfs_display_ring((void *)tx_q->dma_tx,
5397 + DMA_TX_SIZE, 0, seq);
5398 + }
5399 }
5400
5401 return 0;
5402 @@ -3222,11 +4090,14 @@ int stmmac_dvr_probe(struct device *devi
5403 struct plat_stmmacenet_data *plat_dat,
5404 struct stmmac_resources *res)
5405 {
5406 - int ret = 0;
5407 struct net_device *ndev = NULL;
5408 struct stmmac_priv *priv;
5409 + int ret = 0;
5410 + u32 queue;
5411
5412 - ndev = alloc_etherdev(sizeof(struct stmmac_priv));
5413 + ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
5414 + MTL_MAX_TX_QUEUES,
5415 + MTL_MAX_RX_QUEUES);
5416 if (!ndev)
5417 return -ENOMEM;
5418
5419 @@ -3268,6 +4139,10 @@ int stmmac_dvr_probe(struct device *devi
5420 if (ret)
5421 goto error_hw_init;
5422
5423 + /* Configure real RX and TX queues */
5424 + netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
5425 + netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
5426 +
5427 ndev->netdev_ops = &stmmac_netdev_ops;
5428
5429 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5430 @@ -3300,7 +4175,12 @@ int stmmac_dvr_probe(struct device *devi
5431 "Enable RX Mitigation via HW Watchdog Timer\n");
5432 }
5433
5434 - netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
5435 + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
5436 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5437 +
5438 + netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
5439 + (8 * priv->plat->rx_queues_to_use));
5440 + }
5441
5442 spin_lock_init(&priv->lock);
5443
5444 @@ -3345,7 +4225,11 @@ error_netdev_register:
5445 priv->hw->pcs != STMMAC_PCS_RTBI)
5446 stmmac_mdio_unregister(ndev);
5447 error_mdio_register:
5448 - netif_napi_del(&priv->napi);
5449 + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
5450 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5451 +
5452 + netif_napi_del(&rx_q->napi);
5453 + }
5454 error_hw_init:
5455 free_netdev(ndev);
5456
5457 @@ -3366,10 +4250,9 @@ int stmmac_dvr_remove(struct device *dev
5458
5459 netdev_info(priv->dev, "%s: removing driver", __func__);
5460
5461 - priv->hw->dma->stop_rx(priv->ioaddr);
5462 - priv->hw->dma->stop_tx(priv->ioaddr);
5463 + stmmac_stop_all_dma(priv);
5464
5465 - stmmac_set_mac(priv->ioaddr, false);
5466 + priv->hw->mac->set_mac(priv->ioaddr, false);
5467 netif_carrier_off(ndev);
5468 unregister_netdev(ndev);
5469 if (priv->plat->stmmac_rst)
5470 @@ -3408,20 +4291,19 @@ int stmmac_suspend(struct device *dev)
5471 spin_lock_irqsave(&priv->lock, flags);
5472
5473 netif_device_detach(ndev);
5474 - netif_stop_queue(ndev);
5475 + stmmac_stop_all_queues(priv);
5476
5477 - napi_disable(&priv->napi);
5478 + stmmac_disable_all_queues(priv);
5479
5480 /* Stop TX/RX DMA */
5481 - priv->hw->dma->stop_tx(priv->ioaddr);
5482 - priv->hw->dma->stop_rx(priv->ioaddr);
5483 + stmmac_stop_all_dma(priv);
5484
5485 /* Enable Power down mode by programming the PMT regs */
5486 if (device_may_wakeup(priv->device)) {
5487 priv->hw->mac->pmt(priv->hw, priv->wolopts);
5488 priv->irq_wake = 1;
5489 } else {
5490 - stmmac_set_mac(priv->ioaddr, false);
5491 + priv->hw->mac->set_mac(priv->ioaddr, false);
5492 pinctrl_pm_select_sleep_state(priv->device);
5493 /* Disable clock in case of PWM is off */
5494 clk_disable(priv->plat->pclk);
5495 @@ -3437,6 +4319,31 @@ int stmmac_suspend(struct device *dev)
5496 EXPORT_SYMBOL_GPL(stmmac_suspend);
5497
5498 /**
5499 + * stmmac_reset_queues_param - reset queue parameters
5500 + * @priv: driver private structure
5501 + */
5502 +static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5503 +{
5504 + u32 rx_cnt = priv->plat->rx_queues_to_use;
5505 + u32 tx_cnt = priv->plat->tx_queues_to_use;
5506 + u32 queue;
5507 +
5508 + for (queue = 0; queue < rx_cnt; queue++) {
5509 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5510 +
5511 + rx_q->cur_rx = 0;
5512 + rx_q->dirty_rx = 0;
5513 + }
5514 +
5515 + for (queue = 0; queue < tx_cnt; queue++) {
5516 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5517 +
5518 + tx_q->cur_tx = 0;
5519 + tx_q->dirty_tx = 0;
5520 + }
5521 +}
5522 +
5523 +/**
5524 * stmmac_resume - resume callback
5525 * @dev: device pointer
5526 * Description: when resume this function is invoked to setup the DMA and CORE
5527 @@ -3476,10 +4383,8 @@ int stmmac_resume(struct device *dev)
5528
5529 spin_lock_irqsave(&priv->lock, flags);
5530
5531 - priv->cur_rx = 0;
5532 - priv->dirty_rx = 0;
5533 - priv->dirty_tx = 0;
5534 - priv->cur_tx = 0;
5535 + stmmac_reset_queues_param(priv);
5536 +
5537 /* reset private mss value to force mss context settings at
5538 * next tso xmit (only used for gmac4).
5539 */
5540 @@ -3491,9 +4396,9 @@ int stmmac_resume(struct device *dev)
5541 stmmac_init_tx_coalesce(priv);
5542 stmmac_set_rx_mode(ndev);
5543
5544 - napi_enable(&priv->napi);
5545 + stmmac_enable_all_queues(priv);
5546
5547 - netif_start_queue(ndev);
5548 + stmmac_start_all_queues(priv);
5549
5550 spin_unlock_irqrestore(&priv->lock, flags);
5551
5552 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
5553 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
5554 @@ -32,6 +32,7 @@
5555 */
5556 struct stmmac_pci_dmi_data {
5557 const char *name;
5558 + const char *asset_tag;
5559 unsigned int func;
5560 int phy_addr;
5561 };
5562 @@ -46,6 +47,7 @@ struct stmmac_pci_info {
5563 static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
5564 {
5565 const char *name = dmi_get_system_info(DMI_BOARD_NAME);
5566 + const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
5567 unsigned int func = PCI_FUNC(info->pdev->devfn);
5568 struct stmmac_pci_dmi_data *dmi;
5569
5570 @@ -57,18 +59,19 @@ static int stmmac_pci_find_phy_addr(stru
5571 return 1;
5572
5573 for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
5574 - if (!strcmp(dmi->name, name) && dmi->func == func)
5575 + if (!strcmp(dmi->name, name) && dmi->func == func) {
5576 + /* If asset tag is provided, match on it as well. */
5577 + if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
5578 + continue;
5579 return dmi->phy_addr;
5580 + }
5581 }
5582
5583 return -ENODEV;
5584 }
5585
5586 -static void stmmac_default_data(struct plat_stmmacenet_data *plat)
5587 +static void common_default_data(struct plat_stmmacenet_data *plat)
5588 {
5589 - plat->bus_id = 1;
5590 - plat->phy_addr = 0;
5591 - plat->interface = PHY_INTERFACE_MODE_GMII;
5592 plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
5593 plat->has_gmac = 1;
5594 plat->force_sf_dma_mode = 1;
5595 @@ -76,10 +79,6 @@ static void stmmac_default_data(struct p
5596 plat->mdio_bus_data->phy_reset = NULL;
5597 plat->mdio_bus_data->phy_mask = 0;
5598
5599 - plat->dma_cfg->pbl = 32;
5600 - plat->dma_cfg->pblx8 = true;
5601 - /* TODO: AXI */
5602 -
5603 /* Set default value for multicast hash bins */
5604 plat->multicast_filter_bins = HASH_TABLE_SIZE;
5605
5606 @@ -88,6 +87,31 @@ static void stmmac_default_data(struct p
5607
5608 /* Set the maxmtu to a default of JUMBO_LEN */
5609 plat->maxmtu = JUMBO_LEN;
5610 +
5611 + /* Set default number of RX and TX queues to use */
5612 + plat->tx_queues_to_use = 1;
5613 + plat->rx_queues_to_use = 1;
5614 +
5615 + /* Disable Priority config by default */
5616 + plat->tx_queues_cfg[0].use_prio = false;
5617 + plat->rx_queues_cfg[0].use_prio = false;
5618 +
5619 + /* Disable RX queues routing by default */
5620 + plat->rx_queues_cfg[0].pkt_route = 0x0;
5621 +}
5622 +
5623 +static void stmmac_default_data(struct plat_stmmacenet_data *plat)
5624 +{
5625 + /* Set common default data first */
5626 + common_default_data(plat);
5627 +
5628 + plat->bus_id = 1;
5629 + plat->phy_addr = 0;
5630 + plat->interface = PHY_INTERFACE_MODE_GMII;
5631 +
5632 + plat->dma_cfg->pbl = 32;
5633 + plat->dma_cfg->pblx8 = true;
5634 + /* TODO: AXI */
5635 }
5636
5637 static int quark_default_data(struct plat_stmmacenet_data *plat,
5638 @@ -96,6 +120,9 @@ static int quark_default_data(struct pla
5639 struct pci_dev *pdev = info->pdev;
5640 int ret;
5641
5642 + /* Set common default data first */
5643 + common_default_data(plat);
5644 +
5645 /*
5646 * Refuse to load the driver and register net device if MAC controller
5647 * does not connect to any PHY interface.
5648 @@ -107,27 +134,12 @@ static int quark_default_data(struct pla
5649 plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
5650 plat->phy_addr = ret;
5651 plat->interface = PHY_INTERFACE_MODE_RMII;
5652 - plat->clk_csr = 2;
5653 - plat->has_gmac = 1;
5654 - plat->force_sf_dma_mode = 1;
5655 -
5656 - plat->mdio_bus_data->phy_reset = NULL;
5657 - plat->mdio_bus_data->phy_mask = 0;
5658
5659 plat->dma_cfg->pbl = 16;
5660 plat->dma_cfg->pblx8 = true;
5661 plat->dma_cfg->fixed_burst = 1;
5662 /* AXI (TODO) */
5663
5664 - /* Set default value for multicast hash bins */
5665 - plat->multicast_filter_bins = HASH_TABLE_SIZE;
5666 -
5667 - /* Set default value for unicast filter entries */
5668 - plat->unicast_filter_entries = 1;
5669 -
5670 - /* Set the maxmtu to a default of JUMBO_LEN */
5671 - plat->maxmtu = JUMBO_LEN;
5672 -
5673 return 0;
5674 }
5675
5676 @@ -142,6 +154,24 @@ static struct stmmac_pci_dmi_data quark_
5677 .func = 6,
5678 .phy_addr = 1,
5679 },
5680 + {
5681 + .name = "SIMATIC IOT2000",
5682 + .asset_tag = "6ES7647-0AA00-0YA2",
5683 + .func = 6,
5684 + .phy_addr = 1,
5685 + },
5686 + {
5687 + .name = "SIMATIC IOT2000",
5688 + .asset_tag = "6ES7647-0AA00-1YA2",
5689 + .func = 6,
5690 + .phy_addr = 1,
5691 + },
5692 + {
5693 + .name = "SIMATIC IOT2000",
5694 + .asset_tag = "6ES7647-0AA00-1YA2",
5695 + .func = 7,
5696 + .phy_addr = 1,
5697 + },
5698 {}
5699 };
5700
5701 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
5702 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
5703 @@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_set
5704 if (!np)
5705 return NULL;
5706
5707 - axi = kzalloc(sizeof(*axi), GFP_KERNEL);
5708 + axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
5709 if (!axi) {
5710 of_node_put(np);
5711 return ERR_PTR(-ENOMEM);
5712 @@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_set
5713 }
5714
5715 /**
5716 + * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
5717 + * @pdev: platform device
5718 + */
5719 +static void stmmac_mtl_setup(struct platform_device *pdev,
5720 + struct plat_stmmacenet_data *plat)
5721 +{
5722 + struct device_node *q_node;
5723 + struct device_node *rx_node;
5724 + struct device_node *tx_node;
5725 + u8 queue = 0;
5726 +
5727 + /* For backwards-compatibility with device trees that don't have any
5728 + * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
5729 + * to one RX and TX queues each.
5730 + */
5731 + plat->rx_queues_to_use = 1;
5732 + plat->tx_queues_to_use = 1;
5733 +
5734 + rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
5735 + if (!rx_node)
5736 + return;
5737 +
5738 + tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
5739 + if (!tx_node) {
5740 + of_node_put(rx_node);
5741 + return;
5742 + }
5743 +
5744 + /* Processing RX queues common config */
5745 + if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
5746 + &plat->rx_queues_to_use))
5747 + plat->rx_queues_to_use = 1;
5748 +
5749 + if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
5750 + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
5751 + else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
5752 + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
5753 + else
5754 + plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
5755 +
5756 + /* Processing individual RX queue config */
5757 + for_each_child_of_node(rx_node, q_node) {
5758 + if (queue >= plat->rx_queues_to_use)
5759 + break;
5760 +
5761 + if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
5762 + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
5763 + else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
5764 + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
5765 + else
5766 + plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
5767 +
5768 + if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
5769 + &plat->rx_queues_cfg[queue].chan))
5770 + plat->rx_queues_cfg[queue].chan = queue;
5771 + /* TODO: Dynamic mapping to be included in the future */
5772 +
5773 + if (of_property_read_u32(q_node, "snps,priority",
5774 + &plat->rx_queues_cfg[queue].prio)) {
5775 + plat->rx_queues_cfg[queue].prio = 0;
5776 + plat->rx_queues_cfg[queue].use_prio = false;
5777 + } else {
5778 + plat->rx_queues_cfg[queue].use_prio = true;
5779 + }
5780 +
5781 + /* RX queue specific packet type routing */
5782 + if (of_property_read_bool(q_node, "snps,route-avcp"))
5783 + plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
5784 + else if (of_property_read_bool(q_node, "snps,route-ptp"))
5785 + plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
5786 + else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
5787 + plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
5788 + else if (of_property_read_bool(q_node, "snps,route-up"))
5789 + plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
5790 + else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
5791 + plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
5792 + else
5793 + plat->rx_queues_cfg[queue].pkt_route = 0x0;
5794 +
5795 + queue++;
5796 + }
5797 +
5798 + /* Processing TX queues common config */
5799 + if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
5800 + &plat->tx_queues_to_use))
5801 + plat->tx_queues_to_use = 1;
5802 +
5803 + if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
5804 + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
5805 + else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
5806 + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
5807 + else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
5808 + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
5809 + else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
5810 + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
5811 + else
5812 + plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
5813 +
5814 + queue = 0;
5815 +
5816 + /* Processing individual TX queue config */
5817 + for_each_child_of_node(tx_node, q_node) {
5818 + if (queue >= plat->tx_queues_to_use)
5819 + break;
5820 +
5821 + if (of_property_read_u8(q_node, "snps,weight",
5822 + &plat->tx_queues_cfg[queue].weight))
5823 + plat->tx_queues_cfg[queue].weight = 0x10 + queue;
5824 +
5825 + if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
5826 + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
5827 + } else if (of_property_read_bool(q_node,
5828 + "snps,avb-algorithm")) {
5829 + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
5830 +
5831 + /* Credit Based Shaper parameters used by AVB */
5832 + if (of_property_read_u32(q_node, "snps,send_slope",
5833 + &plat->tx_queues_cfg[queue].send_slope))
5834 + plat->tx_queues_cfg[queue].send_slope = 0x0;
5835 + if (of_property_read_u32(q_node, "snps,idle_slope",
5836 + &plat->tx_queues_cfg[queue].idle_slope))
5837 + plat->tx_queues_cfg[queue].idle_slope = 0x0;
5838 + if (of_property_read_u32(q_node, "snps,high_credit",
5839 + &plat->tx_queues_cfg[queue].high_credit))
5840 + plat->tx_queues_cfg[queue].high_credit = 0x0;
5841 + if (of_property_read_u32(q_node, "snps,low_credit",
5842 + &plat->tx_queues_cfg[queue].low_credit))
5843 + plat->tx_queues_cfg[queue].low_credit = 0x0;
5844 + } else {
5845 + plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
5846 + }
5847 +
5848 + if (of_property_read_u32(q_node, "snps,priority",
5849 + &plat->tx_queues_cfg[queue].prio)) {
5850 + plat->tx_queues_cfg[queue].prio = 0;
5851 + plat->tx_queues_cfg[queue].use_prio = false;
5852 + } else {
5853 + plat->tx_queues_cfg[queue].use_prio = true;
5854 + }
5855 +
5856 + queue++;
5857 + }
5858 +
5859 + of_node_put(rx_node);
5860 + of_node_put(tx_node);
5861 + of_node_put(q_node);
5862 +}
5863 +
5864 +/**
5865 * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
5866 * @plat: driver data platform structure
5867 * @np: device tree node
5868 @@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_d
5869
5870 plat->axi = stmmac_axi_setup(pdev);
5871
5872 + stmmac_mtl_setup(pdev, plat);
5873 +
5874 /* clock setup */
5875 plat->stmmac_clk = devm_clk_get(&pdev->dev,
5876 STMMAC_RESOURCE_NAME);
5877 @@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_d
5878 clk_prepare_enable(plat->pclk);
5879
5880 /* Fall-back to main clock in case of no PTP ref is passed */
5881 - plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
5882 + plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
5883 if (IS_ERR(plat->clk_ptp_ref)) {
5884 plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
5885 plat->clk_ptp_ref = NULL;
5886 dev_warn(&pdev->dev, "PTP uses main clock\n");
5887 } else {
5888 - clk_prepare_enable(plat->clk_ptp_ref);
5889 plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
5890 dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
5891 }
5892 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
5893 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
5894 @@ -59,7 +59,8 @@
5895 /* Enable Snapshot for Messages Relevant to Master */
5896 #define PTP_TCR_TSMSTRENA BIT(15)
5897 /* Select PTP packets for Taking Snapshots */
5898 -#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
5899 +#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
5900 +#define PTP_GMAC4_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
5901 /* Enable MAC address for PTP Frame Filtering */
5902 #define PTP_TCR_TSENMACADDR BIT(18)
5903
5904 --- a/include/linux/stmmac.h
5905 +++ b/include/linux/stmmac.h
5906 @@ -28,6 +28,9 @@
5907
5908 #include <linux/platform_device.h>
5909
5910 +#define MTL_MAX_RX_QUEUES 8
5911 +#define MTL_MAX_TX_QUEUES 8
5912 +
5913 #define STMMAC_RX_COE_NONE 0
5914 #define STMMAC_RX_COE_TYPE1 1
5915 #define STMMAC_RX_COE_TYPE2 2
5916 @@ -44,6 +47,18 @@
5917 #define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
5918 #define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
5919
5920 +/* MTL algorithms identifiers */
5921 +#define MTL_TX_ALGORITHM_WRR 0x0
5922 +#define MTL_TX_ALGORITHM_WFQ 0x1
5923 +#define MTL_TX_ALGORITHM_DWRR 0x2
5924 +#define MTL_TX_ALGORITHM_SP 0x3
5925 +#define MTL_RX_ALGORITHM_SP 0x4
5926 +#define MTL_RX_ALGORITHM_WSP 0x5
5927 +
5928 +/* RX/TX Queue Mode */
5929 +#define MTL_QUEUE_AVB 0x0
5930 +#define MTL_QUEUE_DCB 0x1
5931 +
5932 /* The MDC clock could be set higher than the IEEE 802.3
5933 * specified frequency limit 0f 2.5 MHz, by programming a clock divider
5934 * of value different than the above defined values. The resultant MDIO
5935 @@ -109,6 +124,26 @@ struct stmmac_axi {
5936 bool axi_rb;
5937 };
5938
5939 +struct stmmac_rxq_cfg {
5940 + u8 mode_to_use;
5941 + u8 chan;
5942 + u8 pkt_route;
5943 + bool use_prio;
5944 + u32 prio;
5945 +};
5946 +
5947 +struct stmmac_txq_cfg {
5948 + u8 weight;
5949 + u8 mode_to_use;
5950 + /* Credit Based Shaper parameters */
5951 + u32 send_slope;
5952 + u32 idle_slope;
5953 + u32 high_credit;
5954 + u32 low_credit;
5955 + bool use_prio;
5956 + u32 prio;
5957 +};
5958 +
5959 struct plat_stmmacenet_data {
5960 int bus_id;
5961 int phy_addr;
5962 @@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
5963 int unicast_filter_entries;
5964 int tx_fifo_size;
5965 int rx_fifo_size;
5966 + u8 rx_queues_to_use;
5967 + u8 tx_queues_to_use;
5968 + u8 rx_sched_algorithm;
5969 + u8 tx_sched_algorithm;
5970 + struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
5971 + struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
5972 void (*fix_mac_speed)(void *priv, unsigned int speed);
5973 int (*init)(struct platform_device *pdev, void *priv);
5974 void (*exit)(struct platform_device *pdev, void *priv);