1 --- a/Documentation/devicetree/bindings/net/stmmac.txt
2 +++ b/Documentation/devicetree/bindings/net/stmmac.txt
3 @@ -7,9 +7,12 @@ Required properties:
4 - interrupt-parent: Should be the phandle for the interrupt controller
5 that services interrupts for this device
6 - interrupts: Should contain the STMMAC interrupts
7 -- interrupt-names: Should contain the interrupt names "macirq"
8 - "eth_wake_irq" if this interrupt is supported in the "interrupts"
10 +- interrupt-names: Should contain a list of interrupt names corresponding to
11 + the interrupts in the interrupts property, if available.
12 + Valid interrupt names are:
13 + - "macirq" (combined signal for various interrupt events)
14 + - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection)
15 + - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state)
16 - phy-mode: See ethernet.txt file in the same directory.
17 - snps,reset-gpio gpio number for phy reset.
18 - snps,reset-active-low boolean flag to indicate if phy reset is active low.
19 @@ -28,9 +31,9 @@ Optional properties:
20 clocks may be specified in derived bindings.
21 - clock-names: One name for each entry in the clocks property, the
22 first one should be "stmmaceth" and the second one should be "pclk".
23 -- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
24 - available this clock is used for programming the Timestamp Addend Register.
25 - If not passed then the system clock will be used and this is fine on some
26 +- ptp_ref: this is the PTP reference clock; if PTP is available, this clock
27 + is used for programming the Timestamp Addend Register. If not passed,
28 + the system clock will be used and this is fine on some
30 - tx-fifo-depth: See ethernet.txt file in the same directory
31 - rx-fifo-depth: See ethernet.txt file in the same directory
32 @@ -72,7 +75,45 @@ Optional properties:
33 - snps,mb: mixed-burst
34 - snps,rb: rebuild INCRx Burst
35 - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
37 +- Multiple RX Queues parameters: below is the list of parameters used to
38 + configure the multiple RX queues:
39 + - snps,rx-queues-to-use: number of RX queues to be used in the driver
40 + - Choose one of these RX scheduling algorithms:
41 + - snps,rx-sched-sp: Strict priority
42 + - snps,rx-sched-wsp: Weighted Strict priority
44 + - Choose one of these modes:
45 + - snps,dcb-algorithm: Queue to be enabled as DCB
46 + - snps,avb-algorithm: Queue to be enabled as AVB
47 + - snps,map-to-dma-channel: Channel to map
48 + - Specify packet routing:
49 + - snps,route-avcp: AV Untagged Control packets
50 + - snps,route-ptp: PTP Packets
51 + - snps,route-dcbcp: DCB Control Packets
52 + - snps,route-up: Untagged Packets
53 + - snps,route-multi-broad: Multicast & Broadcast Packets
54 + - snps,priority: RX queue priority (Range: 0x0 to 0xF)
55 +- Multiple TX Queues parameters: below is the list of parameters used to
56 + configure the multiple TX queues:
57 + - snps,tx-queues-to-use: number of TX queues to be used in the driver
58 + - Choose one of these TX scheduling algorithms:
59 + - snps,tx-sched-wrr: Weighted Round Robin
60 + - snps,tx-sched-wfq: Weighted Fair Queuing
61 + - snps,tx-sched-dwrr: Deficit Weighted Round Robin
62 + - snps,tx-sched-sp: Strict priority
64 + - snps,weight: TX queue weight (if using a DCB weight algorithm)
65 + - Choose one of these modes:
66 + - snps,dcb-algorithm: TX queue will be working in DCB
67 + - snps,avb-algorithm: TX queue will be working in AVB
68 + [Attention] Queue 0 is reserved for legacy traffic
69 + and so no AVB is available in this queue.
70 + - Configure Credit-Based Shaper (if AVB Mode selected):
71 + - snps,send_slope: CBS send slope value
72 + - snps,idle_slope: CBS idle slope value
73 + - snps,high_credit: CBS hiCredit value
74 + - snps,low_credit: CBS loCredit value
75 + - snps,priority: TX queue priority (Range: 0x0 to 0xF)
78 stmmac_axi_setup: stmmac-axi-config {
79 @@ -81,12 +122,41 @@ Examples:
80 snps,blen = <256 128 64 32 0 0 0>;
83 + mtl_rx_setup: rx-queues-config {
84 + snps,rx-queues-to-use = <1>;
88 + snps,map-to-dma-channel = <0x0>;
89 + snps,priority = <0x0>;
93 + mtl_tx_setup: tx-queues-config {
94 + snps,tx-queues-to-use = <2>;
97 + snps,weight = <0x10>;
99 + snps,priority = <0x0>;
103 + snps,avb-algorithm;
104 + snps,send_slope = <0x1000>;
105 + snps,idle_slope = <0x1000>;
106 + snps,high_credit = <0x3E800>;
107 + snps,low_credit = <0xFFC18000>;
108 + snps,priority = <0x1>;
112 gmac0: ethernet@e0800000 {
113 compatible = "st,spear600-gmac";
114 reg = <0xe0800000 0x8000>;
115 interrupt-parent = <&vic1>;
116 - interrupts = <24 23>;
117 - interrupt-names = "macirq", "eth_wake_irq";
118 + interrupts = <24 23 22>;
119 + interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
120 mac-address = [000000000000]; /* Filled in by U-Boot */
121 max-frame-size = <3800>;
123 @@ -104,4 +174,6 @@ Examples:
124 phy1: ethernet-phy@0 {
127 + snps,mtl-rx-config = <&mtl_rx_setup>;
128 + snps,mtl-tx-config = <&mtl_tx_setup>;
130 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
131 +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
133 #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
134 #define TSE_PCS_CONTROL_REG 0x00
135 #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
136 +#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
137 #define TSE_PCS_IF_MODE_REG 0x28
138 #define TSE_PCS_LINK_TIMER_0_REG 0x24
139 #define TSE_PCS_LINK_TIMER_1_REG 0x26
141 #define TSE_PCS_SW_RESET_TIMEOUT 100
142 #define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
143 #define TSE_PCS_USE_SGMII_ENA BIT(0)
144 +#define TSE_PCS_IF_USE_SGMII 0x03
146 #define SGMII_ADAPTER_CTRL_REG 0x00
147 #define SGMII_ADAPTER_DISABLE 0x0001
148 @@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, str
152 - writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
153 + writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
155 + writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
157 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
158 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
159 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
160 +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
163 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
165 - struct stmmac_priv *priv = (struct stmmac_priv *)p;
166 - unsigned int entry = priv->cur_tx;
167 - struct dma_desc *desc = priv->dma_tx + entry;
168 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
169 unsigned int nopaged_len = skb_headlen(skb);
170 + struct stmmac_priv *priv = tx_q->priv_data;
171 + unsigned int entry = tx_q->cur_tx;
172 unsigned int bmax, des2;
173 unsigned int i = 1, len;
174 + struct dma_desc *desc;
176 + desc = tx_q->dma_tx + entry;
178 if (priv->plat->enh_desc)
179 bmax = BUF_SIZE_8KiB;
180 @@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, str
181 desc->des2 = cpu_to_le32(des2);
182 if (dma_mapping_error(priv->device, des2))
184 - priv->tx_skbuff_dma[entry].buf = des2;
185 - priv->tx_skbuff_dma[entry].len = bmax;
186 + tx_q->tx_skbuff_dma[entry].buf = des2;
187 + tx_q->tx_skbuff_dma[entry].len = bmax;
188 /* do not close the descriptor and do not set own bit */
189 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
191 + 0, false, skb->len);
194 - priv->tx_skbuff[entry] = NULL;
195 + tx_q->tx_skbuff[entry] = NULL;
196 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
197 - desc = priv->dma_tx + entry;
198 + desc = tx_q->dma_tx + entry;
201 des2 = dma_map_single(priv->device,
202 @@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, str
203 desc->des2 = cpu_to_le32(des2);
204 if (dma_mapping_error(priv->device, des2))
206 - priv->tx_skbuff_dma[entry].buf = des2;
207 - priv->tx_skbuff_dma[entry].len = bmax;
208 + tx_q->tx_skbuff_dma[entry].buf = des2;
209 + tx_q->tx_skbuff_dma[entry].len = bmax;
210 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
211 STMMAC_CHAIN_MODE, 1,
217 @@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, str
218 desc->des2 = cpu_to_le32(des2);
219 if (dma_mapping_error(priv->device, des2))
221 - priv->tx_skbuff_dma[entry].buf = des2;
222 - priv->tx_skbuff_dma[entry].len = len;
223 + tx_q->tx_skbuff_dma[entry].buf = des2;
224 + tx_q->tx_skbuff_dma[entry].len = len;
225 /* last descriptor can be set now */
226 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
227 STMMAC_CHAIN_MODE, 1,
234 - priv->cur_tx = entry;
235 + tx_q->cur_tx = entry;
239 @@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *
241 static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
243 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
244 + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
245 + struct stmmac_priv *priv = rx_q->priv_data;
247 if (priv->hwts_rx_en && !priv->extend_desc)
248 /* NOTE: Device will overwrite des3 with timestamp value if
249 * 1588-2002 time stamping is enabled, hence reinitialize it
250 * to keep explicit chaining in the descriptor.
252 - p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
253 - (((priv->dirty_rx) + 1) %
254 + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
255 + (((rx_q->dirty_rx) + 1) %
257 sizeof(struct dma_desc)));
260 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
262 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
263 - unsigned int entry = priv->dirty_tx;
264 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
265 + struct stmmac_priv *priv = tx_q->priv_data;
266 + unsigned int entry = tx_q->dirty_tx;
268 - if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
269 + if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
271 /* NOTE: Device will overwrite des3 with timestamp value if
272 * 1588-2002 time stamping is enabled, hence reinitialize it
273 * to keep explicit chaining in the descriptor.
275 - p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
276 - ((priv->dirty_tx + 1) % DMA_TX_SIZE))
277 + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
278 + ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
279 * sizeof(struct dma_desc)));
282 --- a/drivers/net/ethernet/stmicro/stmmac/common.h
283 +++ b/drivers/net/ethernet/stmicro/stmmac/common.h
284 @@ -246,6 +246,15 @@ struct stmmac_extra_stats {
285 #define STMMAC_TX_MAX_FRAMES 256
286 #define STMMAC_TX_FRAMES 64
289 +enum packets_types {
290 + PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
291 + PACKET_PTPQ = 0x2, /* PTP Packets */
292 + PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
293 + PACKET_UPQ = 0x4, /* Untagged Packets */
294 + PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
298 enum rx_frame_status {
300 @@ -324,6 +333,9 @@ struct dma_features {
301 unsigned int number_tx_queues;
302 /* Alternate (enhanced) DESC mode */
303 unsigned int enh_desc;
304 + /* TX and RX FIFO sizes */
305 + unsigned int tx_fifo_size;
306 + unsigned int rx_fifo_size;
309 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
310 @@ -361,7 +373,7 @@ struct stmmac_desc_ops {
311 /* Invoked by the xmit function to prepare the tx descriptor */
312 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
313 bool csum_flag, int mode, bool tx_own,
315 + bool ls, unsigned int tot_pkt_len);
316 void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
317 int len2, bool tx_own, bool ls,
318 unsigned int tcphdrlen,
319 @@ -413,6 +425,14 @@ struct stmmac_dma_ops {
320 int (*reset)(void __iomem *ioaddr);
321 void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
322 u32 dma_tx, u32 dma_rx, int atds);
323 + void (*init_chan)(void __iomem *ioaddr,
324 + struct stmmac_dma_cfg *dma_cfg, u32 chan);
325 + void (*init_rx_chan)(void __iomem *ioaddr,
326 + struct stmmac_dma_cfg *dma_cfg,
327 + u32 dma_rx_phy, u32 chan);
328 + void (*init_tx_chan)(void __iomem *ioaddr,
329 + struct stmmac_dma_cfg *dma_cfg,
330 + u32 dma_tx_phy, u32 chan);
331 /* Configure the AXI Bus Mode Register */
332 void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
333 /* Dump DMA registers */
334 @@ -421,25 +441,28 @@ struct stmmac_dma_ops {
335 * An invalid value enables the store-and-forward mode */
336 void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
338 + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
340 + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
341 /* To track extra statistic (if supported) */
342 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
343 void __iomem *ioaddr);
344 void (*enable_dma_transmission) (void __iomem *ioaddr);
345 - void (*enable_dma_irq) (void __iomem *ioaddr);
346 - void (*disable_dma_irq) (void __iomem *ioaddr);
347 - void (*start_tx) (void __iomem *ioaddr);
348 - void (*stop_tx) (void __iomem *ioaddr);
349 - void (*start_rx) (void __iomem *ioaddr);
350 - void (*stop_rx) (void __iomem *ioaddr);
351 + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
352 + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
353 + void (*start_tx)(void __iomem *ioaddr, u32 chan);
354 + void (*stop_tx)(void __iomem *ioaddr, u32 chan);
355 + void (*start_rx)(void __iomem *ioaddr, u32 chan);
356 + void (*stop_rx)(void __iomem *ioaddr, u32 chan);
357 int (*dma_interrupt) (void __iomem *ioaddr,
358 - struct stmmac_extra_stats *x);
359 + struct stmmac_extra_stats *x, u32 chan);
360 /* If supported then get the optional core features */
361 void (*get_hw_feature)(void __iomem *ioaddr,
362 struct dma_features *dma_cap);
363 /* Program the HW RX Watchdog */
364 - void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
365 - void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
366 - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
367 + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
368 + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
369 + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
370 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
371 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
372 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
373 @@ -451,20 +474,44 @@ struct mac_device_info;
375 /* MAC core initialization */
376 void (*core_init)(struct mac_device_info *hw, int mtu);
377 + /* Enable the MAC RX/TX */
378 + void (*set_mac)(void __iomem *ioaddr, bool enable);
379 /* Enable and verify that the IPC module is supported */
380 int (*rx_ipc)(struct mac_device_info *hw);
381 /* Enable RX Queues */
382 - void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
383 + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
384 + /* RX Queues Priority */
385 + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
386 + /* TX Queues Priority */
387 + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
388 + /* RX Queues Routing */
389 + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
391 + /* Program RX Algorithms */
392 + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
393 + /* Program TX Algorithms */
394 + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
395 + /* Set MTL TX queues weight */
396 + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
397 + u32 weight, u32 queue);
398 + /* RX MTL queue to RX dma mapping */
399 + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
400 + /* Configure AV Algorithm */
401 + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
402 + u32 idle_slope, u32 high_credit, u32 low_credit,
404 /* Dump MAC registers */
405 void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
406 /* Handle extra events on specific interrupts hw dependent */
407 int (*host_irq_status)(struct mac_device_info *hw,
408 struct stmmac_extra_stats *x);
409 + /* Handle MTL interrupts */
410 + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
411 /* Multicast filter setting */
412 void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
413 /* Flow control setting */
414 void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
415 - unsigned int fc, unsigned int pause_time);
416 + unsigned int fc, unsigned int pause_time, u32 tx_cnt);
417 /* Set power management mode (e.g. magic frame) */
418 void (*pmt)(struct mac_device_info *hw, unsigned long mode);
419 /* Set/Get Unicast MAC addresses */
420 @@ -477,7 +524,8 @@ struct stmmac_ops {
421 void (*reset_eee_mode)(struct mac_device_info *hw);
422 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
423 void (*set_eee_pls)(struct mac_device_info *hw, int link);
424 - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
425 + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
426 + u32 rx_queues, u32 tx_queues);
428 void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
430 @@ -547,6 +595,11 @@ struct mac_device_info {
434 +struct stmmac_rx_routing {
439 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
440 int perfect_uc_entries,
442 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
443 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
445 #include <linux/clk.h>
446 #include <linux/clk-provider.h>
447 #include <linux/device.h>
448 +#include <linux/gpio/consumer.h>
449 #include <linux/ethtool.h>
450 #include <linux/io.h>
451 +#include <linux/iopoll.h>
452 #include <linux/ioport.h>
453 #include <linux/module.h>
454 +#include <linux/of_device.h>
455 #include <linux/of_net.h>
456 #include <linux/mfd/syscon.h>
457 #include <linux/platform_device.h>
458 +#include <linux/reset.h>
459 #include <linux/stmmac.h>
461 #include "stmmac_platform.h"
465 + struct device *dev;
466 + void __iomem *regs;
468 + struct reset_control *rst;
469 + struct clk *clk_master;
470 + struct clk *clk_slave;
471 + struct clk *clk_tx;
472 + struct clk *clk_rx;
474 + struct gpio_desc *reset;
477 static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
478 struct plat_stmmacenet_data *plat_dat)
479 @@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struc
483 +static void *dwc_qos_probe(struct platform_device *pdev,
484 + struct plat_stmmacenet_data *plat_dat,
485 + struct stmmac_resources *stmmac_res)
489 + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
490 + if (IS_ERR(plat_dat->stmmac_clk)) {
491 + dev_err(&pdev->dev, "apb_pclk clock not found.\n");
492 + return ERR_CAST(plat_dat->stmmac_clk);
495 + err = clk_prepare_enable(plat_dat->stmmac_clk);
497 + dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
499 + return ERR_PTR(err);
502 + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
503 + if (IS_ERR(plat_dat->pclk)) {
504 + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
505 + err = PTR_ERR(plat_dat->pclk);
509 + err = clk_prepare_enable(plat_dat->pclk);
511 + dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
519 + clk_disable_unprepare(plat_dat->stmmac_clk);
520 + return ERR_PTR(err);
523 +static int dwc_qos_remove(struct platform_device *pdev)
525 + struct net_device *ndev = platform_get_drvdata(pdev);
526 + struct stmmac_priv *priv = netdev_priv(ndev);
528 + clk_disable_unprepare(priv->plat->pclk);
529 + clk_disable_unprepare(priv->plat->stmmac_clk);
534 +#define SDMEMCOMPPADCTRL 0x8800
535 +#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
537 +#define AUTO_CAL_CONFIG 0x8804
538 +#define AUTO_CAL_CONFIG_START BIT(31)
539 +#define AUTO_CAL_CONFIG_ENABLE BIT(29)
541 +#define AUTO_CAL_STATUS 0x880c
542 +#define AUTO_CAL_STATUS_ACTIVE BIT(31)
544 +static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
546 + struct tegra_eqos *eqos = priv;
547 + unsigned long rate = 125000000;
548 + bool needs_calibration = false;
554 + needs_calibration = true;
559 + needs_calibration = true;
568 + dev_err(eqos->dev, "invalid speed %u\n", speed);
572 + if (needs_calibration) {
574 + value = readl(eqos->regs + SDMEMCOMPPADCTRL);
575 + value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
576 + writel(value, eqos->regs + SDMEMCOMPPADCTRL);
580 + value = readl(eqos->regs + AUTO_CAL_CONFIG);
581 + value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
582 + writel(value, eqos->regs + AUTO_CAL_CONFIG);
584 + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
586 + value & AUTO_CAL_STATUS_ACTIVE,
589 + dev_err(eqos->dev, "calibration did not start\n");
593 + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
595 + (value & AUTO_CAL_STATUS_ACTIVE) == 0,
598 + dev_err(eqos->dev, "calibration didn't finish\n");
603 + value = readl(eqos->regs + SDMEMCOMPPADCTRL);
604 + value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
605 + writel(value, eqos->regs + SDMEMCOMPPADCTRL);
607 + value = readl(eqos->regs + AUTO_CAL_CONFIG);
608 + value &= ~AUTO_CAL_CONFIG_ENABLE;
609 + writel(value, eqos->regs + AUTO_CAL_CONFIG);
612 + err = clk_set_rate(eqos->clk_tx, rate);
614 + dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
617 +static int tegra_eqos_init(struct platform_device *pdev, void *priv)
619 + struct tegra_eqos *eqos = priv;
620 + unsigned long rate;
623 + rate = clk_get_rate(eqos->clk_slave);
625 + value = (rate / 1000000) - 1;
626 + writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
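
For illustration (assuming a 125 MHz slave-bus clock, a typical rate that is
not stated in the patch), the computation above works out as:

    /* 125000000 / 1000000 = 125 cycles per microsecond; program count - 1. */
    value = (125000000 / 1000000) - 1;  /* == 124 */
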
631 +static void *tegra_eqos_probe(struct platform_device *pdev,
632 + struct plat_stmmacenet_data *data,
633 + struct stmmac_resources *res)
635 + struct tegra_eqos *eqos;
638 + eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
644 + eqos->dev = &pdev->dev;
645 + eqos->regs = res->addr;
647 + eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
648 + if (IS_ERR(eqos->clk_master)) {
649 + err = PTR_ERR(eqos->clk_master);
653 + err = clk_prepare_enable(eqos->clk_master);
657 + eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
658 + if (IS_ERR(eqos->clk_slave)) {
659 + err = PTR_ERR(eqos->clk_slave);
660 + goto disable_master;
663 + data->stmmac_clk = eqos->clk_slave;
665 + err = clk_prepare_enable(eqos->clk_slave);
667 + goto disable_master;
669 + eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
670 + if (IS_ERR(eqos->clk_rx)) {
671 + err = PTR_ERR(eqos->clk_rx);
672 + goto disable_slave;
675 + err = clk_prepare_enable(eqos->clk_rx);
677 + goto disable_slave;
679 + eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
680 + if (IS_ERR(eqos->clk_tx)) {
681 + err = PTR_ERR(eqos->clk_tx);
685 + err = clk_prepare_enable(eqos->clk_tx);
689 + eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
690 + if (IS_ERR(eqos->reset)) {
691 + err = PTR_ERR(eqos->reset);
695 + usleep_range(2000, 4000);
696 + gpiod_set_value(eqos->reset, 0);
698 + eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
699 + if (IS_ERR(eqos->rst)) {
700 + err = PTR_ERR(eqos->rst);
704 + err = reset_control_assert(eqos->rst);
708 + usleep_range(2000, 4000);
710 + err = reset_control_deassert(eqos->rst);
714 + usleep_range(2000, 4000);
716 + data->fix_mac_speed = tegra_eqos_fix_speed;
717 + data->init = tegra_eqos_init;
718 + data->bsp_priv = eqos;
720 + err = tegra_eqos_init(pdev, eqos);
728 + reset_control_assert(eqos->rst);
730 + gpiod_set_value(eqos->reset, 1);
732 + clk_disable_unprepare(eqos->clk_tx);
734 + clk_disable_unprepare(eqos->clk_rx);
736 + clk_disable_unprepare(eqos->clk_slave);
738 + clk_disable_unprepare(eqos->clk_master);
740 + eqos = ERR_PTR(err);
744 +static int tegra_eqos_remove(struct platform_device *pdev)
746 + struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
748 + reset_control_assert(eqos->rst);
749 + gpiod_set_value(eqos->reset, 1);
750 + clk_disable_unprepare(eqos->clk_tx);
751 + clk_disable_unprepare(eqos->clk_rx);
752 + clk_disable_unprepare(eqos->clk_slave);
753 + clk_disable_unprepare(eqos->clk_master);
758 +struct dwc_eth_dwmac_data {
759 + void *(*probe)(struct platform_device *pdev,
760 + struct plat_stmmacenet_data *data,
761 + struct stmmac_resources *res);
762 + int (*remove)(struct platform_device *pdev);
765 +static const struct dwc_eth_dwmac_data dwc_qos_data = {
766 + .probe = dwc_qos_probe,
767 + .remove = dwc_qos_remove,
770 +static const struct dwc_eth_dwmac_data tegra_eqos_data = {
771 + .probe = tegra_eqos_probe,
772 + .remove = tegra_eqos_remove,
775 static int dwc_eth_dwmac_probe(struct platform_device *pdev)
777 + const struct dwc_eth_dwmac_data *data;
778 struct plat_stmmacenet_data *plat_dat;
779 struct stmmac_resources stmmac_res;
780 struct resource *res;
784 + data = of_device_get_match_data(&pdev->dev);
786 memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
789 @@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct pl
790 if (IS_ERR(plat_dat))
791 return PTR_ERR(plat_dat);
793 - plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
794 - if (IS_ERR(plat_dat->stmmac_clk)) {
795 - dev_err(&pdev->dev, "apb_pclk clock not found.\n");
796 - ret = PTR_ERR(plat_dat->stmmac_clk);
797 - plat_dat->stmmac_clk = NULL;
798 - goto err_remove_config_dt;
799 + priv = data->probe(pdev, plat_dat, &stmmac_res);
800 + if (IS_ERR(priv)) {
801 + ret = PTR_ERR(priv);
802 + dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
803 + goto remove_config;
805 - clk_prepare_enable(plat_dat->stmmac_clk);
807 - plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
808 - if (IS_ERR(plat_dat->pclk)) {
809 - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
810 - ret = PTR_ERR(plat_dat->pclk);
811 - plat_dat->pclk = NULL;
812 - goto err_out_clk_dis_phy;
814 - clk_prepare_enable(plat_dat->pclk);
816 ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
818 - goto err_out_clk_dis_aper;
821 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
823 - goto err_out_clk_dis_aper;
829 -err_out_clk_dis_aper:
830 - clk_disable_unprepare(plat_dat->pclk);
831 -err_out_clk_dis_phy:
832 - clk_disable_unprepare(plat_dat->stmmac_clk);
833 -err_remove_config_dt:
835 + data->remove(pdev);
837 stmmac_remove_config_dt(pdev, plat_dat);
840 @@ -178,11 +479,29 @@ err_remove_config_dt:
842 static int dwc_eth_dwmac_remove(struct platform_device *pdev)
844 - return stmmac_pltfr_remove(pdev);
845 + struct net_device *ndev = platform_get_drvdata(pdev);
846 + struct stmmac_priv *priv = netdev_priv(ndev);
847 + const struct dwc_eth_dwmac_data *data;
850 + data = of_device_get_match_data(&pdev->dev);
852 + err = stmmac_dvr_remove(&pdev->dev);
854 + dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
856 + err = data->remove(pdev);
858 + dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
860 + stmmac_remove_config_dt(pdev, priv->plat);
865 static const struct of_device_id dwc_eth_dwmac_match[] = {
866 - { .compatible = "snps,dwc-qos-ethernet-4.10", },
867 + { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
868 + { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
871 MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
872 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
873 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
874 @@ -74,6 +74,10 @@ struct rk_priv_data {
875 #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
876 #define GRF_CLR_BIT(nr) (BIT(nr+16))
878 +#define DELAY_ENABLE(soc, tx, rx) \
879 + (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
880 + ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
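
For illustration, the macro pastes the SoC prefix onto the per-SoC delay
enable/disable bit names, so a call with TX delay on and RX delay off expands
as follows (sketch, using the RK3288 definitions from this file):

    /* DELAY_ENABLE(RK3288, 1, 0) evaluates to: */
    RK3288_GMAC_TXCLK_DLY_ENABLE | RK3288_GMAC_RXCLK_DLY_DISABLE
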
882 #define RK3228_GRF_MAC_CON0 0x0900
883 #define RK3228_GRF_MAC_CON1 0x0904
885 @@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct r
886 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
887 RK3228_GMAC_PHY_INTF_SEL_RGMII |
888 RK3228_GMAC_RMII_MODE_CLR |
889 - RK3228_GMAC_RXCLK_DLY_ENABLE |
890 - RK3228_GMAC_TXCLK_DLY_ENABLE);
891 + DELAY_ENABLE(RK3228, tx_delay, rx_delay));
893 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
894 RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
895 @@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct r
896 RK3288_GMAC_PHY_INTF_SEL_RGMII |
897 RK3288_GMAC_RMII_MODE_CLR);
898 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
899 - RK3288_GMAC_RXCLK_DLY_ENABLE |
900 - RK3288_GMAC_TXCLK_DLY_ENABLE |
901 + DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
902 RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
903 RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
905 @@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct r
906 RK3366_GMAC_PHY_INTF_SEL_RGMII |
907 RK3366_GMAC_RMII_MODE_CLR);
908 regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
909 - RK3366_GMAC_RXCLK_DLY_ENABLE |
910 - RK3366_GMAC_TXCLK_DLY_ENABLE |
911 + DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
912 RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
913 RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
915 @@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct r
916 RK3368_GMAC_PHY_INTF_SEL_RGMII |
917 RK3368_GMAC_RMII_MODE_CLR);
918 regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
919 - RK3368_GMAC_RXCLK_DLY_ENABLE |
920 - RK3368_GMAC_TXCLK_DLY_ENABLE |
921 + DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
922 RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
923 RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
925 @@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct r
926 RK3399_GMAC_PHY_INTF_SEL_RGMII |
927 RK3399_GMAC_RMII_MODE_CLR);
928 regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
929 - RK3399_GMAC_RXCLK_DLY_ENABLE |
930 - RK3399_GMAC_TXCLK_DLY_ENABLE |
931 + DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
932 RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
933 RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
935 @@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_pri
939 - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
940 + switch (bsp_priv->phy_iface) {
941 + case PHY_INTERFACE_MODE_RGMII:
942 dev_info(dev, "init for RGMII\n");
943 bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
945 - } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
947 + case PHY_INTERFACE_MODE_RGMII_ID:
948 + dev_info(dev, "init for RGMII_ID\n");
949 + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
951 + case PHY_INTERFACE_MODE_RGMII_RXID:
952 + dev_info(dev, "init for RGMII_RXID\n");
953 + bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
955 + case PHY_INTERFACE_MODE_RGMII_TXID:
956 + dev_info(dev, "init for RGMII_TXID\n");
957 + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
959 + case PHY_INTERFACE_MODE_RMII:
960 dev_info(dev, "init for RMII\n");
961 bsp_priv->ops->set_to_rmii(bsp_priv);
965 dev_err(dev, "NO interface defined!\n");
968 @@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, uns
969 struct rk_priv_data *bsp_priv = priv;
970 struct device *dev = &bsp_priv->pdev->dev;
972 - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
973 + switch (bsp_priv->phy_iface) {
974 + case PHY_INTERFACE_MODE_RGMII:
975 + case PHY_INTERFACE_MODE_RGMII_ID:
976 + case PHY_INTERFACE_MODE_RGMII_RXID:
977 + case PHY_INTERFACE_MODE_RGMII_TXID:
978 bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
979 - else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
981 + case PHY_INTERFACE_MODE_RMII:
982 bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
986 dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
990 static int rk_gmac_probe(struct platform_device *pdev)
991 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
992 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
993 @@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct
996 static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
997 - unsigned int fc, unsigned int pause_time)
998 + unsigned int fc, unsigned int pause_time,
1001 void __iomem *ioaddr = hw->pcsr;
1002 /* Set flow such that DZPQ in Mac Register 6 is 0,
1003 @@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __
1004 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
1007 -static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
1008 +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
1009 + u32 rx_queues, u32 tx_queues)
1011 u32 value = readl(ioaddr + GMAC_DEBUG);
1013 @@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem
1015 static const struct stmmac_ops dwmac1000_ops = {
1016 .core_init = dwmac1000_core_init,
1017 + .set_mac = stmmac_set_mac,
1018 .rx_ipc = dwmac1000_rx_ipc_enable,
1019 .dump_regs = dwmac1000_dump_regs,
1020 .host_irq_status = dwmac1000_irq_status,
1021 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
1022 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
1023 @@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(voi
1024 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1027 -static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
1028 +static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
1031 writel(riwt, ioaddr + DMA_RX_WATCHDOG);
1033 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1034 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1035 @@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct m
1038 static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
1039 - unsigned int fc, unsigned int pause_time)
1040 + unsigned int fc, unsigned int pause_time,
1043 void __iomem *ioaddr = hw->pcsr;
1044 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
1045 @@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_devi
1047 static const struct stmmac_ops dwmac100_ops = {
1048 .core_init = dwmac100_core_init,
1049 + .set_mac = stmmac_set_mac,
1050 .rx_ipc = dwmac100_rx_ipc_enable,
1051 .dump_regs = dwmac100_dump_mac_regs,
1052 .host_irq_status = dwmac100_irq_status,
1053 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1054 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1056 #define GMAC_HASH_TAB_32_63 0x00000014
1057 #define GMAC_RX_FLOW_CTRL 0x00000090
1058 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
1059 +#define GMAC_TXQ_PRTY_MAP0 0x98
1060 +#define GMAC_TXQ_PRTY_MAP1 0x9C
1061 #define GMAC_RXQ_CTRL0 0x000000a0
1062 +#define GMAC_RXQ_CTRL1 0x000000a4
1063 +#define GMAC_RXQ_CTRL2 0x000000a8
1064 +#define GMAC_RXQ_CTRL3 0x000000ac
1065 #define GMAC_INT_STATUS 0x000000b0
1066 #define GMAC_INT_EN 0x000000b4
1067 +#define GMAC_1US_TIC_COUNTER 0x000000dc
1068 #define GMAC_PCS_BASE 0x000000e0
1069 #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
1070 #define GMAC_PMT 0x000000c0
1072 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
1073 #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
1075 +/* RX Queues Routing */
1076 +#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
1077 +#define GMAC_RXQCTRL_AVCPQ_SHIFT 0
1078 +#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4)
1079 +#define GMAC_RXQCTRL_PTPQ_SHIFT 4
1080 +#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8)
1081 +#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8
1082 +#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12)
1083 +#define GMAC_RXQCTRL_UPQ_SHIFT 12
1084 +#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16)
1085 +#define GMAC_RXQCTRL_MCBCQ_SHIFT 16
1086 +#define GMAC_RXQCTRL_MCBCQEN BIT(20)
1087 +#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
1088 +#define GMAC_RXQCTRL_TACPQE BIT(21)
1089 +#define GMAC_RXQCTRL_TACPQE_SHIFT 21
1091 /* MAC Packet Filtering */
1092 #define GMAC_PACKET_FILTER_PR BIT(0)
1093 #define GMAC_PACKET_FILTER_HMC BIT(2)
1095 /* MAC Flow Control RX */
1096 #define GMAC_RX_FLOW_CTRL_RFE BIT(0)
1098 +/* RX Queues Priorities */
1099 +#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
1100 +#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8)
1102 +/* TX Queues Priorities */
1103 +#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
1104 +#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8)
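
Each queue owns one byte of its priority register; for example (sketch):

    /* Queue 1 occupies bits 15:8 of GMAC_RXQ_CTRL2: */
    GMAC_RXQCTRL_PSRQX_MASK(1)   /* == GENMASK(15, 8) */
    GMAC_RXQCTRL_PSRQX_SHIFT(1)  /* == 8 */
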
1106 /* MAC Flow Control TX */
1107 #define GMAC_TX_FLOW_CTRL_TFE BIT(1)
1108 #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
1109 @@ -148,6 +178,8 @@ enum power_event {
1110 /* MAC HW features1 bitmap */
1111 #define GMAC_HW_FEAT_AVSEL BIT(20)
1112 #define GMAC_HW_TSOEN BIT(18)
1113 +#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
1114 +#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
1116 /* MAC HW features2 bitmap */
1117 #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
1118 @@ -161,8 +193,25 @@ enum power_event {
1119 #define GMAC_HI_REG_AE BIT(31)
1122 +#define MTL_OPERATION_MODE 0x00000c00
1123 +#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
1124 +#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
1125 +#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
1126 +#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
1127 +#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
1128 +#define MTL_OPERATION_RAA BIT(2)
1129 +#define MTL_OPERATION_RAA_SP (0x0 << 2)
1130 +#define MTL_OPERATION_RAA_WSP (0x1 << 2)
1132 #define MTL_INT_STATUS 0x00000c20
1133 -#define MTL_INT_Q0 BIT(0)
1134 +#define MTL_INT_QX(x) BIT(x)
1136 +#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
1137 +#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
1138 +#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
1139 +#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
1140 +#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
1141 +#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
1143 #define MTL_CHAN_BASE_ADDR 0x00000d00
1144 #define MTL_CHAN_BASE_OFFSET 0x40
1145 @@ -180,6 +229,7 @@ enum power_event {
1146 #define MTL_OP_MODE_TSF BIT(1)
1148 #define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
1149 +#define MTL_OP_MODE_TQS_SHIFT 16
1151 #define MTL_OP_MODE_TTC_MASK 0x70
1152 #define MTL_OP_MODE_TTC_SHIFT 4
1153 @@ -193,6 +243,17 @@ enum power_event {
1154 #define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
1155 #define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
1157 +#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
1158 +#define MTL_OP_MODE_RQS_SHIFT 20
1160 +#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
1161 +#define MTL_OP_MODE_RFD_SHIFT 14
1163 +#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
1164 +#define MTL_OP_MODE_RFA_SHIFT 8
1166 +#define MTL_OP_MODE_EHFC BIT(7)
1168 #define MTL_OP_MODE_RTC_MASK 0x18
1169 #define MTL_OP_MODE_RTC_SHIFT 3
1171 @@ -201,6 +262,46 @@ enum power_event {
1172 #define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
1173 #define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
1175 +/* MTL ETS Control register */
1176 +#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
1177 +#define MTL_ETS_CTRL_BASE_OFFSET 0x40
1178 +#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
1179 + ((x) * MTL_ETS_CTRL_BASE_OFFSET))
1181 +#define MTL_ETS_CTRL_CC BIT(3)
1182 +#define MTL_ETS_CTRL_AVALG BIT(2)
1184 +/* MTL Queue Quantum Weight */
1185 +#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
1186 +#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
1187 +#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
1188 + ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
1189 +#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
1191 +/* MTL sendSlopeCredit register */
1192 +#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
1193 +#define MTL_SEND_SLP_CRED_OFFSET 0x40
1194 +#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
1195 + ((x) * MTL_SEND_SLP_CRED_OFFSET))
1197 +#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
1199 +/* MTL hiCredit register */
1200 +#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
1201 +#define MTL_HIGH_CRED_OFFSET 0x40
1202 +#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
1203 + ((x) * MTL_HIGH_CRED_OFFSET))
1205 +#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
1207 +/* MTL loCredit register */
1208 +#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
1209 +#define MTL_LOW_CRED_OFFSET 0x40
1210 +#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
1211 + ((x) * MTL_LOW_CRED_OFFSET))
1213 +#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
1216 #define MTL_DEBUG_TXSTSFSTS BIT(5)
1217 #define MTL_DEBUG_TXFSTS BIT(4)
1218 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1219 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1220 @@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_
1221 writel(value, ioaddr + GMAC_INT_EN);
1224 -static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
1225 +static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
1226 + u8 mode, u32 queue)
1228 void __iomem *ioaddr = hw->pcsr;
1229 u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
1231 value &= GMAC_RX_QUEUE_CLEAR(queue);
1232 - value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
1233 + if (mode == MTL_QUEUE_AVB)
1234 + value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
1235 + else if (mode == MTL_QUEUE_DCB)
1236 + value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
1238 writel(value, ioaddr + GMAC_RXQ_CTRL0);
1241 +static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
1242 + u32 prio, u32 queue)
1244 + void __iomem *ioaddr = hw->pcsr;
1245 + u32 base_register;
1248 + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
1250 + value = readl(ioaddr + base_register);
1252 + value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
1253 + value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
1254 + GMAC_RXQCTRL_PSRQX_MASK(queue);
1255 + writel(value, ioaddr + base_register);
1258 +static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
1259 + u32 prio, u32 queue)
1261 + void __iomem *ioaddr = hw->pcsr;
1262 + u32 base_register;
1265 + base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
1267 + value = readl(ioaddr + base_register);
1269 + value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
1270 + value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
1271 + GMAC_TXQCTRL_PSTQX_MASK(queue);
1273 + writel(value, ioaddr + base_register);
1276 +static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
1277 + u8 packet, u32 queue)
1279 + void __iomem *ioaddr = hw->pcsr;
1282 + const struct stmmac_rx_routing route_possibilities[] = {
1283 + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
1284 + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
1285 + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
1286 + { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
1287 + { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
1290 + value = readl(ioaddr + GMAC_RXQ_CTRL1);
1292 + /* routing configuration */
1293 + value &= ~route_possibilities[packet - 1].reg_mask;
1294 + value |= (queue << route_possibilities[packet - 1].reg_shift) &
1295 + route_possibilities[packet - 1].reg_mask;
1297 + /* some packets require extra ops */
1298 + if (packet == PACKET_AVCPQ) {
1299 + value &= ~GMAC_RXQCTRL_TACPQE;
1300 + value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
1301 + } else if (packet == PACKET_MCBCQ) {
1302 + value &= ~GMAC_RXQCTRL_MCBCQEN;
1303 + value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
1306 + writel(value, ioaddr + GMAC_RXQ_CTRL1);
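
A minimal usage sketch (hypothetical call site; the driver reaches this
through the stmmac_ops table added to common.h in this series):

    /* Steer PTP packets to RX queue 1. */
    priv->hw->mac->rx_queue_routing(priv->hw, PACKET_PTPQ, 1);
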
1309 +static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
1312 + void __iomem *ioaddr = hw->pcsr;
1313 + u32 value = readl(ioaddr + MTL_OPERATION_MODE);
1315 + value &= ~MTL_OPERATION_RAA;
1317 + case MTL_RX_ALGORITHM_SP:
1318 + value |= MTL_OPERATION_RAA_SP;
1320 + case MTL_RX_ALGORITHM_WSP:
1321 + value |= MTL_OPERATION_RAA_WSP;
1327 + writel(value, ioaddr + MTL_OPERATION_MODE);
1330 +static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
1333 + void __iomem *ioaddr = hw->pcsr;
1334 + u32 value = readl(ioaddr + MTL_OPERATION_MODE);
1336 + value &= ~MTL_OPERATION_SCHALG_MASK;
1338 + case MTL_TX_ALGORITHM_WRR:
1339 + value |= MTL_OPERATION_SCHALG_WRR;
1341 + case MTL_TX_ALGORITHM_WFQ:
1342 + value |= MTL_OPERATION_SCHALG_WFQ;
1344 + case MTL_TX_ALGORITHM_DWRR:
1345 + value |= MTL_OPERATION_SCHALG_DWRR;
1347 + case MTL_TX_ALGORITHM_SP:
1348 + value |= MTL_OPERATION_SCHALG_SP;
1355 +static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
1356 + u32 weight, u32 queue)
1358 + void __iomem *ioaddr = hw->pcsr;
1359 + u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
1361 + value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
1362 + value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
1363 + writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
1366 +static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
1368 + void __iomem *ioaddr = hw->pcsr;
1372 + value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
1374 + value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
1376 + if (queue == 0 || queue == 4) {
1377 + value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
1378 + value |= MTL_RXQ_DMA_Q04MDMACH(chan);
1380 + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
1381 + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
1385 + writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
1387 + writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
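
For example (sketch): mapping queue 2 onto DMA channel 1 clears
MTL_RXQ_DMA_QXMDMACH_MASK(2), i.e. bits 19:16 of MTL_RXQ_DMA_MAP0, and
writes channel number 1 into that field:

    priv->hw->mac->map_mtl_to_dma(priv->hw, 2, 1);
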
1390 +static void dwmac4_config_cbs(struct mac_device_info *hw,
1391 + u32 send_slope, u32 idle_slope,
1392 + u32 high_credit, u32 low_credit, u32 queue)
1394 + void __iomem *ioaddr = hw->pcsr;
1397 + pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
1398 + pr_debug("\tsend_slope: 0x%08x\n", send_slope);
1399 + pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
1400 + pr_debug("\thigh_credit: 0x%08x\n", high_credit);
1401 + pr_debug("\tlow_credit: 0x%08x\n", low_credit);
1403 + /* enable AV algorithm */
1404 + value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
1405 + value |= MTL_ETS_CTRL_AVALG;
1406 + value |= MTL_ETS_CTRL_CC;
1407 + writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
1409 + /* configure send slope */
1410 + value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
1411 + value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
1412 + value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
1413 + writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
1415 + /* configure idle slope (same register as tx weight) */
1416 + dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
1418 + /* configure high credit */
1419 + value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
1420 + value &= ~MTL_HIGH_CRED_HC_MASK;
1421 + value |= high_credit & MTL_HIGH_CRED_HC_MASK;
1422 + writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
1424 + /* configure low credit */
1425 + value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
1426 + value &= ~MTL_HIGH_CRED_LC_MASK;
1427 + value |= low_credit & MTL_HIGH_CRED_LC_MASK;
1428 + writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
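
The four CBS values come straight from the device tree; deriving them is
outside this patch. A rough sketch, assuming credits are expressed as a
fraction of port rate scaled by 1024 (an assumption for illustration only,
not taken from the Synopsys databook):

    /* Hypothetical helpers: reserve bw_percent of the port rate for the
     * AVB queue; idleSlope + sendSlope equals the port rate in these units.
     */
    static u32 cbs_idle_slope(u32 bw_percent)
    {
            return (1024 * bw_percent) / 100;
    }

    static u32 cbs_send_slope(u32 bw_percent)
    {
            return 1024 - cbs_idle_slope(bw_percent);
    }
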
1431 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
1433 void __iomem *ioaddr = hw->pcsr;
1434 @@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac
1437 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
1438 - unsigned int fc, unsigned int pause_time)
1439 + unsigned int fc, unsigned int pause_time,
1442 void __iomem *ioaddr = hw->pcsr;
1443 - u32 channel = STMMAC_CHAN0; /* FIXME */
1444 unsigned int flow = 0;
1447 pr_debug("GMAC Flow-Control:\n");
1449 @@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_
1452 pr_debug("\tTransmit Flow-Control ON\n");
1453 - flow |= GMAC_TX_FLOW_CTRL_TFE;
1454 - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
1458 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
1459 - flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
1460 - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
1462 + for (queue = 0; queue < tx_cnt; queue++) {
1463 + flow |= GMAC_TX_FLOW_CTRL_TFE;
1467 + (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
1469 + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
1473 @@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iome
1477 +static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
1479 + void __iomem *ioaddr = hw->pcsr;
1480 + u32 mtl_int_qx_status;
1483 + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
1485 + /* Check MTL Interrupt */
1486 + if (mtl_int_qx_status & MTL_INT_QX(chan)) {
1487 + /* read Queue x Interrupt status */
1488 + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
1490 + if (status & MTL_RX_OVERFLOW_INT) {
1491 + /* clear Interrupt */
1492 + writel(status | MTL_RX_OVERFLOW_INT,
1493 + ioaddr + MTL_CHAN_INT_CTRL(chan));
1494 + ret = CORE_IRQ_MTL_RX_OVERFLOW;
1501 static int dwmac4_irq_status(struct mac_device_info *hw,
1502 struct stmmac_extra_stats *x)
1504 void __iomem *ioaddr = hw->pcsr;
1505 - u32 mtl_int_qx_status;
1509 @@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_
1510 x->irq_receive_pmt_irq_n++;
1513 - mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
1514 - /* Check MTL Interrupt: Currently only one queue is used: Q0. */
1515 - if (mtl_int_qx_status & MTL_INT_Q0) {
1516 - /* read Queue 0 Interrupt status */
1517 - u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
1519 - if (status & MTL_RX_OVERFLOW_INT) {
1520 - /* clear Interrupt */
1521 - writel(status | MTL_RX_OVERFLOW_INT,
1522 - ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
1523 - ret = CORE_IRQ_MTL_RX_OVERFLOW;
1527 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
1528 if (intr_status & PCS_RGSMIIIS_IRQ)
1529 dwmac4_phystatus(ioaddr, x);
1530 @@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_
1534 -static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
1535 +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
1536 + u32 rx_queues, u32 tx_queues)
1541 - /* Currently only channel 0 is supported */
1542 - value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
1543 + for (queue = 0; queue < tx_queues; queue++) {
1544 + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
1546 - if (value & MTL_DEBUG_TXSTSFSTS)
1547 - x->mtl_tx_status_fifo_full++;
1548 - if (value & MTL_DEBUG_TXFSTS)
1549 - x->mtl_tx_fifo_not_empty++;
1550 - if (value & MTL_DEBUG_TWCSTS)
1551 - x->mmtl_fifo_ctrl++;
1552 - if (value & MTL_DEBUG_TRCSTS_MASK) {
1553 - u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
1554 - >> MTL_DEBUG_TRCSTS_SHIFT;
1555 - if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
1556 - x->mtl_tx_fifo_read_ctrl_write++;
1557 - else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
1558 - x->mtl_tx_fifo_read_ctrl_wait++;
1559 - else if (trcsts == MTL_DEBUG_TRCSTS_READ)
1560 - x->mtl_tx_fifo_read_ctrl_read++;
1562 - x->mtl_tx_fifo_read_ctrl_idle++;
1563 + if (value & MTL_DEBUG_TXSTSFSTS)
1564 + x->mtl_tx_status_fifo_full++;
1565 + if (value & MTL_DEBUG_TXFSTS)
1566 + x->mtl_tx_fifo_not_empty++;
1567 + if (value & MTL_DEBUG_TWCSTS)
1568 + x->mmtl_fifo_ctrl++;
1569 + if (value & MTL_DEBUG_TRCSTS_MASK) {
1570 + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
1571 + >> MTL_DEBUG_TRCSTS_SHIFT;
1572 + if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
1573 + x->mtl_tx_fifo_read_ctrl_write++;
1574 + else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
1575 + x->mtl_tx_fifo_read_ctrl_wait++;
1576 + else if (trcsts == MTL_DEBUG_TRCSTS_READ)
1577 + x->mtl_tx_fifo_read_ctrl_read++;
1579 + x->mtl_tx_fifo_read_ctrl_idle++;
1581 + if (value & MTL_DEBUG_TXPAUSED)
1582 + x->mac_tx_in_pause++;
1584 - if (value & MTL_DEBUG_TXPAUSED)
1585 - x->mac_tx_in_pause++;
1587 - value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
1588 + for (queue = 0; queue < rx_queues; queue++) {
1589 + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
1591 - if (value & MTL_DEBUG_RXFSTS_MASK) {
1592 - u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
1593 - >> MTL_DEBUG_RRCSTS_SHIFT;
1595 - if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
1596 - x->mtl_rx_fifo_fill_level_full++;
1597 - else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
1598 - x->mtl_rx_fifo_fill_above_thresh++;
1599 - else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
1600 - x->mtl_rx_fifo_fill_below_thresh++;
1602 - x->mtl_rx_fifo_fill_level_empty++;
1604 - if (value & MTL_DEBUG_RRCSTS_MASK) {
1605 - u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
1606 - MTL_DEBUG_RRCSTS_SHIFT;
1608 - if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
1609 - x->mtl_rx_fifo_read_ctrl_flush++;
1610 - else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
1611 - x->mtl_rx_fifo_read_ctrl_read_data++;
1612 - else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
1613 - x->mtl_rx_fifo_read_ctrl_status++;
1615 - x->mtl_rx_fifo_read_ctrl_idle++;
1616 + if (value & MTL_DEBUG_RXFSTS_MASK) {
1617 + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
1618 + >> MTL_DEBUG_RRCSTS_SHIFT;
1620 + if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
1621 + x->mtl_rx_fifo_fill_level_full++;
1622 + else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
1623 + x->mtl_rx_fifo_fill_above_thresh++;
1624 + else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
1625 + x->mtl_rx_fifo_fill_below_thresh++;
1627 + x->mtl_rx_fifo_fill_level_empty++;
1629 + if (value & MTL_DEBUG_RRCSTS_MASK) {
1630 + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
1631 + MTL_DEBUG_RRCSTS_SHIFT;
1633 + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
1634 + x->mtl_rx_fifo_read_ctrl_flush++;
1635 + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
1636 + x->mtl_rx_fifo_read_ctrl_read_data++;
1637 + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
1638 + x->mtl_rx_fifo_read_ctrl_status++;
1640 + x->mtl_rx_fifo_read_ctrl_idle++;
1642 + if (value & MTL_DEBUG_RWCSTS)
1643 + x->mtl_rx_fifo_ctrl_active++;
1645 - if (value & MTL_DEBUG_RWCSTS)
1646 - x->mtl_rx_fifo_ctrl_active++;
1649 value = readl(ioaddr + GMAC_DEBUG);
1650 @@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *i
1652 static const struct stmmac_ops dwmac4_ops = {
1653 .core_init = dwmac4_core_init,
1654 + .set_mac = stmmac_set_mac,
1655 .rx_ipc = dwmac4_rx_ipc_enable,
1656 .rx_queue_enable = dwmac4_rx_queue_enable,
1657 + .rx_queue_prio = dwmac4_rx_queue_priority,
1658 + .tx_queue_prio = dwmac4_tx_queue_priority,
1659 + .rx_queue_routing = dwmac4_rx_queue_routing,
1660 + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1661 + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1662 + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1663 + .map_mtl_to_dma = dwmac4_map_mtl_dma,
1664 + .config_cbs = dwmac4_config_cbs,
1665 .dump_regs = dwmac4_dump_regs,
1666 .host_irq_status = dwmac4_irq_status,
1667 + .host_mtl_irq_status = dwmac4_irq_mtl_status,
1668 + .flow_ctrl = dwmac4_flow_ctrl,
1669 + .pmt = dwmac4_pmt,
1670 + .set_umac_addr = dwmac4_set_umac_addr,
1671 + .get_umac_addr = dwmac4_get_umac_addr,
1672 + .set_eee_mode = dwmac4_set_eee_mode,
1673 + .reset_eee_mode = dwmac4_reset_eee_mode,
1674 + .set_eee_timer = dwmac4_set_eee_timer,
1675 + .set_eee_pls = dwmac4_set_eee_pls,
1676 + .pcs_ctrl_ane = dwmac4_ctrl_ane,
1677 + .pcs_rane = dwmac4_rane,
1678 + .pcs_get_adv_lp = dwmac4_get_adv_lp,
1679 + .debug = dwmac4_debug,
1680 + .set_filter = dwmac4_set_filter,
1683 +static const struct stmmac_ops dwmac410_ops = {
1684 + .core_init = dwmac4_core_init,
1685 + .set_mac = stmmac_dwmac4_set_mac,
1686 + .rx_ipc = dwmac4_rx_ipc_enable,
1687 + .rx_queue_enable = dwmac4_rx_queue_enable,
1688 + .rx_queue_prio = dwmac4_rx_queue_priority,
1689 + .tx_queue_prio = dwmac4_tx_queue_priority,
1690 + .rx_queue_routing = dwmac4_rx_queue_routing,
1691 + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1692 + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1693 + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1694 + .map_mtl_to_dma = dwmac4_map_mtl_dma,
1695 + .config_cbs = dwmac4_config_cbs,
1696 + .dump_regs = dwmac4_dump_regs,
1697 + .host_irq_status = dwmac4_irq_status,
1698 + .host_mtl_irq_status = dwmac4_irq_mtl_status,
1699 .flow_ctrl = dwmac4_flow_ctrl,
1701 .set_umac_addr = dwmac4_set_umac_addr,
1702 @@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(voi
1703 if (mac->multicast_filter_bins)
1704 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1706 - mac->mac = &dwmac4_ops;
1708 mac->link.port = GMAC_CONFIG_PS;
1709 mac->link.duplex = GMAC_CONFIG_DM;
1710 mac->link.speed = GMAC_CONFIG_FES;
1711 @@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(voi
1713 mac->dma = &dwmac4_dma_ops;
1715 + if (*synopsys_id >= DWMAC_CORE_4_00)
1716 + mac->mac = &dwmac410_ops;
1717 + else
1718 + mac->mac = &dwmac4_ops;
1722 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1723 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1724 @@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestam
1726 /* Context type from W/B descriptor must be zero */
1727 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
1731 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
1732 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
1740 static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
1741 @@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestam
1746 + if (likely(ret == 0))
1752 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1753 @@ -304,12 +307,13 @@ static void dwmac4_rd_init_tx_desc(struc
1755 static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1756 bool csum_flag, int mode, bool tx_own,
1758 + bool ls, unsigned int tot_pkt_len)
1760 unsigned int tdes3 = le32_to_cpu(p->des3);
1762 p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
1764 + tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
1766 tdes3 |= TDES3_FIRST_DESCRIPTOR;
1768 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1769 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1770 @@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem
1771 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1774 -static void dwmac4_dma_init_channel(void __iomem *ioaddr,
1775 - struct stmmac_dma_cfg *dma_cfg,
1776 - u32 dma_tx_phy, u32 dma_rx_phy,
1778 +void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
1779 + struct stmmac_dma_cfg *dma_cfg,
1780 + u32 dma_rx_phy, u32 chan)
1783 - int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1784 - int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1785 + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1787 - /* set PBL for each channels. Currently we affect same configuration
1790 - value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
1791 - if (dma_cfg->pblx8)
1792 - value = value | DMA_BUS_MODE_PBL;
1793 - writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
1794 + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
1795 + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1796 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
1798 + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
1801 - value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
1802 +void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
1803 + struct stmmac_dma_cfg *dma_cfg,
1804 + u32 dma_tx_phy, u32 chan)
1807 + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1809 + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
1810 value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
1811 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
1812 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
1814 - value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
1815 - value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1816 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
1817 + writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
1820 - /* Mask interrupts by writing to CSR7 */
1821 - writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
1822 +void dwmac4_dma_init_channel(void __iomem *ioaddr,
1823 + struct stmmac_dma_cfg *dma_cfg, u32 chan)
1827 + /* common channel control register config */
1828 + value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
1829 + if (dma_cfg->pblx8)
1830 + value = value | DMA_BUS_MODE_PBL;
1831 + writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
1833 - writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
1834 - writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
1835 + /* Mask interrupts by writing to CSR7 */
1836 + writel(DMA_CHAN_INTR_DEFAULT_MASK,
1837 + ioaddr + DMA_CHAN_INTR_ENA(chan));
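With the channel loop moved out of the DMA core, the caller now drives initialization per channel. A minimal sketch of how the three new helpers combine, assuming the per-queue layout this patch introduces (the wrapper function itself and the use of plat->dma_cfg are illustrative, not part of the patch):

	static void example_init_all_dma_channels(struct stmmac_priv *priv)
	{
		struct stmmac_dma_cfg *dma_cfg = priv->plat->dma_cfg;
		u32 rx_cnt = priv->plat->rx_queues_to_use;
		u32 tx_cnt = priv->plat->tx_queues_to_use;
		u32 chan;

		/* common control register and interrupt mask, per channel */
		for (chan = 0; chan < max(rx_cnt, tx_cnt); chan++)
			dwmac4_dma_init_channel(priv->ioaddr, dma_cfg, chan);

		/* per-direction PBL and descriptor base address */
		for (chan = 0; chan < rx_cnt; chan++)
			dwmac4_dma_init_rx_chan(priv->ioaddr, dma_cfg,
						priv->rx_queue[chan].dma_rx_phy,
						chan);
		for (chan = 0; chan < tx_cnt; chan++)
			dwmac4_dma_init_tx_chan(priv->ioaddr, dma_cfg,
						priv->tx_queue[chan].dma_tx_phy,
						chan);
	}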
1840 static void dwmac4_dma_init(void __iomem *ioaddr,
1841 @@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem
1842 u32 dma_tx, u32 dma_rx, int atds)
1844 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
1847 /* Set the Fixed burst mode */
1848 if (dma_cfg->fixed_burst)
1849 @@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem
1850 value |= DMA_SYS_BUS_AAL;
1852 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1854 - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
1855 - dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
1858 static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
1859 @@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __
1860 _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
1863 -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
1864 +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
1869 - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
1870 - writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
1871 + for (chan = 0; chan < number_chan; chan++)
1872 + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
1875 -static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
1876 - int rxmode, u32 channel)
1877 +static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
1878 + u32 channel, int fifosz)
1880 - u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
1881 + unsigned int rqs = fifosz / 256 - 1;
1882 + u32 mtl_rx_op, mtl_rx_int;
1884 - /* Following code only done for channel 0, other channels not yet
1887 - mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
1888 + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
1890 + if (mode == SF_DMA_MODE) {
1891 + pr_debug("GMAC: enable RX store and forward mode\n");
1892 + mtl_rx_op |= MTL_OP_MODE_RSF;
1894 + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
1895 + mtl_rx_op &= ~MTL_OP_MODE_RSF;
1896 + mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
1898 + mtl_rx_op |= MTL_OP_MODE_RTC_32;
1899 + else if (mode <= 64)
1900 + mtl_rx_op |= MTL_OP_MODE_RTC_64;
1901 + else if (mode <= 96)
1902 + mtl_rx_op |= MTL_OP_MODE_RTC_96;
1904 + mtl_rx_op |= MTL_OP_MODE_RTC_128;
1907 + mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
1908 + mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
1910 + /* enable flow control only if each channel gets 4 KiB or more FIFO */
1911 + if (fifosz >= 4096) {
1912 + unsigned int rfd, rfa;
1914 + mtl_rx_op |= MTL_OP_MODE_EHFC;
1916 + /* Set Threshold for Activating Flow Control to min 2 frames,
1917 + * i.e. 1500 * 2 = 3000 bytes.
1919 + * Set Threshold for Deactivating Flow Control to min 1 frame,
1920 + * i.e. 1500 bytes.
1924 + /* This violates the above formula because of the FIFO size
1925 + * limit, so overflow may still occur in spite of it.
1927 + rfd = 0x03; /* Full-2.5K */
1928 + rfa = 0x01; /* Full-1.5K */
1932 + rfd = 0x06; /* Full-4K */
1933 + rfa = 0x0a; /* Full-6K */
1937 + rfd = 0x06; /* Full-4K */
1938 + rfa = 0x12; /* Full-10K */
1942 + rfd = 0x06; /* Full-4K */
1943 + rfa = 0x1e; /* Full-16K */
1947 - if (txmode == SF_DMA_MODE) {
1948 + mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
1949 + mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
1951 + mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
1952 + mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
1955 + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
1957 + /* Enable MTL RX overflow */
1958 + mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
1959 + writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
1960 + ioaddr + MTL_CHAN_INT_CTRL(channel));
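To make the field encodings concrete, a worked example following the logic above (the switch labels are partly elided in this excerpt, so the larger tiers are inferred from the rfd/rfa pairs shown):

	/* fifosz = 4096: rqs = 4096 / 256 - 1 = 15 (queue size, 256B units);
	 *                EHFC set; rfd = 0x03 (full - 2.5K),
	 *                          rfa = 0x01 (full - 1.5K)
	 * larger FIFOs:  rfd stays 0x06 (full - 4K) while rfa steps through
	 *                0x0a/0x12/0x1e (full - 6K/10K/16K) as listed above
	 * fifosz < 4096: EHFC stays clear, so hardware flow control is off
	 */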
1963 +static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
1966 + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
1968 + if (mode == SF_DMA_MODE) {
1969 pr_debug("GMAC: enable TX store and forward mode\n");
1970 /* Transmit COE type 2 cannot be done in cut-through mode. */
1971 mtl_tx_op |= MTL_OP_MODE_TSF;
1973 - pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
1974 + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
1975 mtl_tx_op &= ~MTL_OP_MODE_TSF;
1976 mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
1977 /* Set the transmit threshold */
1980 mtl_tx_op |= MTL_OP_MODE_TTC_32;
1981 - else if (txmode <= 64)
1982 + else if (mode <= 64)
1983 mtl_tx_op |= MTL_OP_MODE_TTC_64;
1984 - else if (txmode <= 96)
1985 + else if (mode <= 96)
1986 mtl_tx_op |= MTL_OP_MODE_TTC_96;
1987 - else if (txmode <= 128)
1988 + else if (mode <= 128)
1989 mtl_tx_op |= MTL_OP_MODE_TTC_128;
1990 - else if (txmode <= 192)
1991 + else if (mode <= 192)
1992 mtl_tx_op |= MTL_OP_MODE_TTC_192;
1993 - else if (txmode <= 256)
1994 + else if (mode <= 256)
1995 mtl_tx_op |= MTL_OP_MODE_TTC_256;
1996 - else if (txmode <= 384)
1997 + else if (mode <= 384)
1998 mtl_tx_op |= MTL_OP_MODE_TTC_384;
2000 mtl_tx_op |= MTL_OP_MODE_TTC_512;
2001 @@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void
2003 mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
2004 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
2006 - mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
2008 - if (rxmode == SF_DMA_MODE) {
2009 - pr_debug("GMAC: enable RX store and forward mode\n");
2010 - mtl_rx_op |= MTL_OP_MODE_RSF;
2012 - pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
2013 - mtl_rx_op &= ~MTL_OP_MODE_RSF;
2014 - mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
2016 - mtl_rx_op |= MTL_OP_MODE_RTC_32;
2017 - else if (rxmode <= 64)
2018 - mtl_rx_op |= MTL_OP_MODE_RTC_64;
2019 - else if (rxmode <= 96)
2020 - mtl_rx_op |= MTL_OP_MODE_RTC_96;
2022 - mtl_rx_op |= MTL_OP_MODE_RTC_128;
2025 - writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
2027 - /* Enable MTL RX overflow */
2028 - mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
2029 - writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
2030 - ioaddr + MTL_CHAN_INT_CTRL(channel));
2033 -static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
2034 - int rxmode, int rxfifosz)
2036 - /* Only Channel 0 is actually configured and used */
2037 - dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
2040 static void dwmac4_get_hw_feature(void __iomem *ioaddr,
2041 @@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void _
2042 hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
2043 dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
2044 dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
2045 + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
2046 + * shifting and store the sizes in bytes.
2048 + dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
2049 + dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
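A quick worked example of this decode, for reference:

	/* a TXFIFOSIZE field of 7 means 128 << 7 = 16384 bytes; the field
	 * stores log2(size / 128), so a field of 0 is the 128-byte minimum
	 */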
2050 /* MAC HW feature2 */
2051 hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
2052 /* TX and RX number of channels */
2053 @@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iom
2054 const struct stmmac_dma_ops dwmac4_dma_ops = {
2055 .reset = dwmac4_dma_reset,
2056 .init = dwmac4_dma_init,
2057 + .init_chan = dwmac4_dma_init_channel,
2058 + .init_rx_chan = dwmac4_dma_init_rx_chan,
2059 + .init_tx_chan = dwmac4_dma_init_tx_chan,
2060 .axi = dwmac4_dma_axi,
2061 .dump_regs = dwmac4_dump_dma_regs,
2062 - .dma_mode = dwmac4_dma_operation_mode,
2063 + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
2064 + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
2065 .enable_dma_irq = dwmac4_enable_dma_irq,
2066 .disable_dma_irq = dwmac4_disable_dma_irq,
2067 .start_tx = dwmac4_dma_start_tx,
2068 @@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_o
2069 const struct stmmac_dma_ops dwmac410_dma_ops = {
2070 .reset = dwmac4_dma_reset,
2071 .init = dwmac4_dma_init,
2072 + .init_chan = dwmac4_dma_init_channel,
2073 + .init_rx_chan = dwmac4_dma_init_rx_chan,
2074 + .init_tx_chan = dwmac4_dma_init_tx_chan,
2075 .axi = dwmac4_dma_axi,
2076 .dump_regs = dwmac4_dump_dma_regs,
2077 - .dma_mode = dwmac4_dma_operation_mode,
2078 + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
2079 + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
2080 .enable_dma_irq = dwmac410_enable_dma_irq,
2081 .disable_dma_irq = dwmac4_disable_dma_irq,
2082 .start_tx = dwmac4_dma_start_tx,
2083 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
2084 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
2085 @@ -185,17 +185,17 @@
2087 int dwmac4_dma_reset(void __iomem *ioaddr);
2088 void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
2089 -void dwmac4_enable_dma_irq(void __iomem *ioaddr);
2090 -void dwmac410_enable_dma_irq(void __iomem *ioaddr);
2091 -void dwmac4_disable_dma_irq(void __iomem *ioaddr);
2092 -void dwmac4_dma_start_tx(void __iomem *ioaddr);
2093 -void dwmac4_dma_stop_tx(void __iomem *ioaddr);
2094 -void dwmac4_dma_start_rx(void __iomem *ioaddr);
2095 -void dwmac4_dma_stop_rx(void __iomem *ioaddr);
2096 +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2097 +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2098 +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
2099 +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
2100 +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
2101 +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
2102 +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
2103 int dwmac4_dma_interrupt(void __iomem *ioaddr,
2104 - struct stmmac_extra_stats *x);
2105 -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
2106 -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
2107 + struct stmmac_extra_stats *x, u32 chan);
2108 +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
2109 +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
2110 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
2111 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
2113 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
2114 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
2115 @@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioadd
2117 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
2119 - writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
2120 + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
2123 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
2125 - writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
2126 + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
2129 -void dwmac4_dma_start_tx(void __iomem *ioaddr)
2130 +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
2132 - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2133 + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
2135 value |= DMA_CONTROL_ST;
2136 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2137 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
2139 value = readl(ioaddr + GMAC_CONFIG);
2140 value |= GMAC_CONFIG_TE;
2141 writel(value, ioaddr + GMAC_CONFIG);
2144 -void dwmac4_dma_stop_tx(void __iomem *ioaddr)
2145 +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
2147 - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2148 + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
2150 value &= ~DMA_CONTROL_ST;
2151 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2152 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
2154 value = readl(ioaddr + GMAC_CONFIG);
2155 value &= ~GMAC_CONFIG_TE;
2156 writel(value, ioaddr + GMAC_CONFIG);
2159 -void dwmac4_dma_start_rx(void __iomem *ioaddr)
2160 +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
2162 - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2163 + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
2165 value |= DMA_CONTROL_SR;
2167 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2168 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
2170 value = readl(ioaddr + GMAC_CONFIG);
2171 value |= GMAC_CONFIG_RE;
2172 writel(value, ioaddr + GMAC_CONFIG);
2175 -void dwmac4_dma_stop_rx(void __iomem *ioaddr)
2176 +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
2178 - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2179 + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
2181 value &= ~DMA_CONTROL_SR;
2182 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2183 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
2185 value = readl(ioaddr + GMAC_CONFIG);
2186 value &= ~GMAC_CONFIG_RE;
2187 writel(value, ioaddr + GMAC_CONFIG);
2190 -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
2191 +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
2193 - writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
2194 + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
2197 -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
2198 +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
2200 - writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
2201 + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
2204 -void dwmac4_enable_dma_irq(void __iomem *ioaddr)
2205 +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2207 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
2208 - DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2209 + DMA_CHAN_INTR_ENA(chan));
2212 -void dwmac410_enable_dma_irq(void __iomem *ioaddr)
2213 +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2215 writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
2216 - ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2217 + ioaddr + DMA_CHAN_INTR_ENA(chan));
2220 -void dwmac4_disable_dma_irq(void __iomem *ioaddr)
2221 +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
2223 - writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2224 + writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
2227 int dwmac4_dma_interrupt(void __iomem *ioaddr,
2228 - struct stmmac_extra_stats *x)
2229 + struct stmmac_extra_stats *x, u32 chan)
2233 - u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
2234 + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
2236 /* ABNORMAL interrupts */
2237 if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
2238 @@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *i
2239 if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
2242 - value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2243 + value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
2244 /* to schedule NAPI on real RIE event. */
2245 if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
2246 x->rx_normal_irq_n++;
2247 @@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *i
2248 * status [21-0] except reserved bits [5-3]
2250 writel((intr_status & 0x3fffc7),
2251 - ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
2252 + ioaddr + DMA_CHAN_STATUS(chan));
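With the handler parameterized by channel, a shared MAC interrupt can be demultiplexed by looping over the active channels. A sketch under the assumption that the caller knows the channel count (the wrapper is hypothetical; the real dispatch lives in stmmac_main.c):

	static void example_poll_dma_status(struct stmmac_priv *priv,
					    u32 chan_cnt)
	{
		u32 chan;

		for (chan = 0; chan < chan_cnt; chan++) {
			int status = dwmac4_dma_interrupt(priv->ioaddr,
							  &priv->xstats, chan);

			/* a nonzero status would typically schedule NAPI or
			 * kick per-channel error recovery; elided here
			 */
			(void)status;
		}
	}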
2256 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
2257 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
2258 @@ -137,13 +137,14 @@
2259 #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
2261 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
2262 -void dwmac_enable_dma_irq(void __iomem *ioaddr);
2263 -void dwmac_disable_dma_irq(void __iomem *ioaddr);
2264 -void dwmac_dma_start_tx(void __iomem *ioaddr);
2265 -void dwmac_dma_stop_tx(void __iomem *ioaddr);
2266 -void dwmac_dma_start_rx(void __iomem *ioaddr);
2267 -void dwmac_dma_stop_rx(void __iomem *ioaddr);
2268 -int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
2269 +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2270 +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
2271 +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
2272 +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
2273 +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
2274 +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
2275 +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
2277 int dwmac_dma_reset(void __iomem *ioaddr);
2279 #endif /* __DWMAC_DMA_H__ */
2280 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
2281 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
2282 @@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void
2283 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
2286 -void dwmac_enable_dma_irq(void __iomem *ioaddr)
2287 +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2289 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
2292 -void dwmac_disable_dma_irq(void __iomem *ioaddr)
2293 +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
2295 writel(0, ioaddr + DMA_INTR_ENA);
2298 -void dwmac_dma_start_tx(void __iomem *ioaddr)
2299 +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
2301 u32 value = readl(ioaddr + DMA_CONTROL);
2302 value |= DMA_CONTROL_ST;
2303 writel(value, ioaddr + DMA_CONTROL);
2306 -void dwmac_dma_stop_tx(void __iomem *ioaddr)
2307 +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
2309 u32 value = readl(ioaddr + DMA_CONTROL);
2310 value &= ~DMA_CONTROL_ST;
2311 writel(value, ioaddr + DMA_CONTROL);
2314 -void dwmac_dma_start_rx(void __iomem *ioaddr)
2315 +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
2317 u32 value = readl(ioaddr + DMA_CONTROL);
2318 value |= DMA_CONTROL_SR;
2319 writel(value, ioaddr + DMA_CONTROL);
2322 -void dwmac_dma_stop_rx(void __iomem *ioaddr)
2323 +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
2325 u32 value = readl(ioaddr + DMA_CONTROL);
2326 value &= ~DMA_CONTROL_SR;
2327 @@ -156,7 +156,7 @@ static void show_rx_process_state(unsign
2330 int dwmac_dma_interrupt(void __iomem *ioaddr,
2331 - struct stmmac_extra_stats *x)
2332 + struct stmmac_extra_stats *x, u32 chan)
2335 /* read the status register (CSR5) */
2336 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2337 +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2338 @@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(str
2340 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
2341 bool csum_flag, int mode, bool tx_own,
2343 + bool ls, unsigned int tot_pkt_len)
2345 unsigned int tdes0 = le32_to_cpu(p->des0);
2347 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2348 +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2349 @@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct
2351 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
2352 bool csum_flag, int mode, bool tx_own,
2354 + bool ls, unsigned int tot_pkt_len)
2356 unsigned int tdes1 = le32_to_cpu(p->des1);
2358 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2359 +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2362 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
2364 - struct stmmac_priv *priv = (struct stmmac_priv *)p;
2365 - unsigned int entry = priv->cur_tx;
2366 - struct dma_desc *desc;
2367 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
2368 unsigned int nopaged_len = skb_headlen(skb);
2369 + struct stmmac_priv *priv = tx_q->priv_data;
2370 + unsigned int entry = tx_q->cur_tx;
2371 unsigned int bmax, len, des2;
2372 + struct dma_desc *desc;
2374 if (priv->extend_desc)
2375 - desc = (struct dma_desc *)(priv->dma_etx + entry);
2376 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2378 - desc = priv->dma_tx + entry;
2379 + desc = tx_q->dma_tx + entry;
2381 if (priv->plat->enh_desc)
2382 bmax = BUF_SIZE_8KiB;
2383 @@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, str
2384 if (dma_mapping_error(priv->device, des2))
2387 - priv->tx_skbuff_dma[entry].buf = des2;
2388 - priv->tx_skbuff_dma[entry].len = bmax;
2389 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2390 + tx_q->tx_skbuff_dma[entry].buf = des2;
2391 + tx_q->tx_skbuff_dma[entry].len = bmax;
2392 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2394 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2395 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
2396 - STMMAC_RING_MODE, 0, false);
2397 - priv->tx_skbuff[entry] = NULL;
2398 + STMMAC_RING_MODE, 0,
2400 + tx_q->tx_skbuff[entry] = NULL;
2401 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2403 if (priv->extend_desc)
2404 - desc = (struct dma_desc *)(priv->dma_etx + entry);
2405 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2407 - desc = priv->dma_tx + entry;
2408 + desc = tx_q->dma_tx + entry;
2410 des2 = dma_map_single(priv->device, skb->data + bmax, len,
2412 desc->des2 = cpu_to_le32(des2);
2413 if (dma_mapping_error(priv->device, des2))
2415 - priv->tx_skbuff_dma[entry].buf = des2;
2416 - priv->tx_skbuff_dma[entry].len = len;
2417 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2418 + tx_q->tx_skbuff_dma[entry].buf = des2;
2419 + tx_q->tx_skbuff_dma[entry].len = len;
2420 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2422 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2423 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
2424 - STMMAC_RING_MODE, 1, true);
2425 + STMMAC_RING_MODE, 1,
2428 des2 = dma_map_single(priv->device, skb->data,
2429 nopaged_len, DMA_TO_DEVICE);
2430 desc->des2 = cpu_to_le32(des2);
2431 if (dma_mapping_error(priv->device, des2))
2433 - priv->tx_skbuff_dma[entry].buf = des2;
2434 - priv->tx_skbuff_dma[entry].len = nopaged_len;
2435 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2436 + tx_q->tx_skbuff_dma[entry].buf = des2;
2437 + tx_q->tx_skbuff_dma[entry].len = nopaged_len;
2438 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2439 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2440 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
2441 - STMMAC_RING_MODE, 0, true);
2442 + STMMAC_RING_MODE, 0,
2446 - priv->cur_tx = entry;
2447 + tx_q->cur_tx = entry;
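The shape of the two-descriptor split above, summarized (lengths follow from the code; the trailing prepare_tx_desc() arguments are partly elided in this excerpt):

	/* for skb_headlen(skb) > bmax, the first descriptor maps bmax bytes
	 * of skb->data and the second maps the remaining nopaged_len - bmax;
	 * in both, des3 = des2 + BUF_SIZE_4KiB is ring mode's implicit
	 * second buffer, and the new tot_pkt_len argument lets the first
	 * descriptor carry the full packet length on GMAC4
	 */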
2451 @@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma
2453 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
2455 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
2456 - unsigned int entry = priv->dirty_tx;
2457 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
2458 + struct stmmac_priv *priv = tx_q->priv_data;
2459 + unsigned int entry = tx_q->dirty_tx;
2461 /* des3 is only used for jumbo frames tx or time stamping */
2462 - if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
2463 - (priv->tx_skbuff_dma[entry].last_segment &&
2464 + if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
2465 + (tx_q->tx_skbuff_dma[entry].last_segment &&
2466 !priv->extend_desc && priv->hwts_tx_en)))
2469 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2470 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2471 @@ -46,38 +46,51 @@ struct stmmac_tx_info {
2475 -struct stmmac_priv {
2476 - /* Frequently used values are kept adjacent for cache effect */
2477 +/* Frequently used values are kept adjacent for cache effect */
2478 +struct stmmac_tx_queue {
2480 + struct stmmac_priv *priv_data;
2481 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
2482 struct dma_desc *dma_tx;
2483 struct sk_buff **tx_skbuff;
2484 + struct stmmac_tx_info *tx_skbuff_dma;
2485 unsigned int cur_tx;
2486 unsigned int dirty_tx;
2487 + dma_addr_t dma_tx_phy;
2491 +struct stmmac_rx_queue {
2493 + struct stmmac_priv *priv_data;
2494 + struct dma_extended_desc *dma_erx;
2495 + struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
2496 + struct sk_buff **rx_skbuff;
2497 + dma_addr_t *rx_skbuff_dma;
2498 + unsigned int cur_rx;
2499 + unsigned int dirty_rx;
2500 + u32 rx_zeroc_thresh;
2501 + dma_addr_t dma_rx_phy;
2503 + struct napi_struct napi ____cacheline_aligned_in_smp;
2506 +struct stmmac_priv {
2507 + /* Frequently used values are kept adjacent for cache effect */
2508 u32 tx_count_frames;
2511 - struct stmmac_tx_info *tx_skbuff_dma;
2512 - dma_addr_t dma_tx_phy;
2516 bool tx_path_in_lpi_mode;
2517 struct timer_list txtimer;
2520 - struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
2521 - struct dma_extended_desc *dma_erx;
2522 - struct sk_buff **rx_skbuff;
2523 - unsigned int cur_rx;
2524 - unsigned int dirty_rx;
2525 unsigned int dma_buf_sz;
2526 unsigned int rx_copybreak;
2527 - unsigned int rx_zeroc_thresh;
2530 - dma_addr_t *rx_skbuff_dma;
2531 - dma_addr_t dma_rx_phy;
2533 - struct napi_struct napi ____cacheline_aligned_in_smp;
2535 void __iomem *ioaddr;
2536 struct net_device *dev;
2537 @@ -85,6 +98,12 @@ struct stmmac_priv {
2538 struct mac_device_info *hw;
2542 + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
2545 + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
2550 @@ -119,8 +138,6 @@ struct stmmac_priv {
2551 spinlock_t ptp_lock;
2552 void __iomem *mmcaddr;
2553 void __iomem *ptpaddr;
2558 #ifdef CONFIG_DEBUG_FS
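With ring state moved into per-queue structures, every former priv->dma_* access becomes an indexed lookup. A minimal access sketch (the helper name is illustrative only):

	static struct dma_desc *example_rx_desc(struct stmmac_priv *priv,
						u32 queue, unsigned int entry)
	{
		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];

		if (priv->extend_desc)
			return &rx_q->dma_erx[entry].basic;
		return &rx_q->dma_rx[entry];
	}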
2559 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2560 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2561 @@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device
2562 struct ethtool_pauseparam *pause)
2564 struct stmmac_priv *priv = netdev_priv(netdev);
2565 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2566 struct phy_device *phy = netdev->phydev;
2567 int new_pause = FLOW_OFF;
2569 @@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device
2572 priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
2574 + priv->pause, tx_cnt);
2578 @@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(str
2579 struct ethtool_stats *dummy, u64 *data)
2581 struct stmmac_priv *priv = netdev_priv(dev);
2582 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
2583 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
2586 /* Update the DMA HW counters for dwmac10/100 */
2587 @@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(str
2588 if ((priv->hw->mac->debug) &&
2589 (priv->synopsys_id >= DWMAC_CORE_3_50))
2590 priv->hw->mac->debug(priv->ioaddr,
2591 - (void *)&priv->xstats);
2592 + (void *)&priv->xstats,
2593 + rx_queues_count, tx_queues_count);
2595 for (i = 0; i < STMMAC_STATS_LEN; i++) {
2596 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
2597 @@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct ne
2598 struct ethtool_coalesce *ec)
2600 struct stmmac_priv *priv = netdev_priv(dev);
2601 + u32 rx_cnt = priv->plat->rx_queues_to_use;
2602 unsigned int rx_riwt;
2604 /* Check not supported parameters */
2605 @@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct ne
2606 priv->tx_coal_frames = ec->tx_max_coalesced_frames;
2607 priv->tx_coal_timer = ec->tx_coalesce_usecs;
2608 priv->rx_riwt = rx_riwt;
2609 - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
2610 + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
2614 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2615 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2616 @@ -139,6 +139,64 @@ static void stmmac_verify_args(void)
2620 + * stmmac_disable_all_queues - Disable all queues
2621 + * @priv: driver private structure
2623 +static void stmmac_disable_all_queues(struct stmmac_priv *priv)
2625 + u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2628 + for (queue = 0; queue < rx_queues_cnt; queue++) {
2629 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2631 + napi_disable(&rx_q->napi);
2636 + * stmmac_enable_all_queues - Enable all queues
2637 + * @priv: driver private structure
2639 +static void stmmac_enable_all_queues(struct stmmac_priv *priv)
2641 + u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2644 + for (queue = 0; queue < rx_queues_cnt; queue++) {
2645 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2647 + napi_enable(&rx_q->napi);
2652 + * stmmac_stop_all_queues - Stop all queues
2653 + * @priv: driver private structure
2655 +static void stmmac_stop_all_queues(struct stmmac_priv *priv)
2657 + u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2660 + for (queue = 0; queue < tx_queues_cnt; queue++)
2661 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2665 + * stmmac_start_all_queues - Start all queues
2666 + * @priv: driver private structure
2668 +static void stmmac_start_all_queues(struct stmmac_priv *priv)
2670 + u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2673 + for (queue = 0; queue < tx_queues_cnt; queue++)
2674 + netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
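These four helpers pair naturally on the up/down paths; a sketch of the expected pairing (the wrapper names are illustrative):

	static void example_queues_up(struct stmmac_priv *priv)
	{
		stmmac_enable_all_queues(priv);	/* NAPI on every RX queue */
		stmmac_start_all_queues(priv);	/* wake every netdev TX queue */
	}

	static void example_queues_down(struct stmmac_priv *priv)
	{
		stmmac_stop_all_queues(priv);
		stmmac_disable_all_queues(priv);
	}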
2678 * stmmac_clk_csr_set - dynamically set the MDC clock
2679 * @priv: driver private structure
2680 * Description: this is to dynamically set the MDC clock according to the csr
2681 @@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf
2682 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2685 -static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
2686 +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2688 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2691 - if (priv->dirty_tx > priv->cur_tx)
2692 - avail = priv->dirty_tx - priv->cur_tx - 1;
2693 + if (tx_q->dirty_tx > tx_q->cur_tx)
2694 + avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
2696 - avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
2697 + avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
2702 -static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
2704 + * stmmac_rx_dirty - get the count of dirty (to-be-refilled) RX entries
2705 + * @priv: driver private structure
2706 + * @queue: RX queue index
2708 +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
2710 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2713 - if (priv->dirty_rx <= priv->cur_rx)
2714 - dirty = priv->cur_rx - priv->dirty_rx;
2715 + if (rx_q->dirty_rx <= rx_q->cur_rx)
2716 + dirty = rx_q->cur_rx - rx_q->dirty_rx;
2718 - dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
2719 + dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
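Concretely, assuming the default DMA_RX_SIZE of 512, the wrap-around arithmetic above gives:

	/* cur_rx = 10, dirty_rx = 5   -> dirty = 10 - 5        = 5 entries
	 * cur_rx = 3,  dirty_rx = 500 -> dirty = 512 - 500 + 3 = 15 (wrapped)
	 * stmmac_tx_avail() is the mirror image for free TX slots, minus one
	 * so a full ring is never mistaken for an empty one
	 */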
2723 @@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_spe
2725 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
2727 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2730 + /* check if all TX queues have the work finished */
2731 + for (queue = 0; queue < tx_cnt; queue++) {
2732 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2734 + if (tx_q->dirty_tx != tx_q->cur_tx)
2735 + return; /* still unfinished work */
2738 /* Check and enter in LPI mode */
2739 - if ((priv->dirty_tx == priv->cur_tx) &&
2740 - (priv->tx_path_in_lpi_mode == false))
2741 + if (!priv->tx_path_in_lpi_mode)
2742 priv->hw->mac->set_eee_mode(priv->hw,
2743 priv->plat->en_tx_lpi_clockgating);
2745 @@ -365,14 +440,14 @@ static void stmmac_get_tx_hwtstamp(struc
2748 /* check tx tstamp status */
2749 - if (!priv->hw->desc->get_tx_timestamp_status(p)) {
2750 + if (priv->hw->desc->get_tx_timestamp_status(p)) {
2751 /* get the valid tstamp */
2752 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
2754 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2755 shhwtstamp.hwtstamp = ns_to_ktime(ns);
2757 - netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
2758 + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
2759 /* pass tstamp to stack */
2760 skb_tstamp_tx(skb, &shhwtstamp);
2762 @@ -399,19 +474,19 @@ static void stmmac_get_rx_hwtstamp(struc
2765 /* Check if timestamp is available */
2766 - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
2767 + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
2768 /* For GMAC4, the valid timestamp is from CTX next desc. */
2769 if (priv->plat->has_gmac4)
2770 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
2772 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
2774 - netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
2775 + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
2776 shhwtstamp = skb_hwtstamps(skb);
2777 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2778 shhwtstamp->hwtstamp = ns_to_ktime(ns);
2780 - netdev_err(priv->dev, "cannot get RX hw timestamp\n");
2781 + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
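One note on the two timestamp hunks above:

	/* both call sites drop the leading '!', i.e.
	 * get_{tx,rx}_timestamp_status() is now read as returning nonzero
	 * when a valid timestamp is present; the messages also drop from
	 * info/err to dbg, presumably because they fire per packet
	 */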
2785 @@ -688,6 +763,19 @@ static void stmmac_release_ptp(struct st
2789 + * stmmac_mac_flow_ctrl - Configure flow control in all queues
2790 + * @priv: driver private structure
2791 + * Description: It is used for configuring the flow control in all queues
2793 +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
2795 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2797 + priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
2798 + priv->pause, tx_cnt);
2802 * stmmac_adjust_link - adjusts the link parameters
2803 * @dev: net device structure
2804 * Description: this is the helper called by the physical abstraction layer
2805 @@ -702,7 +790,6 @@ static void stmmac_adjust_link(struct ne
2806 struct phy_device *phydev = dev->phydev;
2807 unsigned long flags;
2809 - unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
2813 @@ -724,8 +811,7 @@ static void stmmac_adjust_link(struct ne
2815 /* Flow Control operation */
2817 - priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
2819 + stmmac_mac_flow_ctrl(priv, phydev->duplex);
2821 if (phydev->speed != priv->speed) {
2823 @@ -893,22 +979,56 @@ static int stmmac_init_phy(struct net_de
2827 -static void stmmac_display_rings(struct stmmac_priv *priv)
2828 +static void stmmac_display_rx_rings(struct stmmac_priv *priv)
2830 - void *head_rx, *head_tx;
2831 + u32 rx_cnt = priv->plat->rx_queues_to_use;
2835 - if (priv->extend_desc) {
2836 - head_rx = (void *)priv->dma_erx;
2837 - head_tx = (void *)priv->dma_etx;
2839 - head_rx = (void *)priv->dma_rx;
2840 - head_tx = (void *)priv->dma_tx;
2841 + /* Display RX rings */
2842 + for (queue = 0; queue < rx_cnt; queue++) {
2843 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2845 + pr_info("\tRX Queue %u rings\n", queue);
2847 + if (priv->extend_desc)
2848 + head_rx = (void *)rx_q->dma_erx;
2850 + head_rx = (void *)rx_q->dma_rx;
2852 + /* Display RX ring */
2853 + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
2857 +static void stmmac_display_tx_rings(struct stmmac_priv *priv)
2859 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2863 + /* Display TX rings */
2864 + for (queue = 0; queue < tx_cnt; queue++) {
2865 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2867 + pr_info("\tTX Queue %u rings\n", queue);
2869 + if (priv->extend_desc)
2870 + head_tx = (void *)tx_q->dma_etx;
2872 + head_tx = (void *)tx_q->dma_tx;
2874 + priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
2878 +static void stmmac_display_rings(struct stmmac_priv *priv)
2880 + /* Display RX ring */
2881 + stmmac_display_rx_rings(priv);
2883 - /* Display Rx ring */
2884 - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
2885 - /* Display Tx ring */
2886 - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
2887 + /* Display TX ring */
2888 + stmmac_display_tx_rings(priv);
2891 static int stmmac_set_bfsize(int mtu, int bufsize)
2892 @@ -928,48 +1048,88 @@ static int stmmac_set_bfsize(int mtu, in
2896 - * stmmac_clear_descriptors - clear descriptors
2897 + * stmmac_clear_rx_descriptors - clear RX descriptors
2898 * @priv: driver private structure
2899 - * Description: this function is called to clear the tx and rx descriptors
2900 + * @queue: RX queue index
2901 + * Description: this function is called to clear the RX descriptors
2902 + * whether basic or extended descriptors are in use.
2904 -static void stmmac_clear_descriptors(struct stmmac_priv *priv)
2905 +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
2907 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2910 - /* Clear the Rx/Tx descriptors */
2911 + /* Clear the RX descriptors */
2912 for (i = 0; i < DMA_RX_SIZE; i++)
2913 if (priv->extend_desc)
2914 - priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
2915 + priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
2916 priv->use_riwt, priv->mode,
2917 (i == DMA_RX_SIZE - 1));
2919 - priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
2920 + priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
2921 priv->use_riwt, priv->mode,
2922 (i == DMA_RX_SIZE - 1));
2926 + * stmmac_clear_tx_descriptors - clear tx descriptors
2927 + * @priv: driver private structure
2928 + * @queue: TX queue index.
2929 + * Description: this function is called to clear the TX descriptors
2930 + * whether basic or extended descriptors are in use.
2932 +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
2934 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2937 + /* Clear the TX descriptors */
2938 for (i = 0; i < DMA_TX_SIZE; i++)
2939 if (priv->extend_desc)
2940 - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
2941 + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
2943 (i == DMA_TX_SIZE - 1));
2945 - priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
2946 + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
2948 (i == DMA_TX_SIZE - 1));
2952 + * stmmac_clear_descriptors - clear descriptors
2953 + * @priv: driver private structure
2954 + * Description: this function is called to clear the TX and RX descriptors
2955 + * whether basic or extended descriptors are in use.
2957 +static void stmmac_clear_descriptors(struct stmmac_priv *priv)
2959 + u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
2960 + u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
2963 + /* Clear the RX descriptors */
2964 + for (queue = 0; queue < rx_queue_cnt; queue++)
2965 + stmmac_clear_rx_descriptors(priv, queue);
2967 + /* Clear the TX descriptors */
2968 + for (queue = 0; queue < tx_queue_cnt; queue++)
2969 + stmmac_clear_tx_descriptors(priv, queue);
2973 * stmmac_init_rx_buffers - init the RX descriptor buffer.
2974 * @priv: driver private structure
2975 * @p: descriptor pointer
2976 * @i: descriptor index
2977 - * @flags: gfp flag.
2978 + * @flags: gfp flag
2979 + * @queue: RX queue index
2980 * Description: this function is called to allocate a receive buffer, perform
2981 * the DMA mapping and init the descriptor.
2983 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
2984 - int i, gfp_t flags)
2985 + int i, gfp_t flags, u32 queue)
2987 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2988 struct sk_buff *skb;
2990 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
2991 @@ -978,20 +1138,20 @@ static int stmmac_init_rx_buffers(struct
2992 "%s: Rx init fails; skb is NULL\n", __func__);
2995 - priv->rx_skbuff[i] = skb;
2996 - priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
2997 + rx_q->rx_skbuff[i] = skb;
2998 + rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
3001 - if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
3002 + if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
3003 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
3004 dev_kfree_skb_any(skb);
3008 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3009 - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
3010 + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
3012 - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
3013 + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
3015 if ((priv->hw->mode->init_desc3) &&
3016 (priv->dma_buf_sz == BUF_SIZE_16KiB))
3017 @@ -1000,30 +1160,71 @@ static int stmmac_init_rx_buffers(struct
3021 -static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
3023 + * stmmac_free_rx_buffer - free an RX dma buffer
3024 + * @priv: private structure
3025 + * @queue: RX queue index
3026 + * @i: buffer index.
3028 +static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
3030 - if (priv->rx_skbuff[i]) {
3031 - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
3032 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3034 + if (rx_q->rx_skbuff[i]) {
3035 + dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
3036 priv->dma_buf_sz, DMA_FROM_DEVICE);
3037 - dev_kfree_skb_any(priv->rx_skbuff[i]);
3038 + dev_kfree_skb_any(rx_q->rx_skbuff[i]);
3040 - priv->rx_skbuff[i] = NULL;
3041 + rx_q->rx_skbuff[i] = NULL;
3045 - * init_dma_desc_rings - init the RX/TX descriptor rings
3046 + * stmmac_free_tx_buffer - free a TX dma buffer
3047 + * @priv: private structure
3048 + * @queue: TX queue index
3049 + * @i: buffer index.
3051 +static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
3053 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3055 + if (tx_q->tx_skbuff_dma[i].buf) {
3056 + if (tx_q->tx_skbuff_dma[i].map_as_page)
3057 + dma_unmap_page(priv->device,
3058 + tx_q->tx_skbuff_dma[i].buf,
3059 + tx_q->tx_skbuff_dma[i].len,
3062 + dma_unmap_single(priv->device,
3063 + tx_q->tx_skbuff_dma[i].buf,
3064 + tx_q->tx_skbuff_dma[i].len,
3068 + if (tx_q->tx_skbuff[i]) {
3069 + dev_kfree_skb_any(tx_q->tx_skbuff[i]);
3070 + tx_q->tx_skbuff[i] = NULL;
3071 + tx_q->tx_skbuff_dma[i].buf = 0;
3072 + tx_q->tx_skbuff_dma[i].map_as_page = false;
3077 + * init_dma_rx_desc_rings - init the RX descriptor rings
3078 * @dev: net device structure
3080 - * Description: this function initializes the DMA RX/TX descriptors
3081 + * Description: this function initializes the DMA RX descriptors
3082 * and allocates the socket buffers. It supports the chained and ring
3085 -static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
3086 +static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
3089 struct stmmac_priv *priv = netdev_priv(dev);
3090 + u32 rx_count = priv->plat->rx_queues_to_use;
3091 unsigned int bfsize = 0;
3096 if (priv->hw->mode->set_16kib_bfsize)
3097 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
3098 @@ -1033,257 +1234,516 @@ static int init_dma_desc_rings(struct ne
3100 priv->dma_buf_sz = bfsize;
3102 - netif_dbg(priv, probe, priv->dev,
3103 - "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
3104 - __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
3106 /* RX INITIALIZATION */
3107 netif_dbg(priv, probe, priv->dev,
3108 "SKB addresses:\nskb\t\tskb data\tdma data\n");
3110 - for (i = 0; i < DMA_RX_SIZE; i++) {
3111 - struct dma_desc *p;
3112 - if (priv->extend_desc)
3113 - p = &((priv->dma_erx + i)->basic);
3115 - p = priv->dma_rx + i;
3116 + for (queue = 0; queue < rx_count; queue++) {
3117 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3119 - ret = stmmac_init_rx_buffers(priv, p, i, flags);
3121 - goto err_init_rx_buffers;
3122 + netif_dbg(priv, probe, priv->dev,
3123 + "(%s) dma_rx_phy=0x%08x\n", __func__,
3124 + (u32)rx_q->dma_rx_phy);
3126 - netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
3127 - priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
3128 - (unsigned int)priv->rx_skbuff_dma[i]);
3129 + for (i = 0; i < DMA_RX_SIZE; i++) {
3130 + struct dma_desc *p;
3132 + if (priv->extend_desc)
3133 + p = &((rx_q->dma_erx + i)->basic);
3135 + p = rx_q->dma_rx + i;
3137 + ret = stmmac_init_rx_buffers(priv, p, i, flags,
3140 + goto err_init_rx_buffers;
3142 + netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
3143 + rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
3144 + (unsigned int)rx_q->rx_skbuff_dma[i]);
3148 + rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
3150 + stmmac_clear_rx_descriptors(priv, queue);
3152 + /* Setup the chained descriptor addresses */
3153 + if (priv->mode == STMMAC_CHAIN_MODE) {
3154 + if (priv->extend_desc)
3155 + priv->hw->mode->init(rx_q->dma_erx,
3159 + priv->hw->mode->init(rx_q->dma_rx,
3165 - priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
3169 - /* Setup the chained descriptor addresses */
3170 - if (priv->mode == STMMAC_CHAIN_MODE) {
3171 - if (priv->extend_desc) {
3172 - priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
3174 - priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
3177 - priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
3179 - priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
3184 +err_init_rx_buffers:
3185 + while (queue >= 0) {
3187 + stmmac_free_rx_buffer(priv, queue, i);
3196 - /* TX INITIALIZATION */
3197 - for (i = 0; i < DMA_TX_SIZE; i++) {
3198 - struct dma_desc *p;
3199 - if (priv->extend_desc)
3200 - p = &((priv->dma_etx + i)->basic);
3202 - p = priv->dma_tx + i;
3206 - if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3214 + * init_dma_tx_desc_rings - init the TX descriptor rings
3215 + * @dev: net device structure.
3216 + * Description: this function initializes the DMA TX descriptors
3217 + * and allocates the socket buffers. It supports the chained and ring
3220 +static int init_dma_tx_desc_rings(struct net_device *dev)
3222 + struct stmmac_priv *priv = netdev_priv(dev);
3223 + u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
3227 + for (queue = 0; queue < tx_queue_cnt; queue++) {
3228 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3230 + netif_dbg(priv, probe, priv->dev,
3231 + "(%s) dma_tx_phy=0x%08x\n", __func__,
3232 + (u32)tx_q->dma_tx_phy);
3234 + /* Setup the chained descriptor addresses */
3235 + if (priv->mode == STMMAC_CHAIN_MODE) {
3236 + if (priv->extend_desc)
3237 + priv->hw->mode->init(tx_q->dma_etx,
3241 + priv->hw->mode->init(tx_q->dma_tx,
3246 + for (i = 0; i < DMA_TX_SIZE; i++) {
3247 + struct dma_desc *p;
3248 + if (priv->extend_desc)
3249 + p = &((tx_q->dma_etx + i)->basic);
3251 + p = tx_q->dma_tx + i;
3253 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3262 + tx_q->tx_skbuff_dma[i].buf = 0;
3263 + tx_q->tx_skbuff_dma[i].map_as_page = false;
3264 + tx_q->tx_skbuff_dma[i].len = 0;
3265 + tx_q->tx_skbuff_dma[i].last_segment = false;
3266 + tx_q->tx_skbuff[i] = NULL;
3269 - priv->tx_skbuff_dma[i].buf = 0;
3270 - priv->tx_skbuff_dma[i].map_as_page = false;
3271 - priv->tx_skbuff_dma[i].len = 0;
3272 - priv->tx_skbuff_dma[i].last_segment = false;
3273 - priv->tx_skbuff[i] = NULL;
3274 + tx_q->dirty_tx = 0;
3277 + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
3280 - priv->dirty_tx = 0;
3282 - netdev_reset_queue(priv->dev);
3287 + * init_dma_desc_rings - init the RX/TX descriptor rings
3288 + * @dev: net device structure
3289 + * @flags: gfp flag.
3290 + * Description: this function initializes the DMA RX/TX descriptors
3291 + * and allocates the socket buffers. It supports the chained and ring
3294 +static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
3296 + struct stmmac_priv *priv = netdev_priv(dev);
3299 + ret = init_dma_rx_desc_rings(dev, flags);
3303 + ret = init_dma_tx_desc_rings(dev);
3305 stmmac_clear_descriptors(priv);
3307 if (netif_msg_hw(priv))
3308 stmmac_display_rings(priv);
3311 -err_init_rx_buffers:
3313 - stmmac_free_rx_buffers(priv, i);
3317 -static void dma_free_rx_skbufs(struct stmmac_priv *priv)
3319 + * dma_free_rx_skbufs - free RX dma buffers
3320 + * @priv: private structure
3321 + * @queue: RX queue index
3323 +static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
3327 for (i = 0; i < DMA_RX_SIZE; i++)
3328 - stmmac_free_rx_buffers(priv, i);
3329 + stmmac_free_rx_buffer(priv, queue, i);
3332 -static void dma_free_tx_skbufs(struct stmmac_priv *priv)
3334 + * dma_free_tx_skbufs - free TX dma buffers
3335 + * @priv: private structure
3336 + * @queue: TX queue index
3338 +static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
3342 - for (i = 0; i < DMA_TX_SIZE; i++) {
3343 - if (priv->tx_skbuff_dma[i].buf) {
3344 - if (priv->tx_skbuff_dma[i].map_as_page)
3345 - dma_unmap_page(priv->device,
3346 - priv->tx_skbuff_dma[i].buf,
3347 - priv->tx_skbuff_dma[i].len,
3350 - dma_unmap_single(priv->device,
3351 - priv->tx_skbuff_dma[i].buf,
3352 - priv->tx_skbuff_dma[i].len,
3354 + for (i = 0; i < DMA_TX_SIZE; i++)
3355 + stmmac_free_tx_buffer(priv, queue, i);
3359 + * free_dma_rx_desc_resources - free RX dma desc resources
3360 + * @priv: private structure
3362 +static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
3364 + u32 rx_count = priv->plat->rx_queues_to_use;
3367 + /* Free RX queue resources */
3368 + for (queue = 0; queue < rx_count; queue++) {
3369 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3371 + /* Release the DMA RX socket buffers */
3372 + dma_free_rx_skbufs(priv, queue);
3374 + /* Free DMA regions of consistent memory previously allocated */
3375 + if (!priv->extend_desc)
3376 + dma_free_coherent(priv->device,
3377 + DMA_RX_SIZE * sizeof(struct dma_desc),
3378 + rx_q->dma_rx, rx_q->dma_rx_phy);
3380 + dma_free_coherent(priv->device, DMA_RX_SIZE *
3381 + sizeof(struct dma_extended_desc),
3382 + rx_q->dma_erx, rx_q->dma_rx_phy);
3384 + kfree(rx_q->rx_skbuff_dma);
3385 + kfree(rx_q->rx_skbuff);
3390 + * free_dma_tx_desc_resources - free TX dma desc resources
3391 + * @priv: private structure
3393 +static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
3395 + u32 tx_count = priv->plat->tx_queues_to_use;
3398 + /* Free TX queue resources */
3399 + for (queue = 0; queue < tx_count; queue++) {
3400 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3402 + /* Release the DMA TX socket buffers */
3403 + dma_free_tx_skbufs(priv, queue);
3405 + /* Free DMA regions of consistent memory previously allocated */
3406 + if (!priv->extend_desc)
3407 + dma_free_coherent(priv->device,
3408 + DMA_TX_SIZE * sizeof(struct dma_desc),
3409 + tx_q->dma_tx, tx_q->dma_tx_phy);
3411 + dma_free_coherent(priv->device, DMA_TX_SIZE *
3412 + sizeof(struct dma_extended_desc),
3413 + tx_q->dma_etx, tx_q->dma_tx_phy);
3415 + kfree(tx_q->tx_skbuff_dma);
3416 + kfree(tx_q->tx_skbuff);
3421 + * alloc_dma_rx_desc_resources - alloc RX resources.
3422 + * @priv: private structure
3423 + * Description: according to which descriptor can be used (extend or basic)
3424 + * this function allocates the resources for the RX path and
3425 + * pre-allocates the RX socket buffers in order to allow the
3426 + * zero-copy mechanism.
3428 +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
3430 + u32 rx_count = priv->plat->rx_queues_to_use;
3431 + int ret = -ENOMEM;
3434 + /* RX queues buffers and DMA */
3435 + for (queue = 0; queue < rx_count; queue++) {
3436 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3438 + rx_q->queue_index = queue;
3439 + rx_q->priv_data = priv;
3441 + rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
3442 + sizeof(dma_addr_t),
3444 + if (!rx_q->rx_skbuff_dma)
3447 + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
3448 + sizeof(struct sk_buff *),
3450 + if (!rx_q->rx_skbuff)
3453 + if (priv->extend_desc) {
3454 + rx_q->dma_erx = dma_zalloc_coherent(priv->device,
3457 + dma_extended_desc),
3458 + &rx_q->dma_rx_phy,
3460 + if (!rx_q->dma_erx)
3464 + rx_q->dma_rx = dma_zalloc_coherent(priv->device,
3468 + &rx_q->dma_rx_phy,
3470 + if (!rx_q->dma_rx)
3478 + free_dma_rx_desc_resources(priv);
3484 + * alloc_dma_tx_desc_resources - alloc TX resources.
3485 + * @priv: private structure
3486 + * Description: according to which descriptor can be used (extend or basic)
3487 + * this function allocates the resources for the TX path: the descriptor
3488 + * rings and the per-entry bookkeeping arrays. Socket buffers are only
3489 + * attached later, at transmit time.
3491 +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
3493 + u32 tx_count = priv->plat->tx_queues_to_use;
3494 + int ret = -ENOMEM;
3497 + /* TX queues buffers and DMA */
3498 + for (queue = 0; queue < tx_count; queue++) {
3499 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3501 + tx_q->queue_index = queue;
3502 + tx_q->priv_data = priv;
3504 + tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
3505 + sizeof(*tx_q->tx_skbuff_dma),
3507 + if (!tx_q->tx_skbuff_dma)
3510 + tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
3511 + sizeof(struct sk_buff *),
3513 + if (!tx_q->tx_skbuff)
3514 + goto err_dma_buffers;
3516 + if (priv->extend_desc) {
3517 + tx_q->dma_etx = dma_zalloc_coherent(priv->device,
3520 + dma_extended_desc),
3521 + &tx_q->dma_tx_phy,
3523 + if (!tx_q->dma_etx)
3524 + goto err_dma_buffers;
3526 + tx_q->dma_tx = dma_zalloc_coherent(priv->device,
3530 + &tx_q->dma_tx_phy,
3532 + if (!tx_q->dma_tx)
3533 + goto err_dma_buffers;
3540 + free_dma_tx_desc_resources(priv);
3546 + * alloc_dma_desc_resources - alloc TX/RX resources.
3547 + * @priv: private structure
3548 + * Description: according to which descriptor can be used (extend or basic)
3549 + * this function allocates the resources for TX and RX paths. In case of
3550 + * reception, for example, it pre-allocates the RX socket buffers in
3551 + * order to allow the zero-copy mechanism.
3553 +static int alloc_dma_desc_resources(struct stmmac_priv *priv)
3555 + /* RX Allocation */
3556 + int ret = alloc_dma_rx_desc_resources(priv);
3561 + ret = alloc_dma_tx_desc_resources(priv);
3567 + * free_dma_desc_resources - free dma desc resources
3568 + * @priv: private structure
3570 +static void free_dma_desc_resources(struct stmmac_priv *priv)
3572 + /* Release the DMA RX socket buffers */
3573 + free_dma_rx_desc_resources(priv);
3575 + /* Release the DMA TX socket buffers */
3576 + free_dma_tx_desc_resources(priv);
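Allocation and teardown are now symmetric per direction, and each alloc_* helper unwinds its own partial work through the matching free_* helper, so callers only need the top-level pair. A sketch of the intended pairing (the wrapper is hypothetical):

	static int example_setup(struct net_device *dev)
	{
		struct stmmac_priv *priv = netdev_priv(dev);
		int ret;

		ret = alloc_dma_desc_resources(priv);	/* RX queues, then TX */
		if (ret)
			return ret;	/* partial allocations already unwound */

		ret = init_dma_desc_rings(dev, GFP_KERNEL);
		if (ret)
			free_dma_desc_resources(priv);
		return ret;
	}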
3580 + * stmmac_mac_enable_rx_queues - Enable MAC rx queues
3581 + * @priv: driver private structure
3582 + * Description: It is used for enabling the rx queues in the MAC
3584 +static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
3586 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
3590 - if (priv->tx_skbuff[i]) {
3591 - dev_kfree_skb_any(priv->tx_skbuff[i]);
3592 - priv->tx_skbuff[i] = NULL;
3593 - priv->tx_skbuff_dma[i].buf = 0;
3594 - priv->tx_skbuff_dma[i].map_as_page = false;
3596 + for (queue = 0; queue < rx_queues_count; queue++) {
3597 + mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
3598 + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
3603 - * alloc_dma_desc_resources - alloc TX/RX resources.
3604 - * @priv: private structure
3605 - * Description: according to which descriptor can be used (extend or basic)
3606 - * this function allocates the resources for TX and RX paths. In case of
3607 - * reception, for example, it pre-allocated the RX socket buffer in order to
3608 - * allow zero-copy mechanism.
3609 + * stmmac_start_rx_dma - start RX DMA channel
3610 + * @priv: driver private structure
3611 + * @chan: RX channel index
3613 + * This starts an RX DMA channel
3615 -static int alloc_dma_desc_resources(struct stmmac_priv *priv)
3616 +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
3618 - int ret = -ENOMEM;
3620 - priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
3622 - if (!priv->rx_skbuff_dma)
3625 - priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
3627 - if (!priv->rx_skbuff)
3628 - goto err_rx_skbuff;
3630 - priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
3631 - sizeof(*priv->tx_skbuff_dma),
3633 - if (!priv->tx_skbuff_dma)
3634 - goto err_tx_skbuff_dma;
3636 - priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
3638 - if (!priv->tx_skbuff)
3639 - goto err_tx_skbuff;
3641 - if (priv->extend_desc) {
3642 - priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
3644 - dma_extended_desc),
3645 - &priv->dma_rx_phy,
3647 - if (!priv->dma_erx)
3650 - priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
3652 - dma_extended_desc),
3653 - &priv->dma_tx_phy,
3655 - if (!priv->dma_etx) {
3656 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3657 - sizeof(struct dma_extended_desc),
3658 - priv->dma_erx, priv->dma_rx_phy);
3662 - priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
3663 - sizeof(struct dma_desc),
3664 - &priv->dma_rx_phy,
3666 - if (!priv->dma_rx)
3668 + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
3669 + priv->hw->dma->start_rx(priv->ioaddr, chan);
3672 - priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
3673 - sizeof(struct dma_desc),
3674 - &priv->dma_tx_phy,
3676 - if (!priv->dma_tx) {
3677 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3678 - sizeof(struct dma_desc),
3679 - priv->dma_rx, priv->dma_rx_phy);
3684 + * stmmac_start_tx_dma - start TX DMA channel
3685 + * @priv: driver private structure
3686 + * @chan: TX channel index
3688 + * This starts a TX DMA channel
3690 +static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
3692 + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
3693 + priv->hw->dma->start_tx(priv->ioaddr, chan);
3698 + * stmmac_stop_rx_dma - stop RX DMA channel
3699 + * @priv: driver private structure
3700 + * @chan: RX channel index
3702 + * This stops an RX DMA channel
3704 +static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
3706 + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
3707 + priv->hw->dma->stop_rx(priv->ioaddr, chan);
3711 - kfree(priv->tx_skbuff);
3713 - kfree(priv->tx_skbuff_dma);
3715 - kfree(priv->rx_skbuff);
3717 - kfree(priv->rx_skbuff_dma);
3720 + * stmmac_stop_tx_dma - stop TX DMA channel
3721 + * @priv: driver private structure
3722 + * @chan: TX channel index
3724 + * This stops a TX DMA channel
3726 +static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
3728 + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
3729 + priv->hw->dma->stop_tx(priv->ioaddr, chan);
3732 -static void free_dma_desc_resources(struct stmmac_priv *priv)
3734 + * stmmac_start_all_dma - start all RX and TX DMA channels
3735 + * @priv: driver private structure
3737 + * This starts all the RX and TX DMA channels
3739 +static void stmmac_start_all_dma(struct stmmac_priv *priv)
3741 - /* Release the DMA TX/RX socket buffers */
3742 - dma_free_rx_skbufs(priv);
3743 - dma_free_tx_skbufs(priv);
3745 - /* Free DMA regions of consistent memory previously allocated */
3746 - if (!priv->extend_desc) {
3747 - dma_free_coherent(priv->device,
3748 - DMA_TX_SIZE * sizeof(struct dma_desc),
3749 - priv->dma_tx, priv->dma_tx_phy);
3750 - dma_free_coherent(priv->device,
3751 - DMA_RX_SIZE * sizeof(struct dma_desc),
3752 - priv->dma_rx, priv->dma_rx_phy);
3754 - dma_free_coherent(priv->device, DMA_TX_SIZE *
3755 - sizeof(struct dma_extended_desc),
3756 - priv->dma_etx, priv->dma_tx_phy);
3757 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3758 - sizeof(struct dma_extended_desc),
3759 - priv->dma_erx, priv->dma_rx_phy);
3761 - kfree(priv->rx_skbuff_dma);
3762 - kfree(priv->rx_skbuff);
3763 - kfree(priv->tx_skbuff_dma);
3764 - kfree(priv->tx_skbuff);
3765 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3766 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3769 + for (chan = 0; chan < rx_channels_count; chan++)
3770 + stmmac_start_rx_dma(priv, chan);
3772 + for (chan = 0; chan < tx_channels_count; chan++)
3773 + stmmac_start_tx_dma(priv, chan);
3777 - * stmmac_mac_enable_rx_queues - Enable MAC rx queues
3778 - * @priv: driver private structure
3779 - * Description: It is used for enabling the rx queues in the MAC
3780 + * stmmac_stop_all_dma - stop all RX and TX DMA channels
3781 + * @priv: driver private structure
3783 + * This stops all the RX and TX DMA channels
3785 -static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
3786 +static void stmmac_stop_all_dma(struct stmmac_priv *priv)
3788 - int rx_count = priv->dma_cap.number_rx_queues;
3790 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3791 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3794 - /* If GMAC does not have multiple queues, then this is not necessary*/
3795 - if (rx_count == 1)
3797 + for (chan = 0; chan < rx_channels_count; chan++)
3798 + stmmac_stop_rx_dma(priv, chan);
3801 - * If the core is synthesized with multiple rx queues / multiple
3802 - * dma channels, then rx queues will be disabled by default.
3803 - * For now only rx queue 0 is enabled.
3805 - priv->hw->mac->rx_queue_enable(priv->hw, queue);
3806 + for (chan = 0; chan < tx_channels_count; chan++)
3807 + stmmac_stop_tx_dma(priv, chan);
3811 @@ -1294,11 +1754,20 @@ static void stmmac_mac_enable_rx_queues(
3813 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
3815 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3816 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3817 int rxfifosz = priv->plat->rx_fifo_size;
3819 - if (priv->plat->force_thresh_dma_mode)
3820 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
3821 - else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
3826 + if (rxfifosz == 0)
3827 + rxfifosz = priv->dma_cap.rx_fifo_size;
3829 + if (priv->plat->force_thresh_dma_mode) {
3832 + } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
3834 * In case of GMAC, SF mode can be enabled
3835 * to perform the TX COE in HW. This depends on:
3836 @@ -1306,37 +1775,53 @@ static void stmmac_dma_operation_mode(st
3837 * 2) There is no bugged Jumbo frame support
3838 * that needs to not insert csum in the TDES.
3840 - priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
3842 + txmode = SF_DMA_MODE;
3843 + rxmode = SF_DMA_MODE;
3844 priv->xstats.threshold = SF_DMA_MODE;
3846 - priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
3849 + rxmode = SF_DMA_MODE;
3852 + /* configure all channels */
3853 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3854 + for (chan = 0; chan < rx_channels_count; chan++)
3855 + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
3858 + for (chan = 0; chan < tx_channels_count; chan++)
3859 + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
3861 + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
3867 * stmmac_tx_clean - to manage the transmission completion
3868 * @priv: driver private structure
3869 + * @queue: TX queue index
3870 * Description: it reclaims the transmit resources after transmission completes.
3872 -static void stmmac_tx_clean(struct stmmac_priv *priv)
3873 +static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
3875 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3876 unsigned int bytes_compl = 0, pkts_compl = 0;
3877 - unsigned int entry = priv->dirty_tx;
3878 + unsigned int entry = tx_q->dirty_tx;
3880 netif_tx_lock(priv->dev);
3882 priv->xstats.tx_clean++;
3884 - while (entry != priv->cur_tx) {
3885 - struct sk_buff *skb = priv->tx_skbuff[entry];
3886 + while (entry != tx_q->cur_tx) {
3887 + struct sk_buff *skb = tx_q->tx_skbuff[entry];
3891 if (priv->extend_desc)
3892 - p = (struct dma_desc *)(priv->dma_etx + entry);
3893 + p = (struct dma_desc *)(tx_q->dma_etx + entry);
3895 - p = priv->dma_tx + entry;
3896 + p = tx_q->dma_tx + entry;
3898 status = priv->hw->desc->tx_status(&priv->dev->stats,
3900 @@ -1362,48 +1847,51 @@ static void stmmac_tx_clean(struct stmma
3901 stmmac_get_tx_hwtstamp(priv, p, skb);
3904 - if (likely(priv->tx_skbuff_dma[entry].buf)) {
3905 - if (priv->tx_skbuff_dma[entry].map_as_page)
3906 + if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
3907 + if (tx_q->tx_skbuff_dma[entry].map_as_page)
3908 dma_unmap_page(priv->device,
3909 - priv->tx_skbuff_dma[entry].buf,
3910 - priv->tx_skbuff_dma[entry].len,
3911 + tx_q->tx_skbuff_dma[entry].buf,
3912 + tx_q->tx_skbuff_dma[entry].len,
3915 dma_unmap_single(priv->device,
3916 - priv->tx_skbuff_dma[entry].buf,
3917 - priv->tx_skbuff_dma[entry].len,
3918 + tx_q->tx_skbuff_dma[entry].buf,
3919 + tx_q->tx_skbuff_dma[entry].len,
3921 - priv->tx_skbuff_dma[entry].buf = 0;
3922 - priv->tx_skbuff_dma[entry].len = 0;
3923 - priv->tx_skbuff_dma[entry].map_as_page = false;
3924 + tx_q->tx_skbuff_dma[entry].buf = 0;
3925 + tx_q->tx_skbuff_dma[entry].len = 0;
3926 + tx_q->tx_skbuff_dma[entry].map_as_page = false;
3929 if (priv->hw->mode->clean_desc3)
3930 - priv->hw->mode->clean_desc3(priv, p);
3931 + priv->hw->mode->clean_desc3(tx_q, p);
3933 - priv->tx_skbuff_dma[entry].last_segment = false;
3934 - priv->tx_skbuff_dma[entry].is_jumbo = false;
3935 + tx_q->tx_skbuff_dma[entry].last_segment = false;
3936 + tx_q->tx_skbuff_dma[entry].is_jumbo = false;
3938 if (likely(skb != NULL)) {
3940 bytes_compl += skb->len;
3941 dev_consume_skb_any(skb);
3942 - priv->tx_skbuff[entry] = NULL;
3943 + tx_q->tx_skbuff[entry] = NULL;
3946 priv->hw->desc->release_tx_desc(p, priv->mode);
3948 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3950 - priv->dirty_tx = entry;
3951 + tx_q->dirty_tx = entry;
3953 + netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
3954 + pkts_compl, bytes_compl);
3956 - netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
3957 + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
3959 + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
3961 - if (unlikely(netif_queue_stopped(priv->dev) &&
3962 - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
3963 netif_dbg(priv, tx_done, priv->dev,
3964 "%s: restart transmit\n", __func__);
3965 - netif_wake_queue(priv->dev);
3966 + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
3969 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
3970 @@ -1413,45 +1901,76 @@ static void stmmac_tx_clean(struct stmma
3971 netif_tx_unlock(priv->dev);
3974 -static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
3975 +static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
3977 - priv->hw->dma->enable_dma_irq(priv->ioaddr);
3978 + priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
3981 -static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
3982 +static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
3984 - priv->hw->dma->disable_dma_irq(priv->ioaddr);
3985 + priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
3989 * stmmac_tx_err - to manage the tx error
3990 * @priv: driver private structure
3991 + * @chan: channel index
3992 * Description: it cleans the descriptors and restarts the transmission
3993 * in case of transmission errors.
3995 -static void stmmac_tx_err(struct stmmac_priv *priv)
3996 +static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
3998 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
4000 - netif_stop_queue(priv->dev);
4002 - priv->hw->dma->stop_tx(priv->ioaddr);
4003 - dma_free_tx_skbufs(priv);
4004 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
4006 + stmmac_stop_tx_dma(priv, chan);
4007 + dma_free_tx_skbufs(priv, chan);
4008 for (i = 0; i < DMA_TX_SIZE; i++)
4009 if (priv->extend_desc)
4010 - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
4011 + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
4013 (i == DMA_TX_SIZE - 1));
4015 - priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
4016 + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
4018 (i == DMA_TX_SIZE - 1));
4019 - priv->dirty_tx = 0;
4021 - netdev_reset_queue(priv->dev);
4022 - priv->hw->dma->start_tx(priv->ioaddr);
4023 + tx_q->dirty_tx = 0;
4025 + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
4026 + stmmac_start_tx_dma(priv, chan);
4028 priv->dev->stats.tx_errors++;
4029 - netif_wake_queue(priv->dev);
4030 + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
4034 + * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
4035 + * @priv: driver private structure
4036 + * @txmode: TX operating mode
4037 + * @rxmode: RX operating mode
4038 + * @chan: channel index
4039 + * Description: it is used for configuring the DMA operation mode at
4040 + * runtime in order to program the TX/RX DMA thresholds or Store-And-Forward
4043 +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
4044 + u32 rxmode, u32 chan)
4046 + int rxfifosz = priv->plat->rx_fifo_size;
4048 + if (rxfifosz == 0)
4049 + rxfifosz = priv->dma_cap.rx_fifo_size;
4051 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4052 + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
4054 + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
4056 + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
4062 @@ -1463,31 +1982,43 @@ static void stmmac_tx_err(struct stmmac_
4064 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
4066 + u32 tx_channel_count = priv->plat->tx_queues_to_use;
4068 - int rxfifosz = priv->plat->rx_fifo_size;
4071 + for (chan = 0; chan < tx_channel_count; chan++) {
4072 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
4074 - status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
4075 - if (likely((status & handle_rx)) || (status & handle_tx)) {
4076 - if (likely(napi_schedule_prep(&priv->napi))) {
4077 - stmmac_disable_dma_irq(priv);
4078 - __napi_schedule(&priv->napi);
4079 + status = priv->hw->dma->dma_interrupt(priv->ioaddr,
4080 + &priv->xstats, chan);
4081 + if (likely((status & handle_rx)) || (status & handle_tx)) {
4082 + if (likely(napi_schedule_prep(&rx_q->napi))) {
4083 + stmmac_disable_dma_irq(priv, chan);
4084 + __napi_schedule(&rx_q->napi);
4088 - if (unlikely(status & tx_hard_error_bump_tc)) {
4089 - /* Try to bump up the dma threshold on this failure */
4090 - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4093 - if (priv->plat->force_thresh_dma_mode)
4094 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
4097 - priv->hw->dma->dma_mode(priv->ioaddr, tc,
4098 - SF_DMA_MODE, rxfifosz);
4099 - priv->xstats.threshold = tc;
4101 + if (unlikely(status & tx_hard_error_bump_tc)) {
4102 + /* Try to bump up the dma threshold on this failure */
4103 + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4106 + if (priv->plat->force_thresh_dma_mode)
4107 + stmmac_set_dma_operation_mode(priv,
4112 + stmmac_set_dma_operation_mode(priv,
4116 + priv->xstats.threshold = tc;
4118 + } else if (unlikely(status == tx_hard_error)) {
4119 + stmmac_tx_err(priv, chan);
4121 - } else if (unlikely(status == tx_hard_error))
4122 - stmmac_tx_err(priv);
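The arguments to the elided threshold-bump calls above mirror the removed dma_mode() programming; a hedged sketch of that path, with the argument order taken from the stmmac_set_dma_operation_mode() prototype, is:

	/* Bump the TX threshold on this channel after a bump_tc event;
	 * in the non-forced case RX stays in store-and-forward mode.
	 */
	if (priv->plat->force_thresh_dma_mode)
		stmmac_set_dma_operation_mode(priv, tc, tc, chan);
	else
		stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE, chan);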
4127 @@ -1594,6 +2125,13 @@ static void stmmac_check_ether_addr(stru
4129 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
4131 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
4132 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
4133 + struct stmmac_rx_queue *rx_q;
4134 + struct stmmac_tx_queue *tx_q;
4135 + u32 dummy_dma_rx_phy = 0;
4136 + u32 dummy_dma_tx_phy = 0;
4141 @@ -1611,19 +2149,49 @@ static int stmmac_init_dma_engine(struct
4145 - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4146 - priv->dma_tx_phy, priv->dma_rx_phy, atds);
4148 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4149 - priv->rx_tail_addr = priv->dma_rx_phy +
4150 - (DMA_RX_SIZE * sizeof(struct dma_desc));
4151 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
4154 - priv->tx_tail_addr = priv->dma_tx_phy +
4155 - (DMA_TX_SIZE * sizeof(struct dma_desc));
4156 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4158 + /* DMA Configuration */
4159 + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4160 + dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
4162 + /* DMA RX Channel Configuration */
4163 + for (chan = 0; chan < rx_channels_count; chan++) {
4164 + rx_q = &priv->rx_queue[chan];
4166 + priv->hw->dma->init_rx_chan(priv->ioaddr,
4167 + priv->plat->dma_cfg,
4168 + rx_q->dma_rx_phy, chan);
4170 + rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4171 + (DMA_RX_SIZE * sizeof(struct dma_desc));
4172 + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
4173 + rx_q->rx_tail_addr,
4177 + /* DMA TX Channel Configuration */
4178 + for (chan = 0; chan < tx_channels_count; chan++) {
4179 + tx_q = &priv->tx_queue[chan];
4181 + priv->hw->dma->init_chan(priv->ioaddr,
4182 + priv->plat->dma_cfg,
4185 + priv->hw->dma->init_tx_chan(priv->ioaddr,
4186 + priv->plat->dma_cfg,
4187 + tx_q->dma_tx_phy, chan);
4189 + tx_q->tx_tail_addr = tx_q->dma_tx_phy +
4190 + (DMA_TX_SIZE * sizeof(struct dma_desc));
4191 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
4192 + tx_q->tx_tail_addr,
4196 + rx_q = &priv->rx_queue[chan];
4197 + tx_q = &priv->tx_queue[chan];
4198 + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4199 + tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
4202 if (priv->plat->axi && priv->hw->dma->axi)
4203 @@ -1641,8 +2209,12 @@ static int stmmac_init_dma_engine(struct
4204 static void stmmac_tx_timer(unsigned long data)
4206 struct stmmac_priv *priv = (struct stmmac_priv *)data;
4207 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4210 - stmmac_tx_clean(priv);
4211 + /* let's scan all the tx queues */
4212 + for (queue = 0; queue < tx_queues_count; queue++)
4213 + stmmac_tx_clean(priv, queue);
4217 @@ -1664,6 +2236,196 @@ static void stmmac_init_tx_coalesce(stru
4218 add_timer(&priv->txtimer);
4221 +static void stmmac_set_rings_length(struct stmmac_priv *priv)
4223 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
4224 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
4227 + /* set TX ring length */
4228 + if (priv->hw->dma->set_tx_ring_len) {
4229 + for (chan = 0; chan < tx_channels_count; chan++)
4230 + priv->hw->dma->set_tx_ring_len(priv->ioaddr,
4231 + (DMA_TX_SIZE - 1), chan);
4234 + /* set RX ring length */
4235 + if (priv->hw->dma->set_rx_ring_len) {
4236 + for (chan = 0; chan < rx_channels_count; chan++)
4237 + priv->hw->dma->set_rx_ring_len(priv->ioaddr,
4238 + (DMA_RX_SIZE - 1), chan);
4243 + * stmmac_set_tx_queue_weight - Set TX queue weight
4244 + * @priv: driver private structure
4245 + * Description: It is used for setting the TX queue weights
4247 +static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
4249 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4253 + for (queue = 0; queue < tx_queues_count; queue++) {
4254 + weight = priv->plat->tx_queues_cfg[queue].weight;
4255 + priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
4260 + * stmmac_configure_cbs - Configure CBS in TX queue
4261 + * @priv: driver private structure
4262 + * Description: It is used for configuring CBS in AVB TX queues
4264 +static void stmmac_configure_cbs(struct stmmac_priv *priv)
4266 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4270 + /* queue 0 is reserved for legacy traffic */
4271 + for (queue = 1; queue < tx_queues_count; queue++) {
4272 + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
4273 + if (mode_to_use == MTL_QUEUE_DCB)
4276 + priv->hw->mac->config_cbs(priv->hw,
4277 + priv->plat->tx_queues_cfg[queue].send_slope,
4278 + priv->plat->tx_queues_cfg[queue].idle_slope,
4279 + priv->plat->tx_queues_cfg[queue].high_credit,
4280 + priv->plat->tx_queues_cfg[queue].low_credit,
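The four shaping parameters above come straight from platform data. As an illustration only (the values and the MTL_QUEUE_AVB constant are assumptions, not taken from this patch), an AVB queue might be described as:

	/* Hypothetical platform data for TX queue 1; values are illustrative. */
	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;	/* assumed constant */
	plat->tx_queues_cfg[1].send_slope = 0x1000;
	plat->tx_queues_cfg[1].idle_slope = 0x1000;
	plat->tx_queues_cfg[1].high_credit = 0x3e800;
	plat->tx_queues_cfg[1].low_credit = 0xffc18000;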
4286 + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX DMA channel
4287 + * @priv: driver private structure
4288 + * Description: It is used for mapping RX queues to RX DMA channels
4290 +static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
4292 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4296 + for (queue = 0; queue < rx_queues_count; queue++) {
4297 + chan = priv->plat->rx_queues_cfg[queue].chan;
4298 + priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
4303 + * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
4304 + * @priv: driver private structure
4305 + * Description: It is used for configuring the RX Queue Priority
4307 +static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
4309 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4313 + for (queue = 0; queue < rx_queues_count; queue++) {
4314 + if (!priv->plat->rx_queues_cfg[queue].use_prio)
4317 + prio = priv->plat->rx_queues_cfg[queue].prio;
4318 + priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
4323 + * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
4324 + * @priv: driver private structure
4325 + * Description: It is used for configuring the TX Queue Priority
4327 +static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
4329 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4333 + for (queue = 0; queue < tx_queues_count; queue++) {
4334 + if (!priv->plat->tx_queues_cfg[queue].use_prio)
4337 + prio = priv->plat->tx_queues_cfg[queue].prio;
4338 + priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
4343 + * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
4344 + * @priv: driver private structure
4345 + * Description: It is used for configuring the RX queue routing
4347 +static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
4349 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4353 + for (queue = 0; queue < rx_queues_count; queue++) {
4354 + /* no specific packet type routing specified for the queue */
4355 + if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
4358 + packet = priv->plat->rx_queues_cfg[queue].pkt_route;
4359 + priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
4364 + * stmmac_mtl_configuration - Configure MTL
4365 + * @priv: driver private structure
4366 + * Description: It is used for configuring MTL
4368 +static void stmmac_mtl_configuration(struct stmmac_priv *priv)
4370 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4371 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4373 + if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
4374 + stmmac_set_tx_queue_weight(priv);
4376 + /* Configure MTL RX algorithms */
4377 + if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
4378 + priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
4379 + priv->plat->rx_sched_algorithm);
4381 + /* Configure MTL TX algorithms */
4382 + if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
4383 + priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
4384 + priv->plat->tx_sched_algorithm);
4386 + /* Configure CBS in AVB TX queues */
4387 + if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
4388 + stmmac_configure_cbs(priv);
4390 + /* Map RX MTL to DMA channels */
4391 + if (priv->hw->mac->map_mtl_to_dma)
4392 + stmmac_rx_queue_dma_chan_map(priv);
4394 + /* Enable MAC RX Queues */
4395 + if (priv->hw->mac->rx_queue_enable)
4396 + stmmac_mac_enable_rx_queues(priv);
4398 + /* Set RX priorities */
4399 + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
4400 + stmmac_mac_config_rx_queues_prio(priv);
4402 + /* Set TX priorities */
4403 + if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
4404 + stmmac_mac_config_tx_queues_prio(priv);
4406 + /* Set RX routing */
4407 + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
4408 + stmmac_mac_config_rx_queues_routing(priv);
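To make the knobs above concrete, a hypothetical two-queue platform configuration exercising these paths (the field names come from this patch; the values are illustrative) could look like:

	plat->rx_queues_to_use = 2;
	plat->tx_queues_to_use = 2;

	/* RX queue 0: DCB mode, mapped onto DMA channel 0. */
	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[0].chan = 0;

	/* RX queue 1: assign a MAC-level priority. */
	plat->rx_queues_cfg[1].use_prio = true;
	plat->rx_queues_cfg[1].prio = 2;

	/* TX queue weights, consumed by stmmac_set_tx_queue_weight(). */
	plat->tx_queues_cfg[0].weight = 0x10;
	plat->tx_queues_cfg[1].weight = 0x11;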
4412 * stmmac_hw_setup - setup mac in a usable state.
4413 * @dev : pointer to the device structure.
4414 @@ -1679,6 +2441,9 @@ static void stmmac_init_tx_coalesce(stru
4415 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
4417 struct stmmac_priv *priv = netdev_priv(dev);
4418 + u32 rx_cnt = priv->plat->rx_queues_to_use;
4419 + u32 tx_cnt = priv->plat->tx_queues_to_use;
4423 /* DMA initialization and SW reset */
4424 @@ -1708,9 +2473,9 @@ static int stmmac_hw_setup(struct net_de
4425 /* Initialize the MAC Core */
4426 priv->hw->mac->core_init(priv->hw, dev->mtu);
4428 - /* Initialize MAC RX Queues */
4429 - if (priv->hw->mac->rx_queue_enable)
4430 - stmmac_mac_enable_rx_queues(priv);
4431 + /* Initialize MTL */
4432 + if (priv->synopsys_id >= DWMAC_CORE_4_00)
4433 + stmmac_mtl_configuration(priv);
4435 ret = priv->hw->mac->rx_ipc(priv->hw);
4437 @@ -1720,10 +2485,7 @@ static int stmmac_hw_setup(struct net_de
4440 /* Enable the MAC Rx/Tx */
4441 - if (priv->synopsys_id >= DWMAC_CORE_4_00)
4442 - stmmac_dwmac4_set_mac(priv->ioaddr, true);
4444 - stmmac_set_mac(priv->ioaddr, true);
4445 + priv->hw->mac->set_mac(priv->ioaddr, true);
4447 /* Set the HW DMA mode and the COE */
4448 stmmac_dma_operation_mode(priv);
4449 @@ -1731,6 +2493,10 @@ static int stmmac_hw_setup(struct net_de
4450 stmmac_mmc_setup(priv);
4453 + ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
4455 + netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
4457 ret = stmmac_init_ptp(priv);
4458 if (ret == -EOPNOTSUPP)
4459 netdev_warn(priv->dev, "PTP not supported by HW\n");
4460 @@ -1745,35 +2511,37 @@ static int stmmac_hw_setup(struct net_de
4463 /* Start the ball rolling... */
4464 - netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
4465 - priv->hw->dma->start_tx(priv->ioaddr);
4466 - priv->hw->dma->start_rx(priv->ioaddr);
4467 + stmmac_start_all_dma(priv);
4469 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
4471 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
4472 priv->rx_riwt = MAX_DMA_RIWT;
4473 - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
4474 + priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
4477 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
4478 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
4480 - /* set TX ring length */
4481 - if (priv->hw->dma->set_tx_ring_len)
4482 - priv->hw->dma->set_tx_ring_len(priv->ioaddr,
4483 - (DMA_TX_SIZE - 1));
4484 - /* set RX ring length */
4485 - if (priv->hw->dma->set_rx_ring_len)
4486 - priv->hw->dma->set_rx_ring_len(priv->ioaddr,
4487 - (DMA_RX_SIZE - 1));
4488 + /* set TX and RX ring lengths */
4489 + stmmac_set_rings_length(priv);
4493 - priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
4495 + for (chan = 0; chan < tx_cnt; chan++)
4496 + priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
4502 +static void stmmac_hw_teardown(struct net_device *dev)
4504 + struct stmmac_priv *priv = netdev_priv(dev);
4506 + clk_disable_unprepare(priv->plat->clk_ptp_ref);
4510 * stmmac_open - open entry point of the driver
4511 * @dev : pointer to the device structure.
4512 @@ -1842,7 +2610,7 @@ static int stmmac_open(struct net_device
4513 netdev_err(priv->dev,
4514 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
4515 __func__, dev->irq, ret);
4520 /* Request the Wake IRQ in case of another line is used for WoL */
4521 @@ -1869,8 +2637,8 @@ static int stmmac_open(struct net_device
4525 - napi_enable(&priv->napi);
4526 - netif_start_queue(dev);
4527 + stmmac_enable_all_queues(priv);
4528 + stmmac_start_all_queues(priv);
4532 @@ -1879,7 +2647,12 @@ lpiirq_error:
4533 free_irq(priv->wol_irq, dev);
4535 free_irq(dev->irq, dev);
4538 + phy_stop(dev->phydev);
4540 + del_timer_sync(&priv->txtimer);
4541 + stmmac_hw_teardown(dev);
4543 free_dma_desc_resources(priv);
4545 @@ -1908,9 +2681,9 @@ static int stmmac_release(struct net_dev
4546 phy_disconnect(dev->phydev);
4549 - netif_stop_queue(dev);
4550 + stmmac_stop_all_queues(priv);
4552 - napi_disable(&priv->napi);
4553 + stmmac_disable_all_queues(priv);
4555 del_timer_sync(&priv->txtimer);
4557 @@ -1922,14 +2695,13 @@ static int stmmac_release(struct net_dev
4558 free_irq(priv->lpi_irq, dev);
4560 /* Stop TX/RX DMA and clear the descriptors */
4561 - priv->hw->dma->stop_tx(priv->ioaddr);
4562 - priv->hw->dma->stop_rx(priv->ioaddr);
4563 + stmmac_stop_all_dma(priv);
4565 /* Release and free the Rx/Tx resources */
4566 free_dma_desc_resources(priv);
4568 /* Disable the MAC Rx/Tx */
4569 - stmmac_set_mac(priv->ioaddr, false);
4570 + priv->hw->mac->set_mac(priv->ioaddr, false);
4572 netif_carrier_off(dev);
4574 @@ -1948,22 +2720,24 @@ static int stmmac_release(struct net_dev
4575 * @des: buffer start address
4576 * @total_len: total length to fill in descriptors
4577 * @last_segment: condition for the last descriptor
4578 + * @queue: TX queue index
4580 * This function fills descriptor and request new descriptors according to
4581 * buffer length to fill
4583 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
4584 - int total_len, bool last_segment)
4585 + int total_len, bool last_segment, u32 queue)
4587 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4588 struct dma_desc *desc;
4593 tmp_len = total_len;
4595 while (tmp_len > 0) {
4596 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4597 - desc = priv->dma_tx + priv->cur_tx;
4598 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4599 + desc = tx_q->dma_tx + tx_q->cur_tx;
4601 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
4602 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4603 @@ -1971,7 +2745,7 @@ static void stmmac_tso_allocator(struct
4605 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
4607 - (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
4608 + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4611 tmp_len -= TSO_MAX_BUFF_SIZE;
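The corrected last-segment test matters when the payload is an exact multiple of TSO_MAX_BUFF_SIZE; a quick worked example (illustrative, not from the patch):

	/*
	 * Suppose total_len == 2 * TSO_MAX_BUFF_SIZE.  Every iteration sees
	 * buff_size == TSO_MAX_BUFF_SIZE, so the old test
	 * (buff_size < TSO_MAX_BUFF_SIZE) can never fire and the final
	 * descriptor is never flagged as the last segment.  The new test
	 * (tmp_len <= TSO_MAX_BUFF_SIZE) fires on the second pass, where
	 * tmp_len == TSO_MAX_BUFF_SIZE, and flags it correctly.
	 */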
4612 @@ -2007,23 +2781,28 @@ static void stmmac_tso_allocator(struct
4614 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4617 - int tmp_pay_len = 0;
4618 + struct dma_desc *desc, *first, *mss_desc = NULL;
4619 struct stmmac_priv *priv = netdev_priv(dev);
4620 int nfrags = skb_shinfo(skb)->nr_frags;
4621 + u32 queue = skb_get_queue_mapping(skb);
4622 unsigned int first_entry, des;
4623 - struct dma_desc *desc, *first, *mss_desc = NULL;
4624 + struct stmmac_tx_queue *tx_q;
4625 + int tmp_pay_len = 0;
4630 + tx_q = &priv->tx_queue[queue];
4632 /* Compute header lengths */
4633 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4635 /* Desc availability based on threshold should be enough safe */
4636 - if (unlikely(stmmac_tx_avail(priv) <
4637 + if (unlikely(stmmac_tx_avail(priv, queue) <
4638 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4639 - if (!netif_queue_stopped(dev)) {
4640 - netif_stop_queue(dev);
4641 + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4642 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4644 /* This is a hard error, log it. */
4645 netdev_err(priv->dev,
4646 "%s: Tx Ring full when queue awake\n",
4647 @@ -2038,10 +2817,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
4649 /* set new MSS value if needed */
4650 if (mss != priv->mss) {
4651 - mss_desc = priv->dma_tx + priv->cur_tx;
4652 + mss_desc = tx_q->dma_tx + tx_q->cur_tx;
4653 priv->hw->desc->set_mss(mss_desc, mss);
4655 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4656 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4659 if (netif_msg_tx_queued(priv)) {
4660 @@ -2051,9 +2830,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
4664 - first_entry = priv->cur_tx;
4665 + first_entry = tx_q->cur_tx;
4667 - desc = priv->dma_tx + first_entry;
4668 + desc = tx_q->dma_tx + first_entry;
4671 /* first descriptor: fill Headers on Buf1 */
4672 @@ -2062,9 +2841,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
4673 if (dma_mapping_error(priv->device, des))
4676 - priv->tx_skbuff_dma[first_entry].buf = des;
4677 - priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4678 - priv->tx_skbuff[first_entry] = skb;
4679 + tx_q->tx_skbuff_dma[first_entry].buf = des;
4680 + tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4682 first->des0 = cpu_to_le32(des);
4684 @@ -2075,7 +2853,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
4685 /* If needed take extra descriptors to fill the remaining payload */
4686 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4688 - stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
4689 + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4691 /* Prepare fragments */
4692 for (i = 0; i < nfrags; i++) {
4693 @@ -2084,24 +2862,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
4694 des = skb_frag_dma_map(priv->device, frag, 0,
4695 skb_frag_size(frag),
4697 + if (dma_mapping_error(priv->device, des))
4700 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4701 - (i == nfrags - 1));
4702 + (i == nfrags - 1), queue);
4704 - priv->tx_skbuff_dma[priv->cur_tx].buf = des;
4705 - priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
4706 - priv->tx_skbuff[priv->cur_tx] = NULL;
4707 - priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
4708 + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4709 + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4710 + tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
4711 + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4714 - priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
4715 + tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4717 + /* Only the last descriptor gets to point to the skb. */
4718 + tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4720 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4721 + /* We've used all descriptors we need for this skb, however,
4722 + * advance cur_tx so that it references a fresh descriptor.
4723 + * ndo_start_xmit will fill this descriptor the next time it's
4724 + * called and stmmac_tx_clean may clean up to this descriptor.
4726 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4728 - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
4729 + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4730 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4732 - netif_stop_queue(dev);
4733 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4736 dev->stats.tx_bytes += skb->len;
4737 @@ -2133,7 +2921,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
4738 priv->hw->desc->prepare_tso_tx_desc(first, 1,
4741 - 1, priv->tx_skbuff_dma[first_entry].last_segment,
4742 + 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4743 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
4745 /* If context desc is used to change MSS */
4746 @@ -2155,20 +2943,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
4748 if (netif_msg_pktdata(priv)) {
4749 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4750 - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
4751 - priv->cur_tx, first, nfrags);
4752 + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4753 + tx_q->cur_tx, first, nfrags);
4755 - priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
4756 + priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
4759 pr_info(">>> frame to be transmitted: ");
4760 print_pkt(skb->data, skb_headlen(skb));
4763 - netdev_sent_queue(dev, skb->len);
4764 + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4766 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4768 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
4771 return NETDEV_TX_OK;
4773 @@ -2192,21 +2980,27 @@ static netdev_tx_t stmmac_xmit(struct sk
4774 struct stmmac_priv *priv = netdev_priv(dev);
4775 unsigned int nopaged_len = skb_headlen(skb);
4776 int i, csum_insertion = 0, is_jumbo = 0;
4777 + u32 queue = skb_get_queue_mapping(skb);
4778 int nfrags = skb_shinfo(skb)->nr_frags;
4779 - unsigned int entry, first_entry;
4781 + unsigned int first_entry;
4782 struct dma_desc *desc, *first;
4783 + struct stmmac_tx_queue *tx_q;
4784 unsigned int enh_desc;
4787 + tx_q = &priv->tx_queue[queue];
4789 /* Manage oversized TCP frames for GMAC4 device */
4790 if (skb_is_gso(skb) && priv->tso) {
4791 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4792 return stmmac_tso_xmit(skb, dev);
4795 - if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
4796 - if (!netif_queue_stopped(dev)) {
4797 - netif_stop_queue(dev);
4798 + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4799 + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4800 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4802 /* This is a hard error, log it. */
4803 netdev_err(priv->dev,
4804 "%s: Tx Ring full when queue awake\n",
4805 @@ -2218,20 +3012,18 @@ static netdev_tx_t stmmac_xmit(struct sk
4806 if (priv->tx_path_in_lpi_mode)
4807 stmmac_disable_eee_mode(priv);
4809 - entry = priv->cur_tx;
4810 + entry = tx_q->cur_tx;
4811 first_entry = entry;
4813 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4815 if (likely(priv->extend_desc))
4816 - desc = (struct dma_desc *)(priv->dma_etx + entry);
4817 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4819 - desc = priv->dma_tx + entry;
4820 + desc = tx_q->dma_tx + entry;
4824 - priv->tx_skbuff[first_entry] = skb;
4826 enh_desc = priv->plat->enh_desc;
4827 /* To program the descriptors according to the size of the frame */
4829 @@ -2239,7 +3031,7 @@ static netdev_tx_t stmmac_xmit(struct sk
4831 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
4833 - entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
4834 + entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
4835 if (unlikely(entry < 0))
4838 @@ -2252,48 +3044,56 @@ static netdev_tx_t stmmac_xmit(struct sk
4839 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4841 if (likely(priv->extend_desc))
4842 - desc = (struct dma_desc *)(priv->dma_etx + entry);
4843 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4845 - desc = priv->dma_tx + entry;
4846 + desc = tx_q->dma_tx + entry;
4848 des = skb_frag_dma_map(priv->device, frag, 0, len,
4850 if (dma_mapping_error(priv->device, des))
4851 goto dma_map_err; /* should reuse desc w/o issues */
4853 - priv->tx_skbuff[entry] = NULL;
4854 + tx_q->tx_skbuff[entry] = NULL;
4856 - priv->tx_skbuff_dma[entry].buf = des;
4857 + tx_q->tx_skbuff_dma[entry].buf = des;
4858 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
4859 desc->des0 = cpu_to_le32(des);
4861 desc->des2 = cpu_to_le32(des);
4863 - priv->tx_skbuff_dma[entry].map_as_page = true;
4864 - priv->tx_skbuff_dma[entry].len = len;
4865 - priv->tx_skbuff_dma[entry].last_segment = last_segment;
4866 + tx_q->tx_skbuff_dma[entry].map_as_page = true;
4867 + tx_q->tx_skbuff_dma[entry].len = len;
4868 + tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4870 /* Prepare the descriptor and set the own bit too */
4871 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
4872 - priv->mode, 1, last_segment);
4873 + priv->mode, 1, last_segment,
4877 - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4878 + /* Only the last descriptor gets to point to the skb. */
4879 + tx_q->tx_skbuff[entry] = skb;
4881 - priv->cur_tx = entry;
4882 + /* We've used all descriptors we need for this skb, however,
4883 + * advance cur_tx so that it references a fresh descriptor.
4884 + * ndo_start_xmit will fill this descriptor the next time it's
4885 + * called and stmmac_tx_clean may clean up to this descriptor.
4887 + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4888 + tx_q->cur_tx = entry;
4890 if (netif_msg_pktdata(priv)) {
4893 netdev_dbg(priv->dev,
4894 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4895 - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
4896 + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4897 entry, first, nfrags);
4899 if (priv->extend_desc)
4900 - tx_head = (void *)priv->dma_etx;
4901 + tx_head = (void *)tx_q->dma_etx;
4903 - tx_head = (void *)priv->dma_tx;
4904 + tx_head = (void *)tx_q->dma_tx;
4906 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
4908 @@ -2301,10 +3101,10 @@ static netdev_tx_t stmmac_xmit(struct sk
4909 print_pkt(skb->data, skb->len);
4912 - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
4913 + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4914 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4916 - netif_stop_queue(dev);
4917 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4920 dev->stats.tx_bytes += skb->len;
4921 @@ -2339,14 +3139,14 @@ static netdev_tx_t stmmac_xmit(struct sk
4922 if (dma_mapping_error(priv->device, des))
4925 - priv->tx_skbuff_dma[first_entry].buf = des;
4926 + tx_q->tx_skbuff_dma[first_entry].buf = des;
4927 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
4928 first->des0 = cpu_to_le32(des);
4930 first->des2 = cpu_to_le32(des);
4932 - priv->tx_skbuff_dma[first_entry].len = nopaged_len;
4933 - priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
4934 + tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4935 + tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4937 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4938 priv->hwts_tx_en)) {
4939 @@ -2358,7 +3158,7 @@ static netdev_tx_t stmmac_xmit(struct sk
4940 /* Prepare the first descriptor setting the OWN bit too */
4941 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
4942 csum_insertion, priv->mode, 1,
4944 + last_segment, skb->len);
4946 /* The own bit must be the latest setting done when prepare the
4947 * descriptor and then barrier is needed to make sure that
4948 @@ -2367,13 +3167,13 @@ static netdev_tx_t stmmac_xmit(struct sk
4952 - netdev_sent_queue(dev, skb->len);
4953 + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4955 if (priv->synopsys_id < DWMAC_CORE_4_00)
4956 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
4958 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4960 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
4963 return NETDEV_TX_OK;
4965 @@ -2401,9 +3201,9 @@ static void stmmac_rx_vlan(struct net_de
4969 -static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
4970 +static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
4972 - if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
4973 + if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
4977 @@ -2412,30 +3212,33 @@ static inline int stmmac_rx_threshold_co
4979 * stmmac_rx_refill - refill used skb preallocated buffers
4980 * @priv: driver private structure
4981 + * @queue: RX queue index
4982 * Description : this is to reallocate the skb for the reception process
4983 * that is based on zero-copy.
4985 -static inline void stmmac_rx_refill(struct stmmac_priv *priv)
4986 +static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4988 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4989 + int dirty = stmmac_rx_dirty(priv, queue);
4990 + unsigned int entry = rx_q->dirty_rx;
4992 int bfsize = priv->dma_buf_sz;
4993 - unsigned int entry = priv->dirty_rx;
4994 - int dirty = stmmac_rx_dirty(priv);
4996 while (dirty-- > 0) {
4999 if (priv->extend_desc)
5000 - p = (struct dma_desc *)(priv->dma_erx + entry);
5001 + p = (struct dma_desc *)(rx_q->dma_erx + entry);
5003 - p = priv->dma_rx + entry;
5004 + p = rx_q->dma_rx + entry;
5006 - if (likely(priv->rx_skbuff[entry] == NULL)) {
5007 + if (likely(!rx_q->rx_skbuff[entry])) {
5008 struct sk_buff *skb;
5010 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
5011 if (unlikely(!skb)) {
5012 /* so for a while no zero-copy! */
5013 - priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
5014 + rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
5015 if (unlikely(net_ratelimit()))
5016 dev_err(priv->device,
5017 "fail to alloc skb entry %d\n",
5018 @@ -2443,28 +3246,28 @@ static inline void stmmac_rx_refill(stru
5022 - priv->rx_skbuff[entry] = skb;
5023 - priv->rx_skbuff_dma[entry] =
5024 + rx_q->rx_skbuff[entry] = skb;
5025 + rx_q->rx_skbuff_dma[entry] =
5026 dma_map_single(priv->device, skb->data, bfsize,
5028 if (dma_mapping_error(priv->device,
5029 - priv->rx_skbuff_dma[entry])) {
5030 + rx_q->rx_skbuff_dma[entry])) {
5031 netdev_err(priv->dev, "Rx DMA map failed\n");
5036 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
5037 - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
5038 + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
5041 - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
5042 + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
5044 if (priv->hw->mode->refill_desc3)
5045 - priv->hw->mode->refill_desc3(priv, p);
5046 + priv->hw->mode->refill_desc3(rx_q, p);
5048 - if (priv->rx_zeroc_thresh > 0)
5049 - priv->rx_zeroc_thresh--;
5050 + if (rx_q->rx_zeroc_thresh > 0)
5051 + rx_q->rx_zeroc_thresh--;
5053 netif_dbg(priv, rx_status, priv->dev,
5054 "refill entry #%d\n", entry);
5055 @@ -2480,31 +3283,33 @@ static inline void stmmac_rx_refill(stru
5057 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
5059 - priv->dirty_rx = entry;
5060 + rx_q->dirty_rx = entry;
5064 * stmmac_rx - manage the receive process
5065 * @priv: driver private structure
5066 - * @limit: napi bugget.
5067 + * @limit: napi budget
5068 + * @queue: RX queue index.
5069 * Description : this is the function called by the napi poll method.
5070 * It gets all the frames inside the ring.
5072 -static int stmmac_rx(struct stmmac_priv *priv, int limit)
5073 +static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5075 - unsigned int entry = priv->cur_rx;
5076 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5077 + unsigned int entry = rx_q->cur_rx;
5078 + int coe = priv->hw->rx_csum;
5079 unsigned int next_entry;
5080 unsigned int count = 0;
5081 - int coe = priv->hw->rx_csum;
5083 if (netif_msg_rx_status(priv)) {
5086 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5087 if (priv->extend_desc)
5088 - rx_head = (void *)priv->dma_erx;
5089 + rx_head = (void *)rx_q->dma_erx;
5091 - rx_head = (void *)priv->dma_rx;
5092 + rx_head = (void *)rx_q->dma_rx;
5094 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
5096 @@ -2514,9 +3319,9 @@ static int stmmac_rx(struct stmmac_priv
5097 struct dma_desc *np;
5099 if (priv->extend_desc)
5100 - p = (struct dma_desc *)(priv->dma_erx + entry);
5101 + p = (struct dma_desc *)(rx_q->dma_erx + entry);
5103 - p = priv->dma_rx + entry;
5104 + p = rx_q->dma_rx + entry;
5106 /* read the status of the incoming frame */
5107 status = priv->hw->desc->rx_status(&priv->dev->stats,
5108 @@ -2527,20 +3332,20 @@ static int stmmac_rx(struct stmmac_priv
5112 - priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
5113 - next_entry = priv->cur_rx;
5114 + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
5115 + next_entry = rx_q->cur_rx;
5117 if (priv->extend_desc)
5118 - np = (struct dma_desc *)(priv->dma_erx + next_entry);
5119 + np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5121 - np = priv->dma_rx + next_entry;
5122 + np = rx_q->dma_rx + next_entry;
5126 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
5127 priv->hw->desc->rx_extended_status(&priv->dev->stats,
5132 if (unlikely(status == discard_frame)) {
5133 priv->dev->stats.rx_errors++;
5134 @@ -2550,9 +3355,9 @@ static int stmmac_rx(struct stmmac_priv
5135 * them in stmmac_rx_refill() function so that
5136 * device can reuse it.
5138 - priv->rx_skbuff[entry] = NULL;
5139 + rx_q->rx_skbuff[entry] = NULL;
5140 dma_unmap_single(priv->device,
5141 - priv->rx_skbuff_dma[entry],
5142 + rx_q->rx_skbuff_dma[entry],
5146 @@ -2600,7 +3405,7 @@ static int stmmac_rx(struct stmmac_priv
5148 if (unlikely(!priv->plat->has_gmac4 &&
5149 ((frame_len < priv->rx_copybreak) ||
5150 - stmmac_rx_threshold_count(priv)))) {
5151 + stmmac_rx_threshold_count(rx_q)))) {
5152 skb = netdev_alloc_skb_ip_align(priv->dev,
5154 if (unlikely(!skb)) {
5155 @@ -2612,21 +3417,21 @@ static int stmmac_rx(struct stmmac_priv
5158 dma_sync_single_for_cpu(priv->device,
5159 - priv->rx_skbuff_dma
5160 + rx_q->rx_skbuff_dma
5163 skb_copy_to_linear_data(skb,
5166 rx_skbuff[entry]->data,
5169 skb_put(skb, frame_len);
5170 dma_sync_single_for_device(priv->device,
5171 - priv->rx_skbuff_dma
5172 + rx_q->rx_skbuff_dma
5176 - skb = priv->rx_skbuff[entry];
5177 + skb = rx_q->rx_skbuff[entry];
5178 if (unlikely(!skb)) {
5179 netdev_err(priv->dev,
5180 "%s: Inconsistent Rx chain\n",
5181 @@ -2635,12 +3440,12 @@ static int stmmac_rx(struct stmmac_priv
5184 prefetch(skb->data - NET_IP_ALIGN);
5185 - priv->rx_skbuff[entry] = NULL;
5186 - priv->rx_zeroc_thresh++;
5187 + rx_q->rx_skbuff[entry] = NULL;
5188 + rx_q->rx_zeroc_thresh++;
5190 skb_put(skb, frame_len);
5191 dma_unmap_single(priv->device,
5192 - priv->rx_skbuff_dma[entry],
5193 + rx_q->rx_skbuff_dma[entry],
5197 @@ -2662,7 +3467,7 @@ static int stmmac_rx(struct stmmac_priv
5199 skb->ip_summed = CHECKSUM_UNNECESSARY;
5201 - napi_gro_receive(&priv->napi, skb);
5202 + napi_gro_receive(&rx_q->napi, skb);
5204 priv->dev->stats.rx_packets++;
5205 priv->dev->stats.rx_bytes += frame_len;
5206 @@ -2670,7 +3475,7 @@ static int stmmac_rx(struct stmmac_priv
5210 - stmmac_rx_refill(priv);
5211 + stmmac_rx_refill(priv, queue);
5213 priv->xstats.rx_pkt_n += count;
5215 @@ -2687,16 +3492,24 @@ static int stmmac_rx(struct stmmac_priv
5217 static int stmmac_poll(struct napi_struct *napi, int budget)
5219 - struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
5220 + struct stmmac_rx_queue *rx_q =
5221 + container_of(napi, struct stmmac_rx_queue, napi);
5222 + struct stmmac_priv *priv = rx_q->priv_data;
5223 + u32 tx_count = priv->plat->tx_queues_to_use;
5224 + u32 chan = rx_q->queue_index;
5228 priv->xstats.napi_poll++;
5229 - stmmac_tx_clean(priv);
5231 - work_done = stmmac_rx(priv, budget);
5232 + /* check all the queues */
5233 + for (queue = 0; queue < tx_count; queue++)
5234 + stmmac_tx_clean(priv, queue);
5236 + work_done = stmmac_rx(priv, budget, rx_q->queue_index);
5237 if (work_done < budget) {
5238 napi_complete_done(napi, work_done);
5239 - stmmac_enable_dma_irq(priv);
5240 + stmmac_enable_dma_irq(priv, chan);
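Each RX queue now owns its NAPI context, so one busy queue no longer serializes polling for the others. The fields used above imply a per-queue structure roughly like this sketch (inferred from this patch; the real definition lives in the driver's private header):

	struct stmmac_rx_queue {
		u32 queue_index;		/* RX queue number */
		struct stmmac_priv *priv_data;	/* back-pointer for the poll/irq paths */
		struct napi_struct napi;	/* one NAPI instance per RX queue */
		/* ... plus the descriptor rings (dma_rx/dma_erx), rx_skbuff[],
		 * rx_skbuff_dma[], cur_rx, dirty_rx, rx_zeroc_thresh and
		 * rx_tail_addr bookkeeping seen throughout this patch ...
		 */
	};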
5244 @@ -2712,9 +3525,12 @@ static int stmmac_poll(struct napi_struc
5245 static void stmmac_tx_timeout(struct net_device *dev)
5247 struct stmmac_priv *priv = netdev_priv(dev);
5248 + u32 tx_count = priv->plat->tx_queues_to_use;
5251 /* Clear Tx resources and restart transmitting again */
5252 - stmmac_tx_err(priv);
5253 + for (chan = 0; chan < tx_count; chan++)
5254 + stmmac_tx_err(priv, chan);
5258 @@ -2837,6 +3653,12 @@ static irqreturn_t stmmac_interrupt(int
5260 struct net_device *dev = (struct net_device *)dev_id;
5261 struct stmmac_priv *priv = netdev_priv(dev);
5262 + u32 rx_cnt = priv->plat->rx_queues_to_use;
5263 + u32 tx_cnt = priv->plat->tx_queues_to_use;
5267 + queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5270 pm_wakeup_event(priv->device, 0);
5271 @@ -2850,16 +3672,30 @@ static irqreturn_t stmmac_interrupt(int
5272 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
5273 int status = priv->hw->mac->host_irq_status(priv->hw,
5276 if (unlikely(status)) {
5277 /* For LPI we need to save the tx status */
5278 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5279 priv->tx_path_in_lpi_mode = true;
5280 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5281 priv->tx_path_in_lpi_mode = false;
5282 - if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
5283 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
5284 - priv->rx_tail_addr,
5288 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
5289 + for (queue = 0; queue < queues_count; queue++) {
5290 + struct stmmac_rx_queue *rx_q =
5291 + &priv->rx_queue[queue];
5294 + priv->hw->mac->host_mtl_irq_status(priv->hw,
5297 + if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
5298 + priv->hw->dma->set_rx_tail_ptr)
5299 + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
5300 + rx_q->rx_tail_addr,
5305 /* PCS link status */
5306 @@ -2944,7 +3780,7 @@ static void sysfs_display_ring(void *hea
5309 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
5310 - i, (unsigned int)virt_to_phys(ep),
5311 + i, (unsigned int)virt_to_phys(p),
5312 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5313 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5315 @@ -2957,17 +3793,40 @@ static int stmmac_sysfs_ring_read(struct
5317 struct net_device *dev = seq->private;
5318 struct stmmac_priv *priv = netdev_priv(dev);
5319 + u32 rx_count = priv->plat->rx_queues_to_use;
5320 + u32 tx_count = priv->plat->tx_queues_to_use;
5323 - if (priv->extend_desc) {
5324 - seq_printf(seq, "Extended RX descriptor ring:\n");
5325 - sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
5326 - seq_printf(seq, "Extended TX descriptor ring:\n");
5327 - sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
5329 - seq_printf(seq, "RX descriptor ring:\n");
5330 - sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
5331 - seq_printf(seq, "TX descriptor ring:\n");
5332 - sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
5333 + for (queue = 0; queue < rx_count; queue++) {
5334 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5336 + seq_printf(seq, "RX Queue %d:\n", queue);
5338 + if (priv->extend_desc) {
5339 + seq_printf(seq, "Extended descriptor ring:\n");
5340 + sysfs_display_ring((void *)rx_q->dma_erx,
5341 + DMA_RX_SIZE, 1, seq);
5343 + seq_printf(seq, "Descriptor ring:\n");
5344 + sysfs_display_ring((void *)rx_q->dma_rx,
5345 + DMA_RX_SIZE, 0, seq);
5349 + for (queue = 0; queue < tx_count; queue++) {
5350 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5352 + seq_printf(seq, "TX Queue %d:\n", queue);
5354 + if (priv->extend_desc) {
5355 + seq_printf(seq, "Extended descriptor ring:\n");
5356 + sysfs_display_ring((void *)tx_q->dma_etx,
5357 + DMA_TX_SIZE, 1, seq);
5359 + seq_printf(seq, "Descriptor ring:\n");
5360 + sysfs_display_ring((void *)tx_q->dma_tx,
5361 + DMA_TX_SIZE, 0, seq);
5366 @@ -3250,11 +4109,14 @@ int stmmac_dvr_probe(struct device *devi
5367 struct plat_stmmacenet_data *plat_dat,
5368 struct stmmac_resources *res)
5371 struct net_device *ndev = NULL;
5372 struct stmmac_priv *priv;
5376 - ndev = alloc_etherdev(sizeof(struct stmmac_priv));
5377 + ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
5378 + MTL_MAX_TX_QUEUES,
5379 + MTL_MAX_RX_QUEUES);
5383 @@ -3296,6 +4158,10 @@ int stmmac_dvr_probe(struct device *devi
5387 + /* Configure real RX and TX queues */
5388 + netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
5389 + netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
5391 ndev->netdev_ops = &stmmac_netdev_ops;
5393 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5394 @@ -3328,7 +4194,12 @@ int stmmac_dvr_probe(struct device *devi
5395 "Enable RX Mitigation via HW Watchdog Timer\n");
5398 - netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
5399 + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
5400 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5402 + netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
5403 + (8 * priv->plat->rx_queues_to_use));
5406 spin_lock_init(&priv->lock);
5408 @@ -3373,7 +4244,11 @@ error_netdev_register:
5409 priv->hw->pcs != STMMAC_PCS_RTBI)
5410 stmmac_mdio_unregister(ndev);
5411 error_mdio_register:
5412 - netif_napi_del(&priv->napi);
5413 + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
5414 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5416 + netif_napi_del(&rx_q->napi);
5421 @@ -3394,10 +4269,9 @@ int stmmac_dvr_remove(struct device *dev
5423 netdev_info(priv->dev, "%s: removing driver", __func__);
5425 - priv->hw->dma->stop_rx(priv->ioaddr);
5426 - priv->hw->dma->stop_tx(priv->ioaddr);
5427 + stmmac_stop_all_dma(priv);
5429 - stmmac_set_mac(priv->ioaddr, false);
5430 + priv->hw->mac->set_mac(priv->ioaddr, false);
5431 netif_carrier_off(ndev);
5432 unregister_netdev(ndev);
5433 if (priv->plat->stmmac_rst)
5434 @@ -3436,20 +4310,19 @@ int stmmac_suspend(struct device *dev)
5435 spin_lock_irqsave(&priv->lock, flags);
5437 netif_device_detach(ndev);
5438 - netif_stop_queue(ndev);
5439 + stmmac_stop_all_queues(priv);
5441 - napi_disable(&priv->napi);
5442 + stmmac_disable_all_queues(priv);
5444 /* Stop TX/RX DMA */
5445 - priv->hw->dma->stop_tx(priv->ioaddr);
5446 - priv->hw->dma->stop_rx(priv->ioaddr);
5447 + stmmac_stop_all_dma(priv);
5449 /* Enable Power down mode by programming the PMT regs */
5450 if (device_may_wakeup(priv->device)) {
5451 priv->hw->mac->pmt(priv->hw, priv->wolopts);
5454 - stmmac_set_mac(priv->ioaddr, false);
5455 + priv->hw->mac->set_mac(priv->ioaddr, false);
5456 pinctrl_pm_select_sleep_state(priv->device);
5457 /* Disable clock in case of PWM is off */
5458 clk_disable(priv->plat->pclk);
5459 @@ -3465,6 +4338,31 @@ int stmmac_suspend(struct device *dev)
5460 EXPORT_SYMBOL_GPL(stmmac_suspend);
5463 + * stmmac_reset_queues_param - reset queue parameters
5464 + * @priv: driver private structure
5466 +static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5468 + u32 rx_cnt = priv->plat->rx_queues_to_use;
5469 + u32 tx_cnt = priv->plat->tx_queues_to_use;
5472 + for (queue = 0; queue < rx_cnt; queue++) {
5473 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5476 + rx_q->dirty_rx = 0;
5479 + for (queue = 0; queue < tx_cnt; queue++) {
5480 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5483 + tx_q->dirty_tx = 0;
5488 * stmmac_resume - resume callback
5489 * @dev: device pointer
5490 * Description: when resume this function is invoked to setup the DMA and CORE
5491 @@ -3504,10 +4402,8 @@ int stmmac_resume(struct device *dev)
5493 spin_lock_irqsave(&priv->lock, flags);
5496 - priv->dirty_rx = 0;
5497 - priv->dirty_tx = 0;
5499 + stmmac_reset_queues_param(priv);
5501 /* reset private mss value to force mss context settings at
5502 * next tso xmit (only used for gmac4).
@@ -3519,9 +4415,9 @@ int stmmac_resume(struct device *dev)
 	stmmac_init_tx_coalesce(priv);
 	stmmac_set_rx_mode(ndev);

-	napi_enable(&priv->napi);
+	stmmac_enable_all_queues(priv);

-	netif_start_queue(ndev);
+	stmmac_start_all_queues(priv);

 	spin_unlock_irqrestore(&priv->lock, flags);
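
Likewise, stmmac_enable_all_queues() and stmmac_start_all_queues() (and the disable/stop counterparts used in the suspend path above) come from earlier patches in this series. A rough sketch of the pattern, assuming one NAPI context per RX queue and the standard per-queue TX netdev API:

static void stmmac_enable_all_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
	u32 queue;

	/* One NAPI instance is embedded in each RX queue */
	for (queue = 0; queue < rx_queues_cnt; queue++)
		napi_enable(&priv->rx_queue[queue].napi);
}

static void stmmac_start_all_queues(struct stmmac_priv *priv)
{
	u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Start every TX queue individually */
	for (queue = 0; queue < tx_queues_cnt; queue++)
		netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
}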
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
 struct stmmac_pci_dmi_data {
 	const char *name;
+	const char *asset_tag;
 	unsigned int func;
 	int phy_addr;
 };
@@ -46,6 +47,7 @@ struct stmmac_pci_info {
 static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
 {
 	const char *name = dmi_get_system_info(DMI_BOARD_NAME);
+	const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
 	unsigned int func = PCI_FUNC(info->pdev->devfn);
 	struct stmmac_pci_dmi_data *dmi;

@@ -57,18 +59,19 @@ static int stmmac_pci_find_phy_addr(stru
 	for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
-		if (!strcmp(dmi->name, name) && dmi->func == func)
+		if (!strcmp(dmi->name, name) && dmi->func == func) {
+			/* If asset tag is provided, match on it as well. */
+			if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
+				continue;
 			return dmi->phy_addr;
+		}
 	}
-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+static void common_default_data(struct plat_stmmacenet_data *plat)
 {
-	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_GMII;
 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat->has_gmac = 1;
 	plat->force_sf_dma_mode = 1;
@@ -76,10 +79,6 @@ static void stmmac_default_data(struct p
 	plat->mdio_bus_data->phy_reset = NULL;
 	plat->mdio_bus_data->phy_mask = 0;

-	plat->dma_cfg->pbl = 32;
-	plat->dma_cfg->pblx8 = true;
-
 	/* Set default value for multicast hash bins */
 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
@@ -88,6 +87,31 @@ static void stmmac_default_data(struct p
 	/* Set the maxmtu to a default of JUMBO_LEN */
 	plat->maxmtu = JUMBO_LEN;

+	/* Set default number of RX and TX queues to use */
+	plat->tx_queues_to_use = 1;
+	plat->rx_queues_to_use = 1;
+
+	/* Disable Priority config by default */
+	plat->tx_queues_cfg[0].use_prio = false;
+	plat->rx_queues_cfg[0].use_prio = false;
+
+	/* Disable RX queues routing by default */
+	plat->rx_queues_cfg[0].pkt_route = 0x0;
+}
+
+static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+{
+	/* Set common default data first */
+	common_default_data(plat);
+
+	plat->phy_addr = 0;
+	plat->interface = PHY_INTERFACE_MODE_GMII;
+
+	plat->dma_cfg->pbl = 32;
+	plat->dma_cfg->pblx8 = true;
+}

 static int quark_default_data(struct plat_stmmacenet_data *plat,
@@ -96,6 +120,9 @@ static int quark_default_data(struct pla
 	struct pci_dev *pdev = info->pdev;
 	int ret;

+	/* Set common default data first */
+	common_default_data(plat);
+
 	/*
 	 * Refuse to load the driver and register net device if MAC controller
 	 * does not connect to any PHY interface.
 	 */
@@ -107,27 +134,12 @@ static int quark_default_data(struct pla
 	plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
 	plat->phy_addr = ret;
 	plat->interface = PHY_INTERFACE_MODE_RMII;
-	plat->clk_csr = 2;
-	plat->has_gmac = 1;
-	plat->force_sf_dma_mode = 1;
-
-	plat->mdio_bus_data->phy_reset = NULL;
-	plat->mdio_bus_data->phy_mask = 0;

 	plat->dma_cfg->pbl = 16;
 	plat->dma_cfg->pblx8 = true;
 	plat->dma_cfg->fixed_burst = 1;

-	/* Set default value for multicast hash bins */
-	plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat->unicast_filter_entries = 1;
-
-	/* Set the maxmtu to a default of JUMBO_LEN */
-	plat->maxmtu = JUMBO_LEN;

 	return 0;
@@ -142,6 +154,24 @@ static struct stmmac_pci_dmi_data quark_
+	{
+		.name = "SIMATIC IOT2000",
+		.asset_tag = "6ES7647-0AA00-0YA2",
+	},
+	{
+		.name = "SIMATIC IOT2000",
+		.asset_tag = "6ES7647-0AA00-1YA2",
+	},
+	{
+		.name = "SIMATIC IOT2000",
+		.asset_tag = "6ES7647-0AA00-1YA2",
+	},
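
Note: each entry above also sets the .func and .phy_addr members, which this excerpt elides. For illustration only, a complete entry has this shape (the numeric values below are placeholders, not the elided ones):

	{
		.name = "SIMATIC IOT2000",		/* matched against DMI_BOARD_NAME */
		.asset_tag = "6ES7647-0AA00-0YA2",	/* matched against DMI_BOARD_ASSET_TAG */
		.func = 6,				/* placeholder PCI function number */
		.phy_addr = 1,				/* placeholder PHY address */
	},

The two distinct asset tags let a single DMI board name (the SIMATIC IOT2000 family) map to different PHY addresses per board variant and per PCI function, which is exactly what stmmac_pci_find_phy_addr() above selects on.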
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_set
-	axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
 	if (!axi)
 		return ERR_PTR(-ENOMEM);
@@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_set

+/**
+ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
+ * @pdev: platform device
+ * @plat: driver data platform structure
+ */
+static void stmmac_mtl_setup(struct platform_device *pdev,
+			     struct plat_stmmacenet_data *plat)
+{
+	struct device_node *q_node;
+	struct device_node *rx_node;
+	struct device_node *tx_node;
+	u8 queue = 0;
+
+	/* For backwards-compatibility with device trees that don't have any
+	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
+	 * to one RX and one TX queue each.
+	 */
+	plat->rx_queues_to_use = 1;
+	plat->tx_queues_to_use = 1;
+
+	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+	if (!rx_node)
+		return;
+
+	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+	if (!tx_node) {
+		of_node_put(rx_node);
+		return;
+	}
+	/* Processing RX queues common config */
+	if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
+				&plat->rx_queues_to_use))
+		plat->rx_queues_to_use = 1;
+
+	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
+		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
+		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
+	else
+		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+	/* Processing individual RX queue config */
+	for_each_child_of_node(rx_node, q_node) {
+		if (queue >= plat->rx_queues_to_use)
+			break;
+
+		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
+			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
+			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+		else
+			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+
+		if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
+					&plat->rx_queues_cfg[queue].chan))
+			plat->rx_queues_cfg[queue].chan = queue;
+		/* TODO: Dynamic mapping to be included in the future */
+
+		if (of_property_read_u32(q_node, "snps,priority",
+					 &plat->rx_queues_cfg[queue].prio)) {
+			plat->rx_queues_cfg[queue].prio = 0;
+			plat->rx_queues_cfg[queue].use_prio = false;
+		} else {
+			plat->rx_queues_cfg[queue].use_prio = true;
+		}
+
+		/* RX queue specific packet type routing */
+		if (of_property_read_bool(q_node, "snps,route-avcp"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
+		else if (of_property_read_bool(q_node, "snps,route-ptp"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
+		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
+		else if (of_property_read_bool(q_node, "snps,route-up"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
+		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
+		else
+			plat->rx_queues_cfg[queue].pkt_route = 0x0;
+
+		queue++;
+	}
+	/* Processing TX queues common config */
+	if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
+				&plat->tx_queues_to_use))
+		plat->tx_queues_to_use = 1;
+
+	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
+	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
+	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+	else
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+
+	queue = 0;
+
+	/* Processing individual TX queue config */
+	for_each_child_of_node(tx_node, q_node) {
+		if (queue >= plat->tx_queues_to_use)
+			break;
+
+		if (of_property_read_u8(q_node, "snps,weight",
+					&plat->tx_queues_cfg[queue].weight))
+			plat->tx_queues_cfg[queue].weight = 0x10 + queue;
+
+		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+		} else if (of_property_read_bool(q_node,
+						 "snps,avb-algorithm")) {
+			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+
+			/* Credit-Based Shaper parameters used by AVB */
+			if (of_property_read_u32(q_node, "snps,send_slope",
+						 &plat->tx_queues_cfg[queue].send_slope))
+				plat->tx_queues_cfg[queue].send_slope = 0x0;
+			if (of_property_read_u32(q_node, "snps,idle_slope",
+						 &plat->tx_queues_cfg[queue].idle_slope))
+				plat->tx_queues_cfg[queue].idle_slope = 0x0;
+			if (of_property_read_u32(q_node, "snps,high_credit",
+						 &plat->tx_queues_cfg[queue].high_credit))
+				plat->tx_queues_cfg[queue].high_credit = 0x0;
+			if (of_property_read_u32(q_node, "snps,low_credit",
+						 &plat->tx_queues_cfg[queue].low_credit))
+				plat->tx_queues_cfg[queue].low_credit = 0x0;
+		} else {
+			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+		}
+
+		if (of_property_read_u32(q_node, "snps,priority",
+					 &plat->tx_queues_cfg[queue].prio)) {
+			plat->tx_queues_cfg[queue].prio = 0;
+			plat->tx_queues_cfg[queue].use_prio = false;
+		} else {
+			plat->tx_queues_cfg[queue].use_prio = true;
+		}
+
+		queue++;
+	}
+
+	of_node_put(rx_node);
+	of_node_put(tx_node);
+	of_node_put(q_node);
+}
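
The values parsed above are only stored in plat; the MAC core consumes them later in the series. As one example of the consumer side, the core enables each RX queue in the DCB or AVB mode chosen by the device tree, roughly like the sketch below (the rx_queue_enable callback is added by other patches in this series; treat the exact signature as an assumption):

static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 queue;
	u8 mode;

	for (queue = 0; queue < rx_queues_count; queue++) {
		/* MTL_QUEUE_DCB or MTL_QUEUE_AVB, as parsed from DT */
		mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
		priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
	}
}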
 /**
  * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
  * @plat: driver data platform structure
  * @np: device tree node
@@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_d

 	plat->axi = stmmac_axi_setup(pdev);

+	stmmac_mtl_setup(pdev, plat);
+
 	plat->stmmac_clk = devm_clk_get(&pdev->dev,
 					STMMAC_RESOURCE_NAME);
@@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_d
 	clk_prepare_enable(plat->pclk);

 	/* Fall-back to main clock in case of no PTP ref is passed */
-	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
 	if (IS_ERR(plat->clk_ptp_ref)) {
 		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
 		plat->clk_ptp_ref = NULL;
 		dev_warn(&pdev->dev, "PTP uses main clock\n");
 	} else {
-		clk_prepare_enable(plat->clk_ptp_ref);
 		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
 		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
 	}
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
 #include <linux/platform_device.h>

+#define MTL_MAX_RX_QUEUES	8
+#define MTL_MAX_TX_QUEUES	8
+
 #define STMMAC_RX_COE_NONE	0
 #define STMMAC_RX_COE_TYPE1	1
 #define STMMAC_RX_COE_TYPE2	2
 #define STMMAC_CSR_150_250M	0x4	/* MDC = clk_scr_i/102 */
 #define STMMAC_CSR_250_300M	0x5	/* MDC = clk_scr_i/122 */

+/* MTL algorithms identifiers */
+#define MTL_TX_ALGORITHM_WRR	0x0
+#define MTL_TX_ALGORITHM_WFQ	0x1
+#define MTL_TX_ALGORITHM_DWRR	0x2
+#define MTL_TX_ALGORITHM_SP	0x3
+#define MTL_RX_ALGORITHM_SP	0x4
+#define MTL_RX_ALGORITHM_WSP	0x5
+
+/* RX/TX Queue Mode */
+#define MTL_QUEUE_AVB		0x0
+#define MTL_QUEUE_DCB		0x1
+
 /* The MDC clock could be set higher than the IEEE 802.3
  * specified frequency limit of 2.5 MHz, by programming a clock divider
  * of value different than the above defined values. The resultant MDIO
@@ -109,6 +124,26 @@ struct stmmac_axi {
 };

+struct stmmac_rxq_cfg {
+	u8 mode_to_use;
+	u8 chan;
+	u8 pkt_route;
+	bool use_prio;
+	u32 prio;
+};
+
+struct stmmac_txq_cfg {
+	u8 weight;
+	u8 mode_to_use;
+	/* Credit-Based Shaper parameters */
+	u32 send_slope;
+	u32 idle_slope;
+	u32 high_credit;
+	u32 low_credit;
+	bool use_prio;
+	u32 prio;
+};
+
 struct plat_stmmacenet_data {
@@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
 	int unicast_filter_entries;
 	int tx_fifo_size;
 	int rx_fifo_size;
+	u8 rx_queues_to_use;
+	u8 tx_queues_to_use;
+	u8 rx_sched_algorithm;
+	u8 tx_sched_algorithm;
+	struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
+	struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);