1 --- a/Documentation/devicetree/bindings/net/stmmac.txt
2 +++ b/Documentation/devicetree/bindings/net/stmmac.txt
3 @@ -7,9 +7,12 @@ Required properties:
4 - interrupt-parent: Should be the phandle for the interrupt controller
5 that services interrupts for this device
6 - interrupts: Should contain the STMMAC interrupts
7 -- interrupt-names: Should contain the interrupt names "macirq"
8 - "eth_wake_irq" if this interrupt is supported in the "interrupts"
10 +- interrupt-names: Should contain a list of interrupt names corresponding to
11 + the interrupts in the interrupts property, if available.
12 + Valid interrupt names are:
13 + - "macirq" (combined signal for various interrupt events)
14 + - "eth_wake_irq" (the interrupt to manage the remote wake-up packet detection)
15 + - "eth_lpi" (the interrupt that occurs when Tx or Rx enters/exits LPI state)
16 - phy-mode: See ethernet.txt file in the same directory.
17 - snps,reset-gpio gpio number for phy reset.
18 - snps,reset-active-low boolean flag to indicate if phy reset is active low.
19 @@ -28,9 +31,9 @@ Optional properties:
20 clocks may be specified in derived bindings.
21 - clock-names: One name for each entry in the clocks property, the
22 first one should be "stmmaceth" and the second one should be "pclk".
23 -- clk_ptp_ref: this is the PTP reference clock; in case of the PTP is
24 - available this clock is used for programming the Timestamp Addend Register.
25 - If not passed then the system clock will be used and this is fine on some
26 +- ptp_ref: this is the PTP reference clock; if PTP is available, this clock
27 +  is used for programming the Timestamp Addend Register. If not passed,
28 +  then the system clock will be used and this is fine on some
30 - tx-fifo-depth: See ethernet.txt file in the same directory
31 - rx-fifo-depth: See ethernet.txt file in the same directory
32 @@ -72,7 +75,45 @@ Optional properties:
33 - snps,mb: mixed-burst
34 - snps,rb: rebuild INCRx Burst
35 - mdio: with compatible = "snps,dwmac-mdio", create and register mdio bus.
37 +- Multiple RX Queues parameters: below is the list of all the parameters to
38 + configure the multiple RX queues:
39 + - snps,rx-queues-to-use: number of RX queues to be used in the driver
40 + - Choose one of these RX scheduling algorithms:
41 + - snps,rx-sched-sp: Strict priority
42 + - snps,rx-sched-wsp: Weighted Strict priority
44 + - Choose one of these modes:
45 + - snps,dcb-algorithm: Queue to be enabled as DCB
46 + - snps,avb-algorithm: Queue to be enabled as AVB
47 + - snps,map-to-dma-channel: Channel to map
48 +  - Specify the packet routing:
49 + - snps,route-avcp: AV Untagged Control packets
50 + - snps,route-ptp: PTP Packets
51 + - snps,route-dcbcp: DCB Control Packets
52 + - snps,route-up: Untagged Packets
53 + - snps,route-multi-broad: Multicast & Broadcast Packets
54 + - snps,priority: RX queue priority (Range: 0x0 to 0xF)
55 +- Multiple TX Queues parameters: below is the list of all the parameters to
56 + configure the multiple TX queues:
57 + - snps,tx-queues-to-use: number of TX queues to be used in the driver
58 + - Choose one of these TX scheduling algorithms:
59 + - snps,tx-sched-wrr: Weighted Round Robin
60 + - snps,tx-sched-wfq: Weighted Fair Queuing
61 + - snps,tx-sched-dwrr: Deficit Weighted Round Robin
62 + - snps,tx-sched-sp: Strict priority
64 + - snps,weight: TX queue weight (if using a DCB weight algorithm)
65 + - Choose one of these modes:
66 + - snps,dcb-algorithm: TX queue will be working in DCB
67 + - snps,avb-algorithm: TX queue will be working in AVB
68 + [Attention] Queue 0 is reserved for legacy traffic
69 + and so no AVB is available in this queue.
70 +  - Configure Credit-Based Shaper (if AVB Mode selected):
71 +   - snps,send_slope: AVB send slope (sendSlopeCredit) value
72 +   - snps,idle_slope: AVB idle slope value (shares the TX queue weight register)
73 +   - snps,high_credit: AVB hiCredit value
74 +   - snps,low_credit: AVB loCredit value
75 + - snps,priority: TX queue priority (Range: 0x0 to 0xF)
78 stmmac_axi_setup: stmmac-axi-config {
79 @@ -81,12 +122,41 @@ Examples:
80 snps,blen = <256 128 64 32 0 0 0>;
83 + mtl_rx_setup: rx-queues-config {
84 + snps,rx-queues-to-use = <1>;
88 + snps,map-to-dma-channel = <0x0>;
89 + snps,priority = <0x0>;
93 + mtl_tx_setup: tx-queues-config {
94 + snps,tx-queues-to-use = <2>;
97 + snps,weight = <0x10>;
99 + snps,priority = <0x0>;
103 + snps,avb-algorithm;
104 + snps,send_slope = <0x1000>;
105 + snps,idle_slope = <0x1000>;
106 + snps,high_credit = <0x3E800>;
107 + snps,low_credit = <0xFFC18000>;
108 + snps,priority = <0x1>;
112 gmac0: ethernet@e0800000 {
113 compatible = "st,spear600-gmac";
114 reg = <0xe0800000 0x8000>;
115 interrupt-parent = <&vic1>;
116 - interrupts = <24 23>;
117 - interrupt-names = "macirq", "eth_wake_irq";
118 + interrupts = <24 23 22>;
119 + interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
120 mac-address = [000000000000]; /* Filled in by U-Boot */
121 max-frame-size = <3800>;
123 @@ -104,4 +174,6 @@ Examples:
124 phy1: ethernet-phy@0 {
127 + snps,mtl-rx-config = <&mtl_rx_setup>;
128 + snps,mtl-tx-config = <&mtl_tx_setup>;
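For context, these two phandles are consumed by the stmmac platform glue; a
hedged sketch of the lookup (the actual parsing lives in stmmac_platform.c,
which is not part of the hunks shown here, and the plat field name is an
assumption):

	/* sketch only: resolving snps,mtl-rx-config from the gmac node */
	struct device_node *rx_node;

	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
	if (rx_node)
		of_property_read_u32(rx_node, "snps,rx-queues-to-use",
				     &plat->rx_queues_to_use);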
130 --- a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
131 +++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
133 #define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
134 #define TSE_PCS_CONTROL_REG 0x00
135 #define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
136 +#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
137 #define TSE_PCS_IF_MODE_REG 0x28
138 #define TSE_PCS_LINK_TIMER_0_REG 0x24
139 #define TSE_PCS_LINK_TIMER_1_REG 0x26
141 #define TSE_PCS_SW_RESET_TIMEOUT 100
142 #define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
143 #define TSE_PCS_USE_SGMII_ENA BIT(0)
144 +#define TSE_PCS_IF_USE_SGMII 0x03
146 #define SGMII_ADAPTER_CTRL_REG 0x00
147 #define SGMII_ADAPTER_DISABLE 0x0001
148 @@ -101,7 +103,9 @@ int tse_pcs_init(void __iomem *base, str
152 - writew(TSE_PCS_USE_SGMII_ENA, base + TSE_PCS_IF_MODE_REG);
153 + writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
155 + writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
157 writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
158 writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
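A note on the two magic values introduced above: TSE_PCS_IF_USE_SGMII (0x03)
is just TSE_PCS_USE_SGMII_ENA | TSE_PCS_USE_SGMII_AN_MASK (bits 0 and 1), and
TSE_PCS_CTRL_AUTONEG_SGMII (0x1140) matches the standard MII control-register
layout. A sketch of equivalent, more self-describing definitions (the BMCR_*
names come from <uapi/linux/mii.h> and are not used by this patch):

	/* 0x03: SGMII interface mode with SGMII auto-negotiation */
	#define TSE_PCS_IF_USE_SGMII_ALT \
		(TSE_PCS_USE_SGMII_ENA | TSE_PCS_USE_SGMII_AN_MASK)

	/* 0x1140: 1000 Mb/s, full duplex, auto-negotiation enabled */
	#define TSE_PCS_CTRL_AUTONEG_SGMII_ALT \
		(BMCR_SPEED1000 | BMCR_FULLDPLX | BMCR_ANENABLE)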
159 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
160 +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
163 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
165 - struct stmmac_priv *priv = (struct stmmac_priv *)p;
166 - unsigned int entry = priv->cur_tx;
167 - struct dma_desc *desc = priv->dma_tx + entry;
168 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
169 unsigned int nopaged_len = skb_headlen(skb);
170 + struct stmmac_priv *priv = tx_q->priv_data;
171 + unsigned int entry = tx_q->cur_tx;
172 unsigned int bmax, des2;
173 unsigned int i = 1, len;
174 + struct dma_desc *desc;
176 + desc = tx_q->dma_tx + entry;
178 if (priv->plat->enh_desc)
179 bmax = BUF_SIZE_8KiB;
180 @@ -45,16 +48,16 @@ static int stmmac_jumbo_frm(void *p, str
181 desc->des2 = cpu_to_le32(des2);
182 if (dma_mapping_error(priv->device, des2))
184 - priv->tx_skbuff_dma[entry].buf = des2;
185 - priv->tx_skbuff_dma[entry].len = bmax;
186 + tx_q->tx_skbuff_dma[entry].buf = des2;
187 + tx_q->tx_skbuff_dma[entry].len = bmax;
188 /* do not close the descriptor and do not set own bit */
189 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
191 + 0, false, skb->len);
194 - priv->tx_skbuff[entry] = NULL;
195 + tx_q->tx_skbuff[entry] = NULL;
196 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
197 - desc = priv->dma_tx + entry;
198 + desc = tx_q->dma_tx + entry;
201 des2 = dma_map_single(priv->device,
202 @@ -63,11 +66,11 @@ static int stmmac_jumbo_frm(void *p, str
203 desc->des2 = cpu_to_le32(des2);
204 if (dma_mapping_error(priv->device, des2))
206 - priv->tx_skbuff_dma[entry].buf = des2;
207 - priv->tx_skbuff_dma[entry].len = bmax;
208 + tx_q->tx_skbuff_dma[entry].buf = des2;
209 + tx_q->tx_skbuff_dma[entry].len = bmax;
210 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum,
211 STMMAC_CHAIN_MODE, 1,
217 @@ -77,17 +80,17 @@ static int stmmac_jumbo_frm(void *p, str
218 desc->des2 = cpu_to_le32(des2);
219 if (dma_mapping_error(priv->device, des2))
221 - priv->tx_skbuff_dma[entry].buf = des2;
222 - priv->tx_skbuff_dma[entry].len = len;
223 + tx_q->tx_skbuff_dma[entry].buf = des2;
224 + tx_q->tx_skbuff_dma[entry].len = len;
225 /* last descriptor can be set now */
226 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
227 STMMAC_CHAIN_MODE, 1,
234 - priv->cur_tx = entry;
235 + tx_q->cur_tx = entry;
239 @@ -136,32 +139,34 @@ static void stmmac_init_dma_chain(void *
241 static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
243 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
244 + struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
245 + struct stmmac_priv *priv = rx_q->priv_data;
247 if (priv->hwts_rx_en && !priv->extend_desc)
248 /* NOTE: Device will overwrite des3 with timestamp value if
249 * 1588-2002 time stamping is enabled, hence reinitialize it
250 * to keep explicit chaining in the descriptor.
252 - p->des3 = cpu_to_le32((unsigned int)(priv->dma_rx_phy +
253 - (((priv->dirty_rx) + 1) %
254 + p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
255 + (((rx_q->dirty_rx) + 1) %
257 sizeof(struct dma_desc)));
260 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
262 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
263 - unsigned int entry = priv->dirty_tx;
264 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
265 + struct stmmac_priv *priv = tx_q->priv_data;
266 + unsigned int entry = tx_q->dirty_tx;
268 - if (priv->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
269 + if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
271 /* NOTE: Device will overwrite des3 with timestamp value if
272 * 1588-2002 time stamping is enabled, hence reinitialize it
273 * to keep explicit chaining in the descriptor.
275 - p->des3 = cpu_to_le32((unsigned int)((priv->dma_tx_phy +
276 - ((priv->dirty_tx + 1) % DMA_TX_SIZE))
277 + p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
278 + ((tx_q->dirty_tx + 1) % DMA_TX_SIZE))
279 * sizeof(struct dma_desc)));
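The chain_mode.c hunks above stop reaching into the global priv->{cur_tx,
dma_tx, tx_skbuff, ...} state and instead go through a per-queue object passed
in via the void * cookie. A minimal sketch of the per-queue containers these
hunks assume (the real definitions live in stmmac.h and carry more fields):

	/* sketch only: per-queue TX state referenced by stmmac_jumbo_frm() */
	struct stmmac_tx_queue {
		struct stmmac_priv *priv_data;	/* back-pointer to the driver */
		struct dma_desc *dma_tx;	/* descriptor ring */
		dma_addr_t dma_tx_phy;		/* ring base (bus address) */
		struct sk_buff **tx_skbuff;
		struct stmmac_tx_info *tx_skbuff_dma;
		unsigned int cur_tx;
		unsigned int dirty_tx;
	};

	/* sketch only: per-queue RX state used by stmmac_refill_desc3() */
	struct stmmac_rx_queue {
		struct stmmac_priv *priv_data;
		dma_addr_t dma_rx_phy;
		unsigned int dirty_rx;
	};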
282 --- a/drivers/net/ethernet/stmicro/stmmac/common.h
283 +++ b/drivers/net/ethernet/stmicro/stmmac/common.h
284 @@ -246,6 +246,15 @@ struct stmmac_extra_stats {
285 #define STMMAC_TX_MAX_FRAMES 256
286 #define STMMAC_TX_FRAMES 64
289 +enum packets_types {
290 + PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
291 + PACKET_PTPQ = 0x2, /* PTP Packets */
292 + PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
293 + PACKET_UPQ = 0x4, /* Untagged Packets */
294 + PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
298 enum rx_frame_status {
300 @@ -324,6 +333,9 @@ struct dma_features {
301 unsigned int number_tx_queues;
302 /* Alternate (enhanced) DESC mode */
303 unsigned int enh_desc;
304 + /* TX and RX FIFO sizes */
305 + unsigned int tx_fifo_size;
306 + unsigned int rx_fifo_size;
309 /* GMAC TX FIFO is 8K, Rx FIFO is 16K */
310 @@ -361,7 +373,7 @@ struct stmmac_desc_ops {
311 /* Invoked by the xmit function to prepare the tx descriptor */
312 void (*prepare_tx_desc) (struct dma_desc *p, int is_fs, int len,
313 bool csum_flag, int mode, bool tx_own,
315 + bool ls, unsigned int tot_pkt_len);
316 void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
317 int len2, bool tx_own, bool ls,
318 unsigned int tcphdrlen,
319 @@ -413,6 +425,14 @@ struct stmmac_dma_ops {
320 int (*reset)(void __iomem *ioaddr);
321 void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
322 u32 dma_tx, u32 dma_rx, int atds);
323 + void (*init_chan)(void __iomem *ioaddr,
324 + struct stmmac_dma_cfg *dma_cfg, u32 chan);
325 + void (*init_rx_chan)(void __iomem *ioaddr,
326 + struct stmmac_dma_cfg *dma_cfg,
327 + u32 dma_rx_phy, u32 chan);
328 + void (*init_tx_chan)(void __iomem *ioaddr,
329 + struct stmmac_dma_cfg *dma_cfg,
330 + u32 dma_tx_phy, u32 chan);
331 /* Configure the AXI Bus Mode Register */
332 void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
333 /* Dump DMA registers */
334 @@ -421,25 +441,28 @@ struct stmmac_dma_ops {
335 * An invalid value enables the store-and-forward mode */
336 void (*dma_mode)(void __iomem *ioaddr, int txmode, int rxmode,
338 + void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
340 + void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel);
341 /* To track extra statistic (if supported) */
342 void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
343 void __iomem *ioaddr);
344 void (*enable_dma_transmission) (void __iomem *ioaddr);
345 - void (*enable_dma_irq) (void __iomem *ioaddr);
346 - void (*disable_dma_irq) (void __iomem *ioaddr);
347 - void (*start_tx) (void __iomem *ioaddr);
348 - void (*stop_tx) (void __iomem *ioaddr);
349 - void (*start_rx) (void __iomem *ioaddr);
350 - void (*stop_rx) (void __iomem *ioaddr);
351 + void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan);
352 + void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan);
353 + void (*start_tx)(void __iomem *ioaddr, u32 chan);
354 + void (*stop_tx)(void __iomem *ioaddr, u32 chan);
355 + void (*start_rx)(void __iomem *ioaddr, u32 chan);
356 + void (*stop_rx)(void __iomem *ioaddr, u32 chan);
357 int (*dma_interrupt) (void __iomem *ioaddr,
358 - struct stmmac_extra_stats *x);
359 + struct stmmac_extra_stats *x, u32 chan);
360 /* If supported then get the optional core features */
361 void (*get_hw_feature)(void __iomem *ioaddr,
362 struct dma_features *dma_cap);
363 /* Program the HW RX Watchdog */
364 - void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
365 - void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len);
366 - void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len);
367 + void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 number_chan);
368 + void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
369 + void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
370 void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
371 void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
372 void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
373 @@ -451,20 +474,44 @@ struct mac_device_info;
375 /* MAC core initialization */
376 void (*core_init)(struct mac_device_info *hw, int mtu);
377 + /* Enable the MAC RX/TX */
378 + void (*set_mac)(void __iomem *ioaddr, bool enable);
379 /* Enable and verify that the IPC module is supported */
380 int (*rx_ipc)(struct mac_device_info *hw);
381 /* Enable RX Queues */
382 - void (*rx_queue_enable)(struct mac_device_info *hw, u32 queue);
383 + void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
384 + /* RX Queues Priority */
385 + void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
386 + /* TX Queues Priority */
387 + void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
388 + /* RX Queues Routing */
389 + void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
391 + /* Program RX Algorithms */
392 + void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
393 + /* Program TX Algorithms */
394 + void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
395 + /* Set MTL TX queues weight */
396 + void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
397 + u32 weight, u32 queue);
398 + /* RX MTL queue to RX dma mapping */
399 + void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
400 + /* Configure AV Algorithm */
401 + void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
402 + u32 idle_slope, u32 high_credit, u32 low_credit,
404 /* Dump MAC registers */
405 void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
406 /* Handle extra events on specific interrupts hw dependent */
407 int (*host_irq_status)(struct mac_device_info *hw,
408 struct stmmac_extra_stats *x);
409 + /* Handle MTL interrupts */
410 + int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
411 /* Multicast filter setting */
412 void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
413 /* Flow control setting */
414 void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
415 - unsigned int fc, unsigned int pause_time);
416 + unsigned int fc, unsigned int pause_time, u32 tx_cnt);
417 /* Set power management mode (e.g. magic frame) */
418 void (*pmt)(struct mac_device_info *hw, unsigned long mode);
419 /* Set/Get Unicast MAC addresses */
420 @@ -477,7 +524,8 @@ struct stmmac_ops {
421 void (*reset_eee_mode)(struct mac_device_info *hw);
422 void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
423 void (*set_eee_pls)(struct mac_device_info *hw, int link);
424 - void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x);
425 + void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
426 + u32 rx_queues, u32 tx_queues);
428 void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
430 @@ -547,6 +595,11 @@ struct mac_device_info {
434 +struct stmmac_rx_routing {
439 struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
440 int perfect_uc_entries,
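Taken together, the new stmmac_ops hooks let the core program each MTL queue
individually instead of hard-coding queue 0. A hedged sketch of how a caller
might wire them up at init time (the function below and the 1:1 queue/channel
mapping are illustrative, not part of this patch):

	/* sketch only: per-queue MTL setup using the ops added above */
	static void example_mtl_setup(struct stmmac_priv *priv,
				      u32 rx_cnt, u32 tx_cnt)
	{
		struct mac_device_info *hw = priv->hw;
		u32 q;

		hw->mac->prog_mtl_rx_algorithms(hw, MTL_RX_ALGORITHM_SP);
		hw->mac->prog_mtl_tx_algorithms(hw, MTL_TX_ALGORITHM_WRR);

		for (q = 0; q < rx_cnt; q++) {
			hw->mac->rx_queue_enable(hw, MTL_QUEUE_DCB, q);
			hw->mac->map_mtl_to_dma(hw, q, q);
		}
		for (q = 0; q < tx_cnt; q++)
			hw->mac->set_mtl_tx_queue_weight(hw, 0x10, q);
	}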
442 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
443 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
445 #include <linux/clk.h>
446 #include <linux/clk-provider.h>
447 #include <linux/device.h>
448 +#include <linux/gpio/consumer.h>
449 #include <linux/ethtool.h>
450 #include <linux/io.h>
451 +#include <linux/iopoll.h>
452 #include <linux/ioport.h>
453 #include <linux/module.h>
454 +#include <linux/of_device.h>
455 #include <linux/of_net.h>
456 #include <linux/mfd/syscon.h>
457 #include <linux/platform_device.h>
458 +#include <linux/reset.h>
459 #include <linux/stmmac.h>
461 #include "stmmac_platform.h"
465 + struct device *dev;
466 + void __iomem *regs;
468 + struct reset_control *rst;
469 + struct clk *clk_master;
470 + struct clk *clk_slave;
471 + struct clk *clk_tx;
472 + struct clk *clk_rx;
474 + struct gpio_desc *reset;
477 static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
478 struct plat_stmmacenet_data *plat_dat)
479 @@ -106,13 +124,309 @@ static int dwc_eth_dwmac_config_dt(struc
483 +static void *dwc_qos_probe(struct platform_device *pdev,
484 + struct plat_stmmacenet_data *plat_dat,
485 + struct stmmac_resources *stmmac_res)
489 + plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
490 + if (IS_ERR(plat_dat->stmmac_clk)) {
491 + dev_err(&pdev->dev, "apb_pclk clock not found.\n");
492 + return ERR_CAST(plat_dat->stmmac_clk);
495 + err = clk_prepare_enable(plat_dat->stmmac_clk);
497 + dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
499 + return ERR_PTR(err);
502 + plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
503 + if (IS_ERR(plat_dat->pclk)) {
504 + dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
505 + err = PTR_ERR(plat_dat->pclk);
509 + err = clk_prepare_enable(plat_dat->pclk);
511 + dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
519 + clk_disable_unprepare(plat_dat->stmmac_clk);
520 + return ERR_PTR(err);
523 +static int dwc_qos_remove(struct platform_device *pdev)
525 + struct net_device *ndev = platform_get_drvdata(pdev);
526 + struct stmmac_priv *priv = netdev_priv(ndev);
528 + clk_disable_unprepare(priv->plat->pclk);
529 + clk_disable_unprepare(priv->plat->stmmac_clk);
534 +#define SDMEMCOMPPADCTRL 0x8800
535 +#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
537 +#define AUTO_CAL_CONFIG 0x8804
538 +#define AUTO_CAL_CONFIG_START BIT(31)
539 +#define AUTO_CAL_CONFIG_ENABLE BIT(29)
541 +#define AUTO_CAL_STATUS 0x880c
542 +#define AUTO_CAL_STATUS_ACTIVE BIT(31)
544 +static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
546 + struct tegra_eqos *eqos = priv;
547 + unsigned long rate = 125000000;
548 + bool needs_calibration = false;
554 + needs_calibration = true;
559 + needs_calibration = true;
568 + dev_err(eqos->dev, "invalid speed %u\n", speed);
572 + if (needs_calibration) {
574 + value = readl(eqos->regs + SDMEMCOMPPADCTRL);
575 + value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
576 + writel(value, eqos->regs + SDMEMCOMPPADCTRL);
580 + value = readl(eqos->regs + AUTO_CAL_CONFIG);
581 + value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
582 + writel(value, eqos->regs + AUTO_CAL_CONFIG);
584 + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
586 + value & AUTO_CAL_STATUS_ACTIVE,
589 + dev_err(eqos->dev, "calibration did not start\n");
593 + err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
595 + (value & AUTO_CAL_STATUS_ACTIVE) == 0,
598 + dev_err(eqos->dev, "calibration didn't finish\n");
603 + value = readl(eqos->regs + SDMEMCOMPPADCTRL);
604 + value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
605 + writel(value, eqos->regs + SDMEMCOMPPADCTRL);
607 + value = readl(eqos->regs + AUTO_CAL_CONFIG);
608 + value &= ~AUTO_CAL_CONFIG_ENABLE;
609 + writel(value, eqos->regs + AUTO_CAL_CONFIG);
612 + err = clk_set_rate(eqos->clk_tx, rate);
614 + dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
617 +static int tegra_eqos_init(struct platform_device *pdev, void *priv)
619 + struct tegra_eqos *eqos = priv;
620 + unsigned long rate;
623 + rate = clk_get_rate(eqos->clk_slave);
625 + value = (rate / 1000000) - 1;
626 + writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
631 +static void *tegra_eqos_probe(struct platform_device *pdev,
632 + struct plat_stmmacenet_data *data,
633 + struct stmmac_resources *res)
635 + struct tegra_eqos *eqos;
638 + eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
644 + eqos->dev = &pdev->dev;
645 + eqos->regs = res->addr;
647 + eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
648 + if (IS_ERR(eqos->clk_master)) {
649 + err = PTR_ERR(eqos->clk_master);
653 + err = clk_prepare_enable(eqos->clk_master);
657 + eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
658 + if (IS_ERR(eqos->clk_slave)) {
659 + err = PTR_ERR(eqos->clk_slave);
660 + goto disable_master;
663 + data->stmmac_clk = eqos->clk_slave;
665 + err = clk_prepare_enable(eqos->clk_slave);
667 + goto disable_master;
669 + eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
670 + if (IS_ERR(eqos->clk_rx)) {
671 + err = PTR_ERR(eqos->clk_rx);
672 + goto disable_slave;
675 + err = clk_prepare_enable(eqos->clk_rx);
677 + goto disable_slave;
679 + eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
680 + if (IS_ERR(eqos->clk_tx)) {
681 + err = PTR_ERR(eqos->clk_tx);
685 + err = clk_prepare_enable(eqos->clk_tx);
689 + eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
690 + if (IS_ERR(eqos->reset)) {
691 + err = PTR_ERR(eqos->reset);
695 + usleep_range(2000, 4000);
696 + gpiod_set_value(eqos->reset, 0);
698 + eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
699 + if (IS_ERR(eqos->rst)) {
700 + err = PTR_ERR(eqos->rst);
704 + err = reset_control_assert(eqos->rst);
708 + usleep_range(2000, 4000);
710 + err = reset_control_deassert(eqos->rst);
714 + usleep_range(2000, 4000);
716 + data->fix_mac_speed = tegra_eqos_fix_speed;
717 + data->init = tegra_eqos_init;
718 + data->bsp_priv = eqos;
720 + err = tegra_eqos_init(pdev, eqos);
728 + reset_control_assert(eqos->rst);
730 + gpiod_set_value(eqos->reset, 1);
732 + clk_disable_unprepare(eqos->clk_tx);
734 + clk_disable_unprepare(eqos->clk_rx);
736 + clk_disable_unprepare(eqos->clk_slave);
738 + clk_disable_unprepare(eqos->clk_master);
740 + eqos = ERR_PTR(err);
744 +static int tegra_eqos_remove(struct platform_device *pdev)
746 + struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
748 + reset_control_assert(eqos->rst);
749 + gpiod_set_value(eqos->reset, 1);
750 + clk_disable_unprepare(eqos->clk_tx);
751 + clk_disable_unprepare(eqos->clk_rx);
752 + clk_disable_unprepare(eqos->clk_slave);
753 + clk_disable_unprepare(eqos->clk_master);
758 +struct dwc_eth_dwmac_data {
759 + void *(*probe)(struct platform_device *pdev,
760 + struct plat_stmmacenet_data *data,
761 + struct stmmac_resources *res);
762 + int (*remove)(struct platform_device *pdev);
765 +static const struct dwc_eth_dwmac_data dwc_qos_data = {
766 + .probe = dwc_qos_probe,
767 + .remove = dwc_qos_remove,
770 +static const struct dwc_eth_dwmac_data tegra_eqos_data = {
771 + .probe = tegra_eqos_probe,
772 + .remove = tegra_eqos_remove,
775 static int dwc_eth_dwmac_probe(struct platform_device *pdev)
777 + const struct dwc_eth_dwmac_data *data;
778 struct plat_stmmacenet_data *plat_dat;
779 struct stmmac_resources stmmac_res;
780 struct resource *res;
784 + data = of_device_get_match_data(&pdev->dev);
786 memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
789 @@ -138,39 +452,26 @@ static int dwc_eth_dwmac_probe(struct pl
790 if (IS_ERR(plat_dat))
791 return PTR_ERR(plat_dat);
793 - plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
794 - if (IS_ERR(plat_dat->stmmac_clk)) {
795 - dev_err(&pdev->dev, "apb_pclk clock not found.\n");
796 - ret = PTR_ERR(plat_dat->stmmac_clk);
797 - plat_dat->stmmac_clk = NULL;
798 - goto err_remove_config_dt;
799 + priv = data->probe(pdev, plat_dat, &stmmac_res);
800 + if (IS_ERR(priv)) {
801 + ret = PTR_ERR(priv);
802 + dev_err(&pdev->dev, "failed to probe subdriver: %d\n", ret);
803 + goto remove_config;
805 - clk_prepare_enable(plat_dat->stmmac_clk);
807 - plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
808 - if (IS_ERR(plat_dat->pclk)) {
809 - dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
810 - ret = PTR_ERR(plat_dat->pclk);
811 - plat_dat->pclk = NULL;
812 - goto err_out_clk_dis_phy;
814 - clk_prepare_enable(plat_dat->pclk);
816 ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
818 - goto err_out_clk_dis_aper;
821 ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
823 - goto err_out_clk_dis_aper;
829 -err_out_clk_dis_aper:
830 - clk_disable_unprepare(plat_dat->pclk);
831 -err_out_clk_dis_phy:
832 - clk_disable_unprepare(plat_dat->stmmac_clk);
833 -err_remove_config_dt:
835 + data->remove(pdev);
837 stmmac_remove_config_dt(pdev, plat_dat);
840 @@ -178,11 +479,29 @@ err_remove_config_dt:
842 static int dwc_eth_dwmac_remove(struct platform_device *pdev)
844 - return stmmac_pltfr_remove(pdev);
845 + struct net_device *ndev = platform_get_drvdata(pdev);
846 + struct stmmac_priv *priv = netdev_priv(ndev);
847 + const struct dwc_eth_dwmac_data *data;
850 + data = of_device_get_match_data(&pdev->dev);
852 + err = stmmac_dvr_remove(&pdev->dev);
854 + dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
856 + err = data->remove(pdev);
858 + dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
860 + stmmac_remove_config_dt(pdev, priv->plat);
865 static const struct of_device_id dwc_eth_dwmac_match[] = {
866 - { .compatible = "snps,dwc-qos-ethernet-4.10", },
867 + { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
868 + { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
871 MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
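One detail worth spelling out from tegra_eqos_init() above: GMAC_1US_TIC_COUNTER
appears to hold the number of CSR (slave bus) clock cycles per microsecond,
minus one. A quick check of the arithmetic with an assumed 125 MHz slave_bus
clock:

	rate  = 125000000;		/* clk_get_rate(eqos->clk_slave) */
	value = (rate / 1000000) - 1;	/* 125 - 1 = 124 -> one tick per us */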
872 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
873 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
874 @@ -74,6 +74,10 @@ struct rk_priv_data {
875 #define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
876 #define GRF_CLR_BIT(nr) (BIT(nr+16))
878 +#define DELAY_ENABLE(soc, tx, rx) \
879 + (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
880 + ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
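DELAY_ENABLE() merely selects the per-SoC enable/disable bit for each
direction, so, for example (assuming tx_delay is non-zero):

	DELAY_ENABLE(RK3228, tx_delay, 0)
	/* expands to:
	 * RK3228_GMAC_TXCLK_DLY_ENABLE | RK3228_GMAC_RXCLK_DLY_DISABLE
	 */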
882 #define RK3228_GRF_MAC_CON0 0x0900
883 #define RK3228_GRF_MAC_CON1 0x0904
885 @@ -115,8 +119,7 @@ static void rk3228_set_to_rgmii(struct r
886 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
887 RK3228_GMAC_PHY_INTF_SEL_RGMII |
888 RK3228_GMAC_RMII_MODE_CLR |
889 - RK3228_GMAC_RXCLK_DLY_ENABLE |
890 - RK3228_GMAC_TXCLK_DLY_ENABLE);
891 + DELAY_ENABLE(RK3228, tx_delay, rx_delay));
893 regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
894 RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
895 @@ -232,8 +235,7 @@ static void rk3288_set_to_rgmii(struct r
896 RK3288_GMAC_PHY_INTF_SEL_RGMII |
897 RK3288_GMAC_RMII_MODE_CLR);
898 regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
899 - RK3288_GMAC_RXCLK_DLY_ENABLE |
900 - RK3288_GMAC_TXCLK_DLY_ENABLE |
901 + DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
902 RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
903 RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
905 @@ -460,8 +462,7 @@ static void rk3366_set_to_rgmii(struct r
906 RK3366_GMAC_PHY_INTF_SEL_RGMII |
907 RK3366_GMAC_RMII_MODE_CLR);
908 regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
909 - RK3366_GMAC_RXCLK_DLY_ENABLE |
910 - RK3366_GMAC_TXCLK_DLY_ENABLE |
911 + DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
912 RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
913 RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
915 @@ -572,8 +573,7 @@ static void rk3368_set_to_rgmii(struct r
916 RK3368_GMAC_PHY_INTF_SEL_RGMII |
917 RK3368_GMAC_RMII_MODE_CLR);
918 regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
919 - RK3368_GMAC_RXCLK_DLY_ENABLE |
920 - RK3368_GMAC_TXCLK_DLY_ENABLE |
921 + DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
922 RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
923 RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
925 @@ -684,8 +684,7 @@ static void rk3399_set_to_rgmii(struct r
926 RK3399_GMAC_PHY_INTF_SEL_RGMII |
927 RK3399_GMAC_RMII_MODE_CLR);
928 regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
929 - RK3399_GMAC_RXCLK_DLY_ENABLE |
930 - RK3399_GMAC_TXCLK_DLY_ENABLE |
931 + DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
932 RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
933 RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
935 @@ -985,14 +984,29 @@ static int rk_gmac_powerup(struct rk_pri
939 - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII) {
940 + switch (bsp_priv->phy_iface) {
941 + case PHY_INTERFACE_MODE_RGMII:
942 dev_info(dev, "init for RGMII\n");
943 bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
945 - } else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII) {
947 + case PHY_INTERFACE_MODE_RGMII_ID:
948 + dev_info(dev, "init for RGMII_ID\n");
949 + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
951 + case PHY_INTERFACE_MODE_RGMII_RXID:
952 + dev_info(dev, "init for RGMII_RXID\n");
953 + bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
955 + case PHY_INTERFACE_MODE_RGMII_TXID:
956 + dev_info(dev, "init for RGMII_TXID\n");
957 + bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
959 + case PHY_INTERFACE_MODE_RMII:
960 dev_info(dev, "init for RMII\n");
961 bsp_priv->ops->set_to_rmii(bsp_priv);
965 dev_err(dev, "NO interface defined!\n");
968 @@ -1022,12 +1036,19 @@ static void rk_fix_speed(void *priv, uns
969 struct rk_priv_data *bsp_priv = priv;
970 struct device *dev = &bsp_priv->pdev->dev;
972 - if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RGMII)
973 + switch (bsp_priv->phy_iface) {
974 + case PHY_INTERFACE_MODE_RGMII:
975 + case PHY_INTERFACE_MODE_RGMII_ID:
976 + case PHY_INTERFACE_MODE_RGMII_RXID:
977 + case PHY_INTERFACE_MODE_RGMII_TXID:
978 bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
979 - else if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
981 + case PHY_INTERFACE_MODE_RMII:
982 bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
986 dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
990 static int rk_gmac_probe(struct platform_device *pdev)
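The rk_gmac_powerup() switch above encodes the usual RGMII delay-ownership
rules; a compact summary of the (tx, rx) delay values passed to
set_to_rgmii() in each case:

	PHY_INTERFACE_MODE_RGMII	-> (tx_delay, rx_delay)	/* MAC adds both */
	PHY_INTERFACE_MODE_RGMII_ID	-> (0, 0)		/* PHY adds both */
	PHY_INTERFACE_MODE_RGMII_RXID	-> (tx_delay, 0)	/* PHY adds RX */
	PHY_INTERFACE_MODE_RGMII_TXID	-> (0, rx_delay)	/* PHY adds TX */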
991 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
992 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
993 @@ -216,7 +216,8 @@ static void dwmac1000_set_filter(struct
996 static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
997 - unsigned int fc, unsigned int pause_time)
998 + unsigned int fc, unsigned int pause_time,
1001 void __iomem *ioaddr = hw->pcsr;
1002 /* Set flow such that DZPQ in Mac Register 6 is 0,
1003 @@ -412,7 +413,8 @@ static void dwmac1000_get_adv_lp(void __
1004 dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
1007 -static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
1008 +static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
1009 + u32 rx_queues, u32 tx_queues)
1011 u32 value = readl(ioaddr + GMAC_DEBUG);
1013 @@ -488,6 +490,7 @@ static void dwmac1000_debug(void __iomem
1015 static const struct stmmac_ops dwmac1000_ops = {
1016 .core_init = dwmac1000_core_init,
1017 + .set_mac = stmmac_set_mac,
1018 .rx_ipc = dwmac1000_rx_ipc_enable,
1019 .dump_regs = dwmac1000_dump_regs,
1020 .host_irq_status = dwmac1000_irq_status,
1021 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
1022 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
1023 @@ -247,7 +247,8 @@ static void dwmac1000_get_hw_feature(voi
1024 dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
1027 -static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt)
1028 +static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
1031 writel(riwt, ioaddr + DMA_RX_WATCHDOG);
1033 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1034 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
1035 @@ -131,7 +131,8 @@ static void dwmac100_set_filter(struct m
1038 static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
1039 - unsigned int fc, unsigned int pause_time)
1040 + unsigned int fc, unsigned int pause_time,
1043 void __iomem *ioaddr = hw->pcsr;
1044 unsigned int flow = MAC_FLOW_CTRL_ENABLE;
1045 @@ -149,6 +150,7 @@ static void dwmac100_pmt(struct mac_devi
1047 static const struct stmmac_ops dwmac100_ops = {
1048 .core_init = dwmac100_core_init,
1049 + .set_mac = stmmac_set_mac,
1050 .rx_ipc = dwmac100_rx_ipc_enable,
1051 .dump_regs = dwmac100_dump_mac_regs,
1052 .host_irq_status = dwmac100_irq_status,
1053 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1054 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
1056 #define GMAC_HASH_TAB_32_63 0x00000014
1057 #define GMAC_RX_FLOW_CTRL 0x00000090
1058 #define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
1059 +#define GMAC_TXQ_PRTY_MAP0 0x98
1060 +#define GMAC_TXQ_PRTY_MAP1 0x9C
1061 #define GMAC_RXQ_CTRL0 0x000000a0
1062 +#define GMAC_RXQ_CTRL1 0x000000a4
1063 +#define GMAC_RXQ_CTRL2 0x000000a8
1064 +#define GMAC_RXQ_CTRL3 0x000000ac
1065 #define GMAC_INT_STATUS 0x000000b0
1066 #define GMAC_INT_EN 0x000000b4
1067 +#define GMAC_1US_TIC_COUNTER 0x000000dc
1068 #define GMAC_PCS_BASE 0x000000e0
1069 #define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
1070 #define GMAC_PMT 0x000000c0
1072 #define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
1073 #define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
1075 +/* RX Queues Routing */
1076 +#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
1077 +#define GMAC_RXQCTRL_AVCPQ_SHIFT 0
1078 +#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4)
1079 +#define GMAC_RXQCTRL_PTPQ_SHIFT 4
1080 +#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8)
1081 +#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8
1082 +#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12)
1083 +#define GMAC_RXQCTRL_UPQ_SHIFT 12
1084 +#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16)
1085 +#define GMAC_RXQCTRL_MCBCQ_SHIFT 16
1086 +#define GMAC_RXQCTRL_MCBCQEN BIT(20)
1087 +#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
1088 +#define GMAC_RXQCTRL_TACPQE BIT(21)
1089 +#define GMAC_RXQCTRL_TACPQE_SHIFT 21
1091 /* MAC Packet Filtering */
1092 #define GMAC_PACKET_FILTER_PR BIT(0)
1093 #define GMAC_PACKET_FILTER_HMC BIT(2)
1095 /* MAC Flow Control RX */
1096 #define GMAC_RX_FLOW_CTRL_RFE BIT(0)
1098 +/* RX Queues Priorities */
1099 +#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
1100 +#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8)
1102 +/* TX Queues Priorities */
1103 +#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
1104 +#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8)
1106 /* MAC Flow Control TX */
1107 #define GMAC_TX_FLOW_CTRL_TFE BIT(1)
1108 #define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
1109 @@ -148,6 +178,8 @@ enum power_event {
1110 /* MAC HW features1 bitmap */
1111 #define GMAC_HW_FEAT_AVSEL BIT(20)
1112 #define GMAC_HW_TSOEN BIT(18)
1113 +#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
1114 +#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
1116 /* MAC HW features2 bitmap */
1117 #define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
1118 @@ -161,8 +193,25 @@ enum power_event {
1119 #define GMAC_HI_REG_AE BIT(31)
1122 +#define MTL_OPERATION_MODE 0x00000c00
1123 +#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
1124 +#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
1125 +#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
1126 +#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
1127 +#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
1128 +#define MTL_OPERATION_RAA BIT(2)
1129 +#define MTL_OPERATION_RAA_SP (0x0 << 2)
1130 +#define MTL_OPERATION_RAA_WSP (0x1 << 2)
1132 #define MTL_INT_STATUS 0x00000c20
1133 -#define MTL_INT_Q0 BIT(0)
1134 +#define MTL_INT_QX(x) BIT(x)
1136 +#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
1137 +#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
1138 +#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
1139 +#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
1140 +#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
1141 +#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
1143 #define MTL_CHAN_BASE_ADDR 0x00000d00
1144 #define MTL_CHAN_BASE_OFFSET 0x40
1145 @@ -180,6 +229,7 @@ enum power_event {
1146 #define MTL_OP_MODE_TSF BIT(1)
1148 #define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
1149 +#define MTL_OP_MODE_TQS_SHIFT 16
1151 #define MTL_OP_MODE_TTC_MASK 0x70
1152 #define MTL_OP_MODE_TTC_SHIFT 4
1153 @@ -193,6 +243,17 @@ enum power_event {
1154 #define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
1155 #define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
1157 +#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
1158 +#define MTL_OP_MODE_RQS_SHIFT 20
1160 +#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
1161 +#define MTL_OP_MODE_RFD_SHIFT 14
1163 +#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
1164 +#define MTL_OP_MODE_RFA_SHIFT 8
1166 +#define MTL_OP_MODE_EHFC BIT(7)
1168 #define MTL_OP_MODE_RTC_MASK 0x18
1169 #define MTL_OP_MODE_RTC_SHIFT 3
1171 @@ -201,6 +262,46 @@ enum power_event {
1172 #define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
1173 #define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
1175 +/* MTL ETS Control register */
1176 +#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
1177 +#define MTL_ETS_CTRL_BASE_OFFSET 0x40
1178 +#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
1179 + ((x) * MTL_ETS_CTRL_BASE_OFFSET))
1181 +#define MTL_ETS_CTRL_CC BIT(3)
1182 +#define MTL_ETS_CTRL_AVALG BIT(2)
1184 +/* MTL Queue Quantum Weight */
1185 +#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
1186 +#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
1187 +#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
1188 + ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
1189 +#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
1191 +/* MTL sendSlopeCredit register */
1192 +#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
1193 +#define MTL_SEND_SLP_CRED_OFFSET 0x40
1194 +#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
1195 + ((x) * MTL_SEND_SLP_CRED_OFFSET))
1197 +#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
1199 +/* MTL hiCredit register */
1200 +#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
1201 +#define MTL_HIGH_CRED_OFFSET 0x40
1202 +#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
1203 + ((x) * MTL_HIGH_CRED_OFFSET))
1205 +#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
1207 +/* MTL loCredit register */
1208 +#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
1209 +#define MTL_LOW_CRED_OFFSET 0x40
1210 +#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
1211 + ((x) * MTL_LOW_CRED_OFFSET))
1213 +#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
1216 #define MTL_DEBUG_TXSTSFSTS BIT(5)
1217 #define MTL_DEBUG_TXFSTS BIT(4)
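All of the per-queue MTL registers above share the same base + 0x40 * queue
stride, so the accessor macros reduce to simple address arithmetic, e.g.:

	MTL_TXQX_WEIGHT_BASE_ADDR(0) == 0xd18		/* queue 0 */
	MTL_TXQX_WEIGHT_BASE_ADDR(1) == 0xd18 + 0x40	/* 0xd58, queue 1 */
	MTL_ETSX_CTRL_BASE_ADDR(2)   == 0xd10 + 0x80	/* 0xd90, queue 2 */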
1218 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1219 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
1220 @@ -59,17 +59,211 @@ static void dwmac4_core_init(struct mac_
1221 writel(value, ioaddr + GMAC_INT_EN);
1224 -static void dwmac4_rx_queue_enable(struct mac_device_info *hw, u32 queue)
1225 +static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
1226 + u8 mode, u32 queue)
1228 void __iomem *ioaddr = hw->pcsr;
1229 u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
1231 value &= GMAC_RX_QUEUE_CLEAR(queue);
1232 - value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
1233 + if (mode == MTL_QUEUE_AVB)
1234 + value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
1235 + else if (mode == MTL_QUEUE_DCB)
1236 + value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
1238 writel(value, ioaddr + GMAC_RXQ_CTRL0);
1241 +static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
1242 + u32 prio, u32 queue)
1244 + void __iomem *ioaddr = hw->pcsr;
1245 + u32 base_register;
1248 + base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
1250 + value = readl(ioaddr + base_register);
1252 + value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
1253 + value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
1254 + GMAC_RXQCTRL_PSRQX_MASK(queue);
1255 + writel(value, ioaddr + base_register);
1258 +static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
1259 + u32 prio, u32 queue)
1261 + void __iomem *ioaddr = hw->pcsr;
1262 + u32 base_register;
1265 + base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
1267 + value = readl(ioaddr + base_register);
1269 + value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
1270 + value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
1271 + GMAC_TXQCTRL_PSTQX_MASK(queue);
1273 + writel(value, ioaddr + base_register);
1276 +static void dwmac4_tx_queue_routing(struct mac_device_info *hw,
1277 + u8 packet, u32 queue)
1279 + void __iomem *ioaddr = hw->pcsr;
1282 + const struct stmmac_rx_routing route_possibilities[] = {
1283 + { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
1284 + { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
1285 + { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
1286 + { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
1287 + { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
1290 + value = readl(ioaddr + GMAC_RXQ_CTRL1);
1292 + /* routing configuration */
1293 + value &= ~route_possibilities[packet - 1].reg_mask;
1294 + value |= (queue << route_possibilities[packet-1].reg_shift) &
1295 + route_possibilities[packet - 1].reg_mask;
1297 + /* some packets require extra ops */
1298 + if (packet == PACKET_AVCPQ) {
1299 + value &= ~GMAC_RXQCTRL_TACPQE;
1300 + value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
1301 + } else if (packet == PACKET_MCBCQ) {
1302 + value &= ~GMAC_RXQCTRL_MCBCQEN;
1303 + value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
1306 + writel(value, ioaddr + GMAC_RXQ_CTRL1);
1309 +static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
1312 + void __iomem *ioaddr = hw->pcsr;
1313 + u32 value = readl(ioaddr + MTL_OPERATION_MODE);
1315 + value &= ~MTL_OPERATION_RAA;
1317 + case MTL_RX_ALGORITHM_SP:
1318 + value |= MTL_OPERATION_RAA_SP;
1320 + case MTL_RX_ALGORITHM_WSP:
1321 + value |= MTL_OPERATION_RAA_WSP;
1327 + writel(value, ioaddr + MTL_OPERATION_MODE);
1330 +static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
1333 + void __iomem *ioaddr = hw->pcsr;
1334 + u32 value = readl(ioaddr + MTL_OPERATION_MODE);
1336 + value &= ~MTL_OPERATION_SCHALG_MASK;
1338 + case MTL_TX_ALGORITHM_WRR:
1339 + value |= MTL_OPERATION_SCHALG_WRR;
1341 + case MTL_TX_ALGORITHM_WFQ:
1342 + value |= MTL_OPERATION_SCHALG_WFQ;
1344 + case MTL_TX_ALGORITHM_DWRR:
1345 + value |= MTL_OPERATION_SCHALG_DWRR;
1347 + case MTL_TX_ALGORITHM_SP:
1348 + value |= MTL_OPERATION_SCHALG_SP;
1355 +static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
1356 + u32 weight, u32 queue)
1358 + void __iomem *ioaddr = hw->pcsr;
1359 + u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
1361 + value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
1362 + value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
1363 + writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
1366 +static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
1368 + void __iomem *ioaddr = hw->pcsr;
1372 + value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
1374 + value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
1376 + if (queue == 0 || queue == 4) {
1377 + value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
1378 + value |= MTL_RXQ_DMA_Q04MDMACH(chan);
1380 + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
1381 + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
1385 + writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
1387 + writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
1390 +static void dwmac4_config_cbs(struct mac_device_info *hw,
1391 + u32 send_slope, u32 idle_slope,
1392 + u32 high_credit, u32 low_credit, u32 queue)
1394 + void __iomem *ioaddr = hw->pcsr;
1397 + pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
1398 + pr_debug("\tsend_slope: 0x%08x\n", send_slope);
1399 + pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
1400 + pr_debug("\thigh_credit: 0x%08x\n", high_credit);
1401 + pr_debug("\tlow_credit: 0x%08x\n", low_credit);
1403 + /* enable AV algorithm */
1404 + value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
1405 + value |= MTL_ETS_CTRL_AVALG;
1406 + value |= MTL_ETS_CTRL_CC;
1407 + writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
1409 + /* configure send slope */
1410 + value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
1411 + value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
1412 + value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
1413 + writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
1415 + /* configure idle slope (same register as tx weight) */
1416 + dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
1418 + /* configure high credit */
1419 + value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
1420 + value &= ~MTL_HIGH_CRED_HC_MASK;
1421 + value |= high_credit & MTL_HIGH_CRED_HC_MASK;
1422 + writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
1424 +	/* configure low credit */
1425 + value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
1426 + value &= ~MTL_HIGH_CRED_LC_MASK;
1427 + value |= low_credit & MTL_HIGH_CRED_LC_MASK;
1428 + writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
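As a usage sketch, the device-tree example earlier in this patch would end up
reaching this function roughly as follows (the queue index 1 is an assumption,
matching the second TX queue of the example node):

	dwmac4_config_cbs(hw, 0x1000,		/* snps,send_slope */
			  0x1000,		/* snps,idle_slope */
			  0x3E800,		/* snps,high_credit */
			  0xFFC18000,		/* snps,low_credit */
			  1);			/* queue */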
1431 static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
1433 void __iomem *ioaddr = hw->pcsr;
1434 @@ -251,11 +445,12 @@ static void dwmac4_set_filter(struct mac
1437 static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
1438 - unsigned int fc, unsigned int pause_time)
1439 + unsigned int fc, unsigned int pause_time,
1442 void __iomem *ioaddr = hw->pcsr;
1443 - u32 channel = STMMAC_CHAN0; /* FIXME */
1444 unsigned int flow = 0;
1447 pr_debug("GMAC Flow-Control:\n");
1449 @@ -265,13 +460,18 @@ static void dwmac4_flow_ctrl(struct mac_
1452 pr_debug("\tTransmit Flow-Control ON\n");
1453 - flow |= GMAC_TX_FLOW_CTRL_TFE;
1454 - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
1458 pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
1459 - flow |= (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
1460 - writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(channel));
1462 + for (queue = 0; queue < tx_cnt; queue++) {
1463 + flow |= GMAC_TX_FLOW_CTRL_TFE;
1467 + (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
1469 + writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
1473 @@ -325,11 +525,34 @@ static void dwmac4_phystatus(void __iome
1477 +static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
1479 + void __iomem *ioaddr = hw->pcsr;
1480 + u32 mtl_int_qx_status;
1483 + mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
1485 + /* Check MTL Interrupt */
1486 + if (mtl_int_qx_status & MTL_INT_QX(chan)) {
1487 + /* read Queue x Interrupt status */
1488 + u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
1490 + if (status & MTL_RX_OVERFLOW_INT) {
1491 + /* clear Interrupt */
1492 + writel(status | MTL_RX_OVERFLOW_INT,
1493 + ioaddr + MTL_CHAN_INT_CTRL(chan));
1494 + ret = CORE_IRQ_MTL_RX_OVERFLOW;
1501 static int dwmac4_irq_status(struct mac_device_info *hw,
1502 struct stmmac_extra_stats *x)
1504 void __iomem *ioaddr = hw->pcsr;
1505 - u32 mtl_int_qx_status;
1509 @@ -348,20 +571,6 @@ static int dwmac4_irq_status(struct mac_
1510 x->irq_receive_pmt_irq_n++;
1513 - mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
1514 - /* Check MTL Interrupt: Currently only one queue is used: Q0. */
1515 - if (mtl_int_qx_status & MTL_INT_Q0) {
1516 - /* read Queue 0 Interrupt status */
1517 - u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
1519 - if (status & MTL_RX_OVERFLOW_INT) {
1520 - /* clear Interrupt */
1521 - writel(status | MTL_RX_OVERFLOW_INT,
1522 - ioaddr + MTL_CHAN_INT_CTRL(STMMAC_CHAN0));
1523 - ret = CORE_IRQ_MTL_RX_OVERFLOW;
1527 dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
1528 if (intr_status & PCS_RGSMIIIS_IRQ)
1529 dwmac4_phystatus(ioaddr, x);
1530 @@ -369,64 +578,69 @@ static int dwmac4_irq_status(struct mac_
1534 -static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x)
1535 +static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
1536 + u32 rx_queues, u32 tx_queues)
1541 - /* Currently only channel 0 is supported */
1542 - value = readl(ioaddr + MTL_CHAN_TX_DEBUG(STMMAC_CHAN0));
1543 + for (queue = 0; queue < tx_queues; queue++) {
1544 + value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
1546 - if (value & MTL_DEBUG_TXSTSFSTS)
1547 - x->mtl_tx_status_fifo_full++;
1548 - if (value & MTL_DEBUG_TXFSTS)
1549 - x->mtl_tx_fifo_not_empty++;
1550 - if (value & MTL_DEBUG_TWCSTS)
1551 - x->mmtl_fifo_ctrl++;
1552 - if (value & MTL_DEBUG_TRCSTS_MASK) {
1553 - u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
1554 - >> MTL_DEBUG_TRCSTS_SHIFT;
1555 - if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
1556 - x->mtl_tx_fifo_read_ctrl_write++;
1557 - else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
1558 - x->mtl_tx_fifo_read_ctrl_wait++;
1559 - else if (trcsts == MTL_DEBUG_TRCSTS_READ)
1560 - x->mtl_tx_fifo_read_ctrl_read++;
1562 - x->mtl_tx_fifo_read_ctrl_idle++;
1563 + if (value & MTL_DEBUG_TXSTSFSTS)
1564 + x->mtl_tx_status_fifo_full++;
1565 + if (value & MTL_DEBUG_TXFSTS)
1566 + x->mtl_tx_fifo_not_empty++;
1567 + if (value & MTL_DEBUG_TWCSTS)
1568 + x->mmtl_fifo_ctrl++;
1569 + if (value & MTL_DEBUG_TRCSTS_MASK) {
1570 + u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
1571 + >> MTL_DEBUG_TRCSTS_SHIFT;
1572 + if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
1573 + x->mtl_tx_fifo_read_ctrl_write++;
1574 + else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
1575 + x->mtl_tx_fifo_read_ctrl_wait++;
1576 + else if (trcsts == MTL_DEBUG_TRCSTS_READ)
1577 + x->mtl_tx_fifo_read_ctrl_read++;
1579 + x->mtl_tx_fifo_read_ctrl_idle++;
1581 + if (value & MTL_DEBUG_TXPAUSED)
1582 + x->mac_tx_in_pause++;
1584 - if (value & MTL_DEBUG_TXPAUSED)
1585 - x->mac_tx_in_pause++;
1587 - value = readl(ioaddr + MTL_CHAN_RX_DEBUG(STMMAC_CHAN0));
1588 + for (queue = 0; queue < rx_queues; queue++) {
1589 + value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
1591 - if (value & MTL_DEBUG_RXFSTS_MASK) {
1592 - u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
1593 - >> MTL_DEBUG_RRCSTS_SHIFT;
1595 - if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
1596 - x->mtl_rx_fifo_fill_level_full++;
1597 - else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
1598 - x->mtl_rx_fifo_fill_above_thresh++;
1599 - else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
1600 - x->mtl_rx_fifo_fill_below_thresh++;
1602 - x->mtl_rx_fifo_fill_level_empty++;
1604 - if (value & MTL_DEBUG_RRCSTS_MASK) {
1605 - u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
1606 - MTL_DEBUG_RRCSTS_SHIFT;
1608 - if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
1609 - x->mtl_rx_fifo_read_ctrl_flush++;
1610 - else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
1611 - x->mtl_rx_fifo_read_ctrl_read_data++;
1612 - else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
1613 - x->mtl_rx_fifo_read_ctrl_status++;
1615 - x->mtl_rx_fifo_read_ctrl_idle++;
1616 + if (value & MTL_DEBUG_RXFSTS_MASK) {
1617 + u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
1618 + >> MTL_DEBUG_RRCSTS_SHIFT;
1620 + if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
1621 + x->mtl_rx_fifo_fill_level_full++;
1622 + else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
1623 + x->mtl_rx_fifo_fill_above_thresh++;
1624 + else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
1625 + x->mtl_rx_fifo_fill_below_thresh++;
1627 + x->mtl_rx_fifo_fill_level_empty++;
1629 + if (value & MTL_DEBUG_RRCSTS_MASK) {
1630 + u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
1631 + MTL_DEBUG_RRCSTS_SHIFT;
1633 + if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
1634 + x->mtl_rx_fifo_read_ctrl_flush++;
1635 + else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
1636 + x->mtl_rx_fifo_read_ctrl_read_data++;
1637 + else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
1638 + x->mtl_rx_fifo_read_ctrl_status++;
1640 + x->mtl_rx_fifo_read_ctrl_idle++;
1642 + if (value & MTL_DEBUG_RWCSTS)
1643 + x->mtl_rx_fifo_ctrl_active++;
1645 - if (value & MTL_DEBUG_RWCSTS)
1646 - x->mtl_rx_fifo_ctrl_active++;
1649 value = readl(ioaddr + GMAC_DEBUG);
1650 @@ -455,10 +669,51 @@ static void dwmac4_debug(void __iomem *i
1652 static const struct stmmac_ops dwmac4_ops = {
1653 .core_init = dwmac4_core_init,
1654 + .set_mac = stmmac_set_mac,
1655 .rx_ipc = dwmac4_rx_ipc_enable,
1656 .rx_queue_enable = dwmac4_rx_queue_enable,
1657 + .rx_queue_prio = dwmac4_rx_queue_priority,
1658 + .tx_queue_prio = dwmac4_tx_queue_priority,
1659 + .rx_queue_routing = dwmac4_tx_queue_routing,
1660 + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1661 + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1662 + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1663 + .map_mtl_to_dma = dwmac4_map_mtl_dma,
1664 + .config_cbs = dwmac4_config_cbs,
1665 .dump_regs = dwmac4_dump_regs,
1666 .host_irq_status = dwmac4_irq_status,
1667 + .host_mtl_irq_status = dwmac4_irq_mtl_status,
1668 + .flow_ctrl = dwmac4_flow_ctrl,
1669 + .pmt = dwmac4_pmt,
1670 + .set_umac_addr = dwmac4_set_umac_addr,
1671 + .get_umac_addr = dwmac4_get_umac_addr,
1672 + .set_eee_mode = dwmac4_set_eee_mode,
1673 + .reset_eee_mode = dwmac4_reset_eee_mode,
1674 + .set_eee_timer = dwmac4_set_eee_timer,
1675 + .set_eee_pls = dwmac4_set_eee_pls,
1676 + .pcs_ctrl_ane = dwmac4_ctrl_ane,
1677 + .pcs_rane = dwmac4_rane,
1678 + .pcs_get_adv_lp = dwmac4_get_adv_lp,
1679 + .debug = dwmac4_debug,
1680 + .set_filter = dwmac4_set_filter,
1683 +static const struct stmmac_ops dwmac410_ops = {
1684 + .core_init = dwmac4_core_init,
1685 + .set_mac = stmmac_dwmac4_set_mac,
1686 + .rx_ipc = dwmac4_rx_ipc_enable,
1687 + .rx_queue_enable = dwmac4_rx_queue_enable,
1688 + .rx_queue_prio = dwmac4_rx_queue_priority,
1689 + .tx_queue_prio = dwmac4_tx_queue_priority,
1690 + .rx_queue_routing = dwmac4_tx_queue_routing,
1691 + .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
1692 + .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
1693 + .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
1694 + .map_mtl_to_dma = dwmac4_map_mtl_dma,
1695 + .config_cbs = dwmac4_config_cbs,
1696 + .dump_regs = dwmac4_dump_regs,
1697 + .host_irq_status = dwmac4_irq_status,
1698 + .host_mtl_irq_status = dwmac4_irq_mtl_status,
1699 .flow_ctrl = dwmac4_flow_ctrl,
1701 .set_umac_addr = dwmac4_set_umac_addr,
1702 @@ -492,8 +747,6 @@ struct mac_device_info *dwmac4_setup(voi
1703 if (mac->multicast_filter_bins)
1704 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1706 - mac->mac = &dwmac4_ops;
1708 mac->link.port = GMAC_CONFIG_PS;
1709 mac->link.duplex = GMAC_CONFIG_DM;
1710 mac->link.speed = GMAC_CONFIG_FES;
1711 @@ -514,5 +767,10 @@ struct mac_device_info *dwmac4_setup(voi
1713 mac->dma = &dwmac4_dma_ops;
1715 + if (*synopsys_id >= DWMAC_CORE_4_00)
1716 + mac->mac = &dwmac410_ops;
1718 + mac->mac = &dwmac4_ops;
1722 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1723 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
1724 @@ -214,13 +214,13 @@ static int dwmac4_wrback_get_tx_timestam
1726 /* Context type from W/B descriptor must be zero */
1727 if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
1731 /* Tx Timestamp Status is 1 so des0 and des1'll have valid values */
1732 if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
1740 static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
1741 @@ -282,7 +282,10 @@ static int dwmac4_wrback_get_rx_timestam
1746 + if (likely(ret == 0))
1752 static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
1753 @@ -304,12 +307,13 @@ static void dwmac4_rd_init_tx_desc(struc
1755 static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
1756 bool csum_flag, int mode, bool tx_own,
1758 + bool ls, unsigned int tot_pkt_len)
1760 unsigned int tdes3 = le32_to_cpu(p->des3);
1762 p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
1764 + tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
1766 tdes3 |= TDES3_FIRST_DESCRIPTOR;
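The new tot_pkt_len argument lands in the TDES3 packet-size field; the
hardware presumably consumes it from the descriptor that carries
TDES3_FIRST_DESCRIPTOR. The chain_mode.c hunk earlier shows the calling
convention for the first segment of a jumbo frame:

	/* first segment: is_fs = 1, pass the whole skb length */
	priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
					STMMAC_CHAIN_MODE, 0, false, skb->len);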
1768 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1769 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
1770 @@ -71,36 +71,48 @@ static void dwmac4_dma_axi(void __iomem
1771 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1774 -static void dwmac4_dma_init_channel(void __iomem *ioaddr,
1775 - struct stmmac_dma_cfg *dma_cfg,
1776 - u32 dma_tx_phy, u32 dma_rx_phy,
1778 +void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
1779 + struct stmmac_dma_cfg *dma_cfg,
1780 + u32 dma_rx_phy, u32 chan)
1783 - int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1784 - int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1785 + u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
1787 - /* set PBL for each channels. Currently we affect same configuration
1790 - value = readl(ioaddr + DMA_CHAN_CONTROL(channel));
1791 - if (dma_cfg->pblx8)
1792 - value = value | DMA_BUS_MODE_PBL;
1793 - writel(value, ioaddr + DMA_CHAN_CONTROL(channel));
1794 + value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
1795 + value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1796 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
1798 + writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
1801 - value = readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
1802 +void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
1803 + struct stmmac_dma_cfg *dma_cfg,
1804 + u32 dma_tx_phy, u32 chan)
1807 + u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
1809 + value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
1810 value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
1811 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(channel));
1812 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
1814 - value = readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
1815 - value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
1816 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(channel));
1817 + writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
1820 - /* Mask interrupts by writing to CSR7 */
1821 - writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr + DMA_CHAN_INTR_ENA(channel));
1822 +void dwmac4_dma_init_channel(void __iomem *ioaddr,
1823 + struct stmmac_dma_cfg *dma_cfg, u32 chan)
1827 + /* common channel control register config */
1828 + value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
1829 + if (dma_cfg->pblx8)
1830 + value = value | DMA_BUS_MODE_PBL;
1831 + writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
1833 - writel(dma_tx_phy, ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
1834 - writel(dma_rx_phy, ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
1835 + /* Mask interrupts by writing to CSR7 */
1836 + writel(DMA_CHAN_INTR_DEFAULT_MASK,
1837 + ioaddr + DMA_CHAN_INTR_ENA(chan));
1840 static void dwmac4_dma_init(void __iomem *ioaddr,
1841 @@ -108,7 +120,6 @@ static void dwmac4_dma_init(void __iomem
1842 u32 dma_tx, u32 dma_rx, int atds)
1844 u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
1847 /* Set the Fixed burst mode */
1848 if (dma_cfg->fixed_burst)
1849 @@ -122,9 +133,6 @@ static void dwmac4_dma_init(void __iomem
1850 value |= DMA_SYS_BUS_AAL;
1852 writel(value, ioaddr + DMA_SYS_BUS_MODE);
1854 - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
1855 - dwmac4_dma_init_channel(ioaddr, dma_cfg, dma_tx, dma_rx, i);
1858 static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
1859 @@ -174,46 +182,121 @@ static void dwmac4_dump_dma_regs(void __
1860 _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
1863 -static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt)
1864 +static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 number_chan)
1869 - for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
1870 - writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(i));
1871 + for (chan = 0; chan < number_chan; chan++)
1872 + writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(chan));
1875 -static void dwmac4_dma_chan_op_mode(void __iomem *ioaddr, int txmode,
1876 - int rxmode, u32 channel)
1877 +static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
1878 + u32 channel, int fifosz)
1880 - u32 mtl_tx_op, mtl_rx_op, mtl_rx_int;
1881 + unsigned int rqs = fifosz / 256 - 1;
1882 + u32 mtl_rx_op, mtl_rx_int;
1884 - /* Following code only done for channel 0, other channels not yet
1887 - mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
1888 + mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
1890 + if (mode == SF_DMA_MODE) {
1891 + pr_debug("GMAC: enable RX store and forward mode\n");
1892 + mtl_rx_op |= MTL_OP_MODE_RSF;
1893 + } else {
1894 + pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
1895 + mtl_rx_op &= ~MTL_OP_MODE_RSF;
1896 + mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
1897 + if (mode <= 32)
1898 + mtl_rx_op |= MTL_OP_MODE_RTC_32;
1899 + else if (mode <= 64)
1900 + mtl_rx_op |= MTL_OP_MODE_RTC_64;
1901 + else if (mode <= 96)
1902 + mtl_rx_op |= MTL_OP_MODE_RTC_96;
1903 + else
1904 + mtl_rx_op |= MTL_OP_MODE_RTC_128;
1905 + }
1907 + mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
1908 + mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
1910 + /* enable flow control only if each channel gets 4 KiB or more FIFO */
1911 + if (fifosz >= 4096) {
1912 + unsigned int rfd, rfa;
1914 + mtl_rx_op |= MTL_OP_MODE_EHFC;
1916 + /* Set Threshold for Activating Flow Control to min 2 frames,
1917 + * i.e. 1500 * 2 = 3000 bytes.
1919 + * Set Threshold for Deactivating Flow Control to min 1 frame,
1920 + * i.e. 1500 bytes.
1921 + */
1922 + switch (fifosz) {
1923 + case 4096:
1924 + /* This violates the formula above because the FIFO
1925 + * size is limited, so overflow may still occur.
1926 + */
1927 + rfd = 0x03; /* Full-2.5K */
1928 + rfa = 0x01; /* Full-1.5K */
1929 + break;
1931 + case 8192:
1932 + rfd = 0x06; /* Full-4K */
1933 + rfa = 0x0a; /* Full-6K */
1934 + break;
1936 + case 16384:
1937 + rfd = 0x06; /* Full-4K */
1938 + rfa = 0x12; /* Full-10K */
1939 + break;
1941 + default:
1942 + rfd = 0x06; /* Full-4K */
1943 + rfa = 0x1e; /* Full-16K */
1944 + break;
1945 + }
1947 - if (txmode == SF_DMA_MODE) {
1948 + mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
1949 + mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
1951 + mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
1952 + mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
1955 + writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
1957 + /* Enable MTL RX overflow */
1958 + mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
1959 + writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
1960 + ioaddr + MTL_CHAN_INT_CTRL(channel));
1963 +static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
1964 + u32 channel)
1966 + u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
1968 + if (mode == SF_DMA_MODE) {
1969 pr_debug("GMAC: enable TX store and forward mode\n");
1970 /* Transmit COE type 2 cannot be done in cut-through mode. */
1971 mtl_tx_op |= MTL_OP_MODE_TSF;
1973 - pr_debug("GMAC: disabling TX SF (threshold %d)\n", txmode);
1974 + pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
1975 mtl_tx_op &= ~MTL_OP_MODE_TSF;
1976 mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
1977 /* Set the transmit threshold */
1978 - if (txmode <= 32)
1979 + if (mode <= 32)
1980 mtl_tx_op |= MTL_OP_MODE_TTC_32;
1981 - else if (txmode <= 64)
1982 + else if (mode <= 64)
1983 mtl_tx_op |= MTL_OP_MODE_TTC_64;
1984 - else if (txmode <= 96)
1985 + else if (mode <= 96)
1986 mtl_tx_op |= MTL_OP_MODE_TTC_96;
1987 - else if (txmode <= 128)
1988 + else if (mode <= 128)
1989 mtl_tx_op |= MTL_OP_MODE_TTC_128;
1990 - else if (txmode <= 192)
1991 + else if (mode <= 192)
1992 mtl_tx_op |= MTL_OP_MODE_TTC_192;
1993 - else if (txmode <= 256)
1994 + else if (mode <= 256)
1995 mtl_tx_op |= MTL_OP_MODE_TTC_256;
1996 - else if (txmode <= 384)
1997 + else if (mode <= 384)
1998 mtl_tx_op |= MTL_OP_MODE_TTC_384;
1999 else
2000 mtl_tx_op |= MTL_OP_MODE_TTC_512;
2001 @@ -230,39 +313,6 @@ static void dwmac4_dma_chan_op_mode(void
2003 mtl_tx_op |= MTL_OP_MODE_TXQEN | MTL_OP_MODE_TQS_MASK;
2004 writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
2006 - mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
2008 - if (rxmode == SF_DMA_MODE) {
2009 - pr_debug("GMAC: enable RX store and forward mode\n");
2010 - mtl_rx_op |= MTL_OP_MODE_RSF;
2012 - pr_debug("GMAC: disable RX SF mode (threshold %d)\n", rxmode);
2013 - mtl_rx_op &= ~MTL_OP_MODE_RSF;
2014 - mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
2016 - mtl_rx_op |= MTL_OP_MODE_RTC_32;
2017 - else if (rxmode <= 64)
2018 - mtl_rx_op |= MTL_OP_MODE_RTC_64;
2019 - else if (rxmode <= 96)
2020 - mtl_rx_op |= MTL_OP_MODE_RTC_96;
2022 - mtl_rx_op |= MTL_OP_MODE_RTC_128;
2025 - writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
2027 - /* Enable MTL RX overflow */
2028 - mtl_rx_int = readl(ioaddr + MTL_CHAN_INT_CTRL(channel));
2029 - writel(mtl_rx_int | MTL_RX_OVERFLOW_INT_EN,
2030 - ioaddr + MTL_CHAN_INT_CTRL(channel));
2033 -static void dwmac4_dma_operation_mode(void __iomem *ioaddr, int txmode,
2034 - int rxmode, int rxfifosz)
2036 - /* Only Channel 0 is actually configured and used */
2037 - dwmac4_dma_chan_op_mode(ioaddr, txmode, rxmode, 0);
2040 static void dwmac4_get_hw_feature(void __iomem *ioaddr,
2041 @@ -294,6 +344,11 @@ static void dwmac4_get_hw_feature(void _
2042 hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
2043 dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
2044 dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
2045 + /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
2046 + * shifting and store the sizes in bytes.
2047 + */
2048 + dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
2049 + dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
2050 /* MAC HW feature2 */
2051 hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
2052 /* TX and RX number of channels */
2053 @@ -332,9 +387,13 @@ static void dwmac4_enable_tso(void __iom
2054 const struct stmmac_dma_ops dwmac4_dma_ops = {
2055 .reset = dwmac4_dma_reset,
2056 .init = dwmac4_dma_init,
2057 + .init_chan = dwmac4_dma_init_channel,
2058 + .init_rx_chan = dwmac4_dma_init_rx_chan,
2059 + .init_tx_chan = dwmac4_dma_init_tx_chan,
2060 .axi = dwmac4_dma_axi,
2061 .dump_regs = dwmac4_dump_dma_regs,
2062 - .dma_mode = dwmac4_dma_operation_mode,
2063 + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
2064 + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
2065 .enable_dma_irq = dwmac4_enable_dma_irq,
2066 .disable_dma_irq = dwmac4_disable_dma_irq,
2067 .start_tx = dwmac4_dma_start_tx,
2068 @@ -354,9 +413,13 @@ const struct stmmac_dma_ops dwmac4_dma_o
2069 const struct stmmac_dma_ops dwmac410_dma_ops = {
2070 .reset = dwmac4_dma_reset,
2071 .init = dwmac4_dma_init,
2072 + .init_chan = dwmac4_dma_init_channel,
2073 + .init_rx_chan = dwmac4_dma_init_rx_chan,
2074 + .init_tx_chan = dwmac4_dma_init_tx_chan,
2075 .axi = dwmac4_dma_axi,
2076 .dump_regs = dwmac4_dump_dma_regs,
2077 - .dma_mode = dwmac4_dma_operation_mode,
2078 + .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
2079 + .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
2080 .enable_dma_irq = dwmac410_enable_dma_irq,
2081 .disable_dma_irq = dwmac4_disable_dma_irq,
2082 .start_tx = dwmac4_dma_start_tx,
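Two calculations in this file benefit from concrete numbers. The FIFO sizes in GMAC_HW_FEATURE1 are encoded as log2(size/128), so the driver decodes them with a shift, and the RX queue size field (RQS) counts 256-byte blocks minus one. Worked through for a core assumed to be synthesized with 4 KiB FIFOs (encoded field value 5):

	fifo size = 128 << 5        = 4096 bytes
	rqs       = 4096 / 256 - 1  = 15
	fifosz == 4096: EHFC is set, RFD = 0x03 (Full-2.5K), RFA = 0x01 (Full-1.5K)

That is, flow control is asserted once free FIFO space drops to about 1.5 KiB and deasserted when it recovers to about 2.5 KiB, short of the two-frame/one-frame targets the in-code comment sets out, which is exactly why the 4 KiB case carries the overflow warning.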
2083 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
2084 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
2085 @@ -185,17 +185,17 @@
2087 int dwmac4_dma_reset(void __iomem *ioaddr);
2088 void dwmac4_enable_dma_transmission(void __iomem *ioaddr, u32 tail_ptr);
2089 -void dwmac4_enable_dma_irq(void __iomem *ioaddr);
2090 -void dwmac410_enable_dma_irq(void __iomem *ioaddr);
2091 -void dwmac4_disable_dma_irq(void __iomem *ioaddr);
2092 -void dwmac4_dma_start_tx(void __iomem *ioaddr);
2093 -void dwmac4_dma_stop_tx(void __iomem *ioaddr);
2094 -void dwmac4_dma_start_rx(void __iomem *ioaddr);
2095 -void dwmac4_dma_stop_rx(void __iomem *ioaddr);
2096 +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2097 +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2098 +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan);
2099 +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
2100 +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
2101 +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
2102 +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
2103 int dwmac4_dma_interrupt(void __iomem *ioaddr,
2104 - struct stmmac_extra_stats *x);
2105 -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len);
2106 -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len);
2107 + struct stmmac_extra_stats *x, u32 chan);
2108 +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
2109 +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
2110 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
2111 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
2113 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
2114 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
2115 @@ -37,96 +37,96 @@ int dwmac4_dma_reset(void __iomem *ioadd
2117 void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
2119 - writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(0));
2120 + writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
2123 void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
2125 - writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(0));
2126 + writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
2129 -void dwmac4_dma_start_tx(void __iomem *ioaddr)
2130 +void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
2132 - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2133 + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
2135 value |= DMA_CONTROL_ST;
2136 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2137 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
2139 value = readl(ioaddr + GMAC_CONFIG);
2140 value |= GMAC_CONFIG_TE;
2141 writel(value, ioaddr + GMAC_CONFIG);
2144 -void dwmac4_dma_stop_tx(void __iomem *ioaddr)
2145 +void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
2147 - u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2148 + u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
2150 value &= ~DMA_CONTROL_ST;
2151 - writel(value, ioaddr + DMA_CHAN_TX_CONTROL(STMMAC_CHAN0));
2152 + writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
2154 value = readl(ioaddr + GMAC_CONFIG);
2155 value &= ~GMAC_CONFIG_TE;
2156 writel(value, ioaddr + GMAC_CONFIG);
2159 -void dwmac4_dma_start_rx(void __iomem *ioaddr)
2160 +void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
2162 - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2163 + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
2165 value |= DMA_CONTROL_SR;
2167 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2168 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
2170 value = readl(ioaddr + GMAC_CONFIG);
2171 value |= GMAC_CONFIG_RE;
2172 writel(value, ioaddr + GMAC_CONFIG);
2175 -void dwmac4_dma_stop_rx(void __iomem *ioaddr)
2176 +void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
2178 - u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2179 + u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
2181 value &= ~DMA_CONTROL_SR;
2182 - writel(value, ioaddr + DMA_CHAN_RX_CONTROL(STMMAC_CHAN0));
2183 + writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
2185 value = readl(ioaddr + GMAC_CONFIG);
2186 value &= ~GMAC_CONFIG_RE;
2187 writel(value, ioaddr + GMAC_CONFIG);
2190 -void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len)
2191 +void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
2193 - writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(STMMAC_CHAN0));
2194 + writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
2197 -void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len)
2198 +void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
2200 - writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(STMMAC_CHAN0));
2201 + writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
2204 -void dwmac4_enable_dma_irq(void __iomem *ioaddr)
2205 +void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2207 writel(DMA_CHAN_INTR_DEFAULT_MASK, ioaddr +
2208 - DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2209 + DMA_CHAN_INTR_ENA(chan));
2212 -void dwmac410_enable_dma_irq(void __iomem *ioaddr)
2213 +void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2215 writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
2216 - ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2217 + ioaddr + DMA_CHAN_INTR_ENA(chan));
2220 -void dwmac4_disable_dma_irq(void __iomem *ioaddr)
2221 +void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan)
2223 - writel(0, ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2224 + writel(0, ioaddr + DMA_CHAN_INTR_ENA(chan));
2227 int dwmac4_dma_interrupt(void __iomem *ioaddr,
2228 - struct stmmac_extra_stats *x)
2229 + struct stmmac_extra_stats *x, u32 chan)
2233 - u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(0));
2234 + u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
2236 /* ABNORMAL interrupts */
2237 if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
2238 @@ -153,7 +153,7 @@ int dwmac4_dma_interrupt(void __iomem *i
2239 if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
2242 - value = readl(ioaddr + DMA_CHAN_INTR_ENA(STMMAC_CHAN0));
2243 + value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
2244 /* to schedule NAPI on real RIE event. */
2245 if (likely(value & DMA_CHAN_INTR_ENA_RIE)) {
2246 x->rx_normal_irq_n++;
2247 @@ -172,7 +172,7 @@ int dwmac4_dma_interrupt(void __iomem *i
2248 * status [21-0] expect reserved bits [5-3]
2250 writel((intr_status & 0x3fffc7),
2251 - ioaddr + DMA_CHAN_STATUS(STMMAC_CHAN0));
2252 + ioaddr + DMA_CHAN_STATUS(chan));
2256 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
2257 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
2258 @@ -137,13 +137,14 @@
2259 #define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
2261 void dwmac_enable_dma_transmission(void __iomem *ioaddr);
2262 -void dwmac_enable_dma_irq(void __iomem *ioaddr);
2263 -void dwmac_disable_dma_irq(void __iomem *ioaddr);
2264 -void dwmac_dma_start_tx(void __iomem *ioaddr);
2265 -void dwmac_dma_stop_tx(void __iomem *ioaddr);
2266 -void dwmac_dma_start_rx(void __iomem *ioaddr);
2267 -void dwmac_dma_stop_rx(void __iomem *ioaddr);
2268 -int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x);
2269 +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan);
2270 +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan);
2271 +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
2272 +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
2273 +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
2274 +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
2275 +int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
2276 + u32 chan);
2277 int dwmac_dma_reset(void __iomem *ioaddr);
2279 #endif /* __DWMAC_DMA_H__ */
2280 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
2281 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
2282 @@ -47,38 +47,38 @@ void dwmac_enable_dma_transmission(void
2283 writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
2286 -void dwmac_enable_dma_irq(void __iomem *ioaddr)
2287 +void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan)
2289 writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
2292 -void dwmac_disable_dma_irq(void __iomem *ioaddr)
2293 +void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan)
2295 writel(0, ioaddr + DMA_INTR_ENA);
2298 -void dwmac_dma_start_tx(void __iomem *ioaddr)
2299 +void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
2301 u32 value = readl(ioaddr + DMA_CONTROL);
2302 value |= DMA_CONTROL_ST;
2303 writel(value, ioaddr + DMA_CONTROL);
2306 -void dwmac_dma_stop_tx(void __iomem *ioaddr)
2307 +void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
2309 u32 value = readl(ioaddr + DMA_CONTROL);
2310 value &= ~DMA_CONTROL_ST;
2311 writel(value, ioaddr + DMA_CONTROL);
2314 -void dwmac_dma_start_rx(void __iomem *ioaddr)
2315 +void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
2317 u32 value = readl(ioaddr + DMA_CONTROL);
2318 value |= DMA_CONTROL_SR;
2319 writel(value, ioaddr + DMA_CONTROL);
2322 -void dwmac_dma_stop_rx(void __iomem *ioaddr)
2323 +void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
2325 u32 value = readl(ioaddr + DMA_CONTROL);
2326 value &= ~DMA_CONTROL_SR;
2327 @@ -156,7 +156,7 @@ static void show_rx_process_state(unsign
2330 int dwmac_dma_interrupt(void __iomem *ioaddr,
2331 - struct stmmac_extra_stats *x)
2332 + struct stmmac_extra_stats *x, u32 chan)
2335 /* read the status register (CSR5) */
2336 --- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2337 +++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
2338 @@ -315,7 +315,7 @@ static void enh_desc_release_tx_desc(str
2340 static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
2341 bool csum_flag, int mode, bool tx_own,
2342 - bool ls)
2343 + bool ls, unsigned int tot_pkt_len)
2345 unsigned int tdes0 = le32_to_cpu(p->des0);
2347 --- a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2348 +++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
2349 @@ -191,7 +191,7 @@ static void ndesc_release_tx_desc(struct
2351 static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
2352 bool csum_flag, int mode, bool tx_own,
2353 - bool ls)
2354 + bool ls, unsigned int tot_pkt_len)
2356 unsigned int tdes1 = le32_to_cpu(p->des1);
2358 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2359 +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
2362 static int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum)
2364 - struct stmmac_priv *priv = (struct stmmac_priv *)p;
2365 - unsigned int entry = priv->cur_tx;
2366 - struct dma_desc *desc;
2367 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
2368 unsigned int nopaged_len = skb_headlen(skb);
2369 + struct stmmac_priv *priv = tx_q->priv_data;
2370 + unsigned int entry = tx_q->cur_tx;
2371 unsigned int bmax, len, des2;
2372 + struct dma_desc *desc;
2374 if (priv->extend_desc)
2375 - desc = (struct dma_desc *)(priv->dma_etx + entry);
2376 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2378 - desc = priv->dma_tx + entry;
2379 + desc = tx_q->dma_tx + entry;
2381 if (priv->plat->enh_desc)
2382 bmax = BUF_SIZE_8KiB;
2383 @@ -52,48 +53,51 @@ static int stmmac_jumbo_frm(void *p, str
2384 if (dma_mapping_error(priv->device, des2))
2387 - priv->tx_skbuff_dma[entry].buf = des2;
2388 - priv->tx_skbuff_dma[entry].len = bmax;
2389 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2390 + tx_q->tx_skbuff_dma[entry].buf = des2;
2391 + tx_q->tx_skbuff_dma[entry].len = bmax;
2392 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2394 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2395 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum,
2396 - STMMAC_RING_MODE, 0, false);
2397 - priv->tx_skbuff[entry] = NULL;
2398 + STMMAC_RING_MODE, 0,
2399 + false, skb->len);
2400 + tx_q->tx_skbuff[entry] = NULL;
2401 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
2403 if (priv->extend_desc)
2404 - desc = (struct dma_desc *)(priv->dma_etx + entry);
2405 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2407 - desc = priv->dma_tx + entry;
2408 + desc = tx_q->dma_tx + entry;
2410 des2 = dma_map_single(priv->device, skb->data + bmax, len,
2412 desc->des2 = cpu_to_le32(des2);
2413 if (dma_mapping_error(priv->device, des2))
2415 - priv->tx_skbuff_dma[entry].buf = des2;
2416 - priv->tx_skbuff_dma[entry].len = len;
2417 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2418 + tx_q->tx_skbuff_dma[entry].buf = des2;
2419 + tx_q->tx_skbuff_dma[entry].len = len;
2420 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2422 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2423 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
2424 - STMMAC_RING_MODE, 1, true);
2425 + STMMAC_RING_MODE, 1,
2426 + true, skb->len);
2428 des2 = dma_map_single(priv->device, skb->data,
2429 nopaged_len, DMA_TO_DEVICE);
2430 desc->des2 = cpu_to_le32(des2);
2431 if (dma_mapping_error(priv->device, des2))
2433 - priv->tx_skbuff_dma[entry].buf = des2;
2434 - priv->tx_skbuff_dma[entry].len = nopaged_len;
2435 - priv->tx_skbuff_dma[entry].is_jumbo = true;
2436 + tx_q->tx_skbuff_dma[entry].buf = des2;
2437 + tx_q->tx_skbuff_dma[entry].len = nopaged_len;
2438 + tx_q->tx_skbuff_dma[entry].is_jumbo = true;
2439 desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
2440 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum,
2441 - STMMAC_RING_MODE, 0, true);
2442 + STMMAC_RING_MODE, 0,
2443 + true, skb->len);
2446 - priv->cur_tx = entry;
2447 + tx_q->cur_tx = entry;
2451 @@ -125,12 +129,13 @@ static void stmmac_init_desc3(struct dma
2453 static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
2455 - struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
2456 - unsigned int entry = priv->dirty_tx;
2457 + struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
2458 + struct stmmac_priv *priv = tx_q->priv_data;
2459 + unsigned int entry = tx_q->dirty_tx;
2461 /* des3 is only used for jumbo frames tx or time stamping */
2462 - if (unlikely(priv->tx_skbuff_dma[entry].is_jumbo ||
2463 - (priv->tx_skbuff_dma[entry].last_segment &&
2464 + if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
2465 + (tx_q->tx_skbuff_dma[entry].last_segment &&
2466 !priv->extend_desc && priv->hwts_tx_en)))
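The ring_mode callbacks used to be handed the stmmac_priv pointer directly; after this hunk the opaque pointer is the per-queue TX structure and the private data is recovered through tx_q->priv_data. A short sketch of the presumed caller side (the actual call sites live in stmmac_main.c, outside this excerpt):

	/* Sketch: callers are assumed to pass the queue, not the priv. */
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

	priv->hw->mode->jumbo_frm(tx_q, skb, csum);	/* was (priv, skb, csum) */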
2469 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2470 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
2471 @@ -46,38 +46,51 @@ struct stmmac_tx_info {
2475 -struct stmmac_priv {
2476 - /* Frequently used values are kept adjacent for cache effect */
2477 +/* Frequently used values are kept adjacent for cache effect */
2478 +struct stmmac_tx_queue {
2479 + u32 queue_index;
2480 + struct stmmac_priv *priv_data;
2481 struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
2482 struct dma_desc *dma_tx;
2483 struct sk_buff **tx_skbuff;
2484 + struct stmmac_tx_info *tx_skbuff_dma;
2485 unsigned int cur_tx;
2486 unsigned int dirty_tx;
2487 + dma_addr_t dma_tx_phy;
2488 + u32 tx_tail_addr;
2489 +};
2491 +struct stmmac_rx_queue {
2492 + u32 queue_index;
2493 + struct stmmac_priv *priv_data;
2494 + struct dma_extended_desc *dma_erx;
2495 + struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
2496 + struct sk_buff **rx_skbuff;
2497 + dma_addr_t *rx_skbuff_dma;
2498 + unsigned int cur_rx;
2499 + unsigned int dirty_rx;
2500 + u32 rx_zeroc_thresh;
2501 + dma_addr_t dma_rx_phy;
2502 + u32 rx_tail_addr;
2503 + struct napi_struct napi ____cacheline_aligned_in_smp;
2504 +};
2506 +struct stmmac_priv {
2507 + /* Frequently used values are kept adjacent for cache effect */
2508 u32 tx_count_frames;
2511 - struct stmmac_tx_info *tx_skbuff_dma;
2512 - dma_addr_t dma_tx_phy;
2516 bool tx_path_in_lpi_mode;
2517 struct timer_list txtimer;
2520 - struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
2521 - struct dma_extended_desc *dma_erx;
2522 - struct sk_buff **rx_skbuff;
2523 - unsigned int cur_rx;
2524 - unsigned int dirty_rx;
2525 unsigned int dma_buf_sz;
2526 unsigned int rx_copybreak;
2527 - unsigned int rx_zeroc_thresh;
2530 - dma_addr_t *rx_skbuff_dma;
2531 - dma_addr_t dma_rx_phy;
2533 - struct napi_struct napi ____cacheline_aligned_in_smp;
2535 void __iomem *ioaddr;
2536 struct net_device *dev;
2537 @@ -85,6 +98,12 @@ struct stmmac_priv {
2538 struct mac_device_info *hw;
2542 + struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
2545 + struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
2550 @@ -119,8 +138,6 @@ struct stmmac_priv {
2551 spinlock_t ptp_lock;
2552 void __iomem *mmcaddr;
2553 void __iomem *ptpaddr;
2558 #ifdef CONFIG_DEBUG_FS
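With stmmac.h reorganized, every ring plus its NAPI context is addressed per queue rather than through a single set of fields in stmmac_priv. A minimal access sketch using only names from the hunk (queue bounds come from the platform data used throughout the series):

	struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];	/* queue < rx_queues_to_use */
	struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];	/* queue < tx_queues_to_use */

	/* state that previously lived directly in stmmac_priv: */
	unsigned int in_flight = tx_q->cur_tx - tx_q->dirty_tx;
	napi_schedule(&rx_q->napi);	/* NAPI is now per RX queue */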
2559 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2560 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2561 @@ -481,6 +481,7 @@ stmmac_set_pauseparam(struct net_device
2562 struct ethtool_pauseparam *pause)
2564 struct stmmac_priv *priv = netdev_priv(netdev);
2565 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2566 struct phy_device *phy = netdev->phydev;
2567 int new_pause = FLOW_OFF;
2569 @@ -511,7 +512,7 @@ stmmac_set_pauseparam(struct net_device
2572 priv->hw->mac->flow_ctrl(priv->hw, phy->duplex, priv->flow_ctrl,
2573 - priv->pause);
2574 + priv->pause, tx_cnt);
2578 @@ -519,6 +520,8 @@ static void stmmac_get_ethtool_stats(str
2579 struct ethtool_stats *dummy, u64 *data)
2581 struct stmmac_priv *priv = netdev_priv(dev);
2582 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
2583 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
2586 /* Update the DMA HW counters for dwmac10/100 */
2587 @@ -549,7 +552,8 @@ static void stmmac_get_ethtool_stats(str
2588 if ((priv->hw->mac->debug) &&
2589 (priv->synopsys_id >= DWMAC_CORE_3_50))
2590 priv->hw->mac->debug(priv->ioaddr,
2591 - (void *)&priv->xstats);
2592 + (void *)&priv->xstats,
2593 + rx_queues_count, tx_queues_count);
2595 for (i = 0; i < STMMAC_STATS_LEN; i++) {
2596 char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
2597 @@ -726,6 +730,7 @@ static int stmmac_set_coalesce(struct ne
2598 struct ethtool_coalesce *ec)
2600 struct stmmac_priv *priv = netdev_priv(dev);
2601 + u32 rx_cnt = priv->plat->rx_queues_to_use;
2602 unsigned int rx_riwt;
2604 /* Check not supported parameters */
2605 @@ -764,7 +769,7 @@ static int stmmac_set_coalesce(struct ne
2606 priv->tx_coal_frames = ec->tx_max_coalesced_frames;
2607 priv->tx_coal_timer = ec->tx_coalesce_usecs;
2608 priv->rx_riwt = rx_riwt;
2609 - priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt);
2610 + priv->hw->dma->rx_watchdog(priv->ioaddr, priv->rx_riwt, rx_cnt);
2614 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2615 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2616 @@ -139,6 +139,64 @@ static void stmmac_verify_args(void)
2620 + * stmmac_disable_all_queues - Disable all queues
2621 + * @priv: driver private structure
2623 +static void stmmac_disable_all_queues(struct stmmac_priv *priv)
2625 + u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2628 + for (queue = 0; queue < rx_queues_cnt; queue++) {
2629 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2631 + napi_disable(&rx_q->napi);
2636 + * stmmac_enable_all_queues - Enable all queues
2637 + * @priv: driver private structure
2639 +static void stmmac_enable_all_queues(struct stmmac_priv *priv)
2641 + u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
2644 + for (queue = 0; queue < rx_queues_cnt; queue++) {
2645 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2647 + napi_enable(&rx_q->napi);
2652 + * stmmac_stop_all_queues - Stop all queues
2653 + * @priv: driver private structure
2655 +static void stmmac_stop_all_queues(struct stmmac_priv *priv)
2657 + u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2660 + for (queue = 0; queue < tx_queues_cnt; queue++)
2661 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
2665 + * stmmac_start_all_queues - Start all queues
2666 + * @priv: driver private structure
2668 +static void stmmac_start_all_queues(struct stmmac_priv *priv)
2670 + u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
2673 + for (queue = 0; queue < tx_queues_cnt; queue++)
2674 + netif_tx_start_queue(netdev_get_tx_queue(priv->dev, queue));
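These four helpers give the open and stop paths symmetric entry points. A sketch of the presumed pairing (the actual call sites are not in this excerpt, so the placement is an assumption):

	/* Sketch only: presumed use from ndo_open / ndo_stop. */
	stmmac_enable_all_queues(priv);	/* per-RX-queue NAPI on */
	stmmac_start_all_queues(priv);	/* wake every netdev TX queue */
	...
	stmmac_stop_all_queues(priv);
	stmmac_disable_all_queues(priv);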
2678 * stmmac_clk_csr_set - dynamically set the MDC clock
2679 * @priv: driver private structure
2680 * Description: this is to dynamically set the MDC clock according to the csr
2681 @@ -185,26 +243,33 @@ static void print_pkt(unsigned char *buf
2682 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
2685 -static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
2686 +static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
2688 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2691 - if (priv->dirty_tx > priv->cur_tx)
2692 - avail = priv->dirty_tx - priv->cur_tx - 1;
2693 + if (tx_q->dirty_tx > tx_q->cur_tx)
2694 + avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
2696 - avail = DMA_TX_SIZE - priv->cur_tx + priv->dirty_tx - 1;
2697 + avail = DMA_TX_SIZE - tx_q->cur_tx + tx_q->dirty_tx - 1;
2702 -static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv)
2704 + * stmmac_rx_dirty - Get RX queue dirty
2705 + * @priv: driver private structure
2706 + * @queue: RX queue index
2708 +static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
2710 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2713 - if (priv->dirty_rx <= priv->cur_rx)
2714 - dirty = priv->cur_rx - priv->dirty_rx;
2715 + if (rx_q->dirty_rx <= rx_q->cur_rx)
2716 + dirty = rx_q->cur_rx - rx_q->dirty_rx;
2718 - dirty = DMA_RX_SIZE - priv->dirty_rx + priv->cur_rx;
2719 + dirty = DMA_RX_SIZE - rx_q->dirty_rx + rx_q->cur_rx;
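A worked example of the ring arithmetic above, assuming DMA_TX_SIZE and DMA_RX_SIZE of 512 (the actual sizes are defined elsewhere in the driver):

	cur_tx = 510, dirty_tx = 5   ->  avail = 512 - 510 + 5 - 1 = 6 free TX slots
	cur_rx = 7,   dirty_rx = 500 ->  dirty = 512 - 500 + 7 = 19 RX entries to refill

The extra "- 1" on the TX side keeps one slot permanently unused so that a completely full ring can be distinguished from an empty one.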
2723 @@ -232,9 +297,19 @@ static inline void stmmac_hw_fix_mac_spe
2725 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
2727 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2730 + /* check if all TX queues have the work finished */
2731 + for (queue = 0; queue < tx_cnt; queue++) {
2732 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2734 + if (tx_q->dirty_tx != tx_q->cur_tx)
2735 + return; /* still unfinished work */
2738 /* Check and enter in LPI mode */
2739 - if ((priv->dirty_tx == priv->cur_tx) &&
2740 - (priv->tx_path_in_lpi_mode == false))
2741 + if (!priv->tx_path_in_lpi_mode)
2742 priv->hw->mac->set_eee_mode(priv->hw,
2743 priv->plat->en_tx_lpi_clockgating);
2745 @@ -365,14 +440,14 @@ static void stmmac_get_tx_hwtstamp(struc
2748 /* check tx tstamp status */
2749 - if (!priv->hw->desc->get_tx_timestamp_status(p)) {
2750 + if (priv->hw->desc->get_tx_timestamp_status(p)) {
2751 /* get the valid tstamp */
2752 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
2754 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2755 shhwtstamp.hwtstamp = ns_to_ktime(ns);
2757 - netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
2758 + netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
2759 /* pass tstamp to stack */
2760 skb_tstamp_tx(skb, &shhwtstamp);
2762 @@ -399,19 +474,19 @@ static void stmmac_get_rx_hwtstamp(struc
2765 /* Check if timestamp is available */
2766 - if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
2767 + if (priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
2768 /* For GMAC4, the valid timestamp is from CTX next desc. */
2769 if (priv->plat->has_gmac4)
2770 ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
2772 ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
2774 - netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
2775 + netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
2776 shhwtstamp = skb_hwtstamps(skb);
2777 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
2778 shhwtstamp->hwtstamp = ns_to_ktime(ns);
2780 - netdev_err(priv->dev, "cannot get RX hw timestamp\n");
2781 + netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
2785 @@ -688,6 +763,19 @@ static void stmmac_release_ptp(struct st
2789 + * stmmac_mac_flow_ctrl - Configure flow control in all queues
2790 + * @priv: driver private structure
2791 + * Description: It is used for configuring the flow control in all queues
2793 +static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
2795 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2797 + priv->hw->mac->flow_ctrl(priv->hw, duplex, priv->flow_ctrl,
2798 + priv->pause, tx_cnt);
2802 * stmmac_adjust_link - adjusts the link parameters
2803 * @dev: net device structure
2804 * Description: this is the helper called by the physical abstraction layer
2805 @@ -702,7 +790,6 @@ static void stmmac_adjust_link(struct ne
2806 struct phy_device *phydev = dev->phydev;
2807 unsigned long flags;
2809 - unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;
2813 @@ -724,8 +811,7 @@ static void stmmac_adjust_link(struct ne
2815 /* Flow Control operation */
2817 - priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
2819 + stmmac_mac_flow_ctrl(priv, phydev->duplex);
2821 if (phydev->speed != priv->speed) {
2823 @@ -893,22 +979,56 @@ static int stmmac_init_phy(struct net_de
2827 -static void stmmac_display_rings(struct stmmac_priv *priv)
2828 +static void stmmac_display_rx_rings(struct stmmac_priv *priv)
2830 - void *head_rx, *head_tx;
2831 + u32 rx_cnt = priv->plat->rx_queues_to_use;
2835 - if (priv->extend_desc) {
2836 - head_rx = (void *)priv->dma_erx;
2837 - head_tx = (void *)priv->dma_etx;
2839 - head_rx = (void *)priv->dma_rx;
2840 - head_tx = (void *)priv->dma_tx;
2841 + /* Display RX rings */
2842 + for (queue = 0; queue < rx_cnt; queue++) {
2843 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2845 + pr_info("\tRX Queue %u rings\n", queue);
2847 + if (priv->extend_desc)
2848 + head_rx = (void *)rx_q->dma_erx;
2850 + head_rx = (void *)rx_q->dma_rx;
2852 + /* Display RX ring */
2853 + priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
2857 +static void stmmac_display_tx_rings(struct stmmac_priv *priv)
2859 + u32 tx_cnt = priv->plat->tx_queues_to_use;
2863 + /* Display TX rings */
2864 + for (queue = 0; queue < tx_cnt; queue++) {
2865 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2867 + pr_info("\tTX Queue %u rings\n", queue);
2869 + if (priv->extend_desc)
2870 + head_tx = (void *)tx_q->dma_etx;
2872 + head_tx = (void *)tx_q->dma_tx;
2874 + priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
2878 +static void stmmac_display_rings(struct stmmac_priv *priv)
2880 + /* Display RX ring */
2881 + stmmac_display_rx_rings(priv);
2883 - /* Display Rx ring */
2884 - priv->hw->desc->display_ring(head_rx, DMA_RX_SIZE, true);
2885 - /* Display Tx ring */
2886 - priv->hw->desc->display_ring(head_tx, DMA_TX_SIZE, false);
2887 + /* Display TX ring */
2888 + stmmac_display_tx_rings(priv);
2891 static int stmmac_set_bfsize(int mtu, int bufsize)
2892 @@ -928,48 +1048,88 @@ static int stmmac_set_bfsize(int mtu, in
2896 - * stmmac_clear_descriptors - clear descriptors
2897 + * stmmac_clear_rx_descriptors - clear RX descriptors
2898 * @priv: driver private structure
2899 - * Description: this function is called to clear the tx and rx descriptors
2900 + * @queue: RX queue index
2901 + * Description: this function is called to clear the RX descriptors
2902 * in case of both basic and extended descriptors are used.
2904 -static void stmmac_clear_descriptors(struct stmmac_priv *priv)
2905 +static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv, u32 queue)
2907 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2910 - /* Clear the Rx/Tx descriptors */
2911 + /* Clear the RX descriptors */
2912 for (i = 0; i < DMA_RX_SIZE; i++)
2913 if (priv->extend_desc)
2914 - priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
2915 + priv->hw->desc->init_rx_desc(&rx_q->dma_erx[i].basic,
2916 priv->use_riwt, priv->mode,
2917 (i == DMA_RX_SIZE - 1));
2918 else
2919 - priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
2920 + priv->hw->desc->init_rx_desc(&rx_q->dma_rx[i],
2921 priv->use_riwt, priv->mode,
2922 (i == DMA_RX_SIZE - 1));
2926 + * stmmac_clear_tx_descriptors - clear tx descriptors
2927 + * @priv: driver private structure
2928 + * @queue: TX queue index.
2929 + * Description: this function is called to clear the TX descriptors
2930 + * whether basic or extended descriptors are in use.
2932 +static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv, u32 queue)
2934 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
2937 + /* Clear the TX descriptors */
2938 for (i = 0; i < DMA_TX_SIZE; i++)
2939 if (priv->extend_desc)
2940 - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
2941 + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
2942 priv->mode,
2943 (i == DMA_TX_SIZE - 1));
2944 else
2945 - priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
2946 + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
2947 priv->mode,
2948 (i == DMA_TX_SIZE - 1));
2952 + * stmmac_clear_descriptors - clear descriptors
2953 + * @priv: driver private structure
2954 + * Description: this function is called to clear the TX and RX descriptors
2955 + * whether basic or extended descriptors are in use.
2957 +static void stmmac_clear_descriptors(struct stmmac_priv *priv)
2959 + u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
2960 + u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
2963 + /* Clear the RX descriptors */
2964 + for (queue = 0; queue < rx_queue_cnt; queue++)
2965 + stmmac_clear_rx_descriptors(priv, queue);
2967 + /* Clear the TX descriptors */
2968 + for (queue = 0; queue < tx_queue_cnt; queue++)
2969 + stmmac_clear_tx_descriptors(priv, queue);
2973 * stmmac_init_rx_buffers - init the RX descriptor buffer.
2974 * @priv: driver private structure
2975 * @p: descriptor pointer
2976 * @i: descriptor index
2977 - * @flags: gfp flag.
2978 + * @flags: gfp flag
2979 + * @queue: RX queue index
2980 * Description: this function is called to allocate a receive buffer, perform
2981 * the DMA mapping and init the descriptor.
2983 static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
2984 - int i, gfp_t flags)
2985 + int i, gfp_t flags, u32 queue)
2987 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
2988 struct sk_buff *skb;
2990 skb = __netdev_alloc_skb_ip_align(priv->dev, priv->dma_buf_sz, flags);
2991 @@ -978,20 +1138,20 @@ static int stmmac_init_rx_buffers(struct
2992 "%s: Rx init fails; skb is NULL\n", __func__);
2995 - priv->rx_skbuff[i] = skb;
2996 - priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
2997 + rx_q->rx_skbuff[i] = skb;
2998 + rx_q->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
2999 priv->dma_buf_sz,
3000 DMA_FROM_DEVICE);
3001 - if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
3002 + if (dma_mapping_error(priv->device, rx_q->rx_skbuff_dma[i])) {
3003 netdev_err(priv->dev, "%s: DMA mapping error\n", __func__);
3004 dev_kfree_skb_any(skb);
3008 if (priv->synopsys_id >= DWMAC_CORE_4_00)
3009 - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[i]);
3010 + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
3011 else
3012 - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[i]);
3013 + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[i]);
3015 if ((priv->hw->mode->init_desc3) &&
3016 (priv->dma_buf_sz == BUF_SIZE_16KiB))
3017 @@ -1000,30 +1160,71 @@ static int stmmac_init_rx_buffers(struct
3021 -static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
3023 + * stmmac_free_rx_buffer - free RX dma buffers
3024 + * @priv: private structure
3025 + * @queue: RX queue index
3026 + * @i: buffer index.
3028 +static void stmmac_free_rx_buffer(struct stmmac_priv *priv, u32 queue, int i)
3030 - if (priv->rx_skbuff[i]) {
3031 - dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
3032 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3034 + if (rx_q->rx_skbuff[i]) {
3035 + dma_unmap_single(priv->device, rx_q->rx_skbuff_dma[i],
3036 priv->dma_buf_sz, DMA_FROM_DEVICE);
3037 - dev_kfree_skb_any(priv->rx_skbuff[i]);
3038 + dev_kfree_skb_any(rx_q->rx_skbuff[i]);
3040 - priv->rx_skbuff[i] = NULL;
3041 + rx_q->rx_skbuff[i] = NULL;
3045 - * init_dma_desc_rings - init the RX/TX descriptor rings
3046 + * stmmac_free_tx_buffer - free TX dma buffers
3047 + * @priv: private structure
3048 + * @queue: TX queue index
3049 + * @i: buffer index.
3051 +static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
3053 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3055 + if (tx_q->tx_skbuff_dma[i].buf) {
3056 + if (tx_q->tx_skbuff_dma[i].map_as_page)
3057 + dma_unmap_page(priv->device,
3058 + tx_q->tx_skbuff_dma[i].buf,
3059 + tx_q->tx_skbuff_dma[i].len,
3060 + DMA_TO_DEVICE);
3061 + else
3062 + dma_unmap_single(priv->device,
3063 + tx_q->tx_skbuff_dma[i].buf,
3064 + tx_q->tx_skbuff_dma[i].len,
3065 + DMA_TO_DEVICE);
3068 + if (tx_q->tx_skbuff[i]) {
3069 + dev_kfree_skb_any(tx_q->tx_skbuff[i]);
3070 + tx_q->tx_skbuff[i] = NULL;
3071 + tx_q->tx_skbuff_dma[i].buf = 0;
3072 + tx_q->tx_skbuff_dma[i].map_as_page = false;
3077 + * init_dma_rx_desc_rings - init the RX descriptor rings
3078 * @dev: net device structure
3080 - * Description: this function initializes the DMA RX/TX descriptors
3081 + * Description: this function initializes the DMA RX descriptors
3082 * and allocates the socket buffers. It supports the chained and ring
3085 -static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
3086 +static int init_dma_rx_desc_rings(struct net_device *dev, gfp_t flags)
3089 struct stmmac_priv *priv = netdev_priv(dev);
3090 + u32 rx_count = priv->plat->rx_queues_to_use;
3091 unsigned int bfsize = 0;
3096 if (priv->hw->mode->set_16kib_bfsize)
3097 bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
3098 @@ -1033,235 +1234,409 @@ static int init_dma_desc_rings(struct ne
3100 priv->dma_buf_sz = bfsize;
3102 - netif_dbg(priv, probe, priv->dev,
3103 - "(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n",
3104 - __func__, (u32)priv->dma_rx_phy, (u32)priv->dma_tx_phy);
3106 /* RX INITIALIZATION */
3107 netif_dbg(priv, probe, priv->dev,
3108 "SKB addresses:\nskb\t\tskb data\tdma data\n");
3110 - for (i = 0; i < DMA_RX_SIZE; i++) {
3111 - struct dma_desc *p;
3112 - if (priv->extend_desc)
3113 - p = &((priv->dma_erx + i)->basic);
3115 - p = priv->dma_rx + i;
3116 + for (queue = 0; queue < rx_count; queue++) {
3117 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3119 - ret = stmmac_init_rx_buffers(priv, p, i, flags);
3120 - if (ret)
3121 - goto err_init_rx_buffers;
3122 + netif_dbg(priv, probe, priv->dev,
3123 + "(%s) dma_rx_phy=0x%08x\n", __func__,
3124 + (u32)rx_q->dma_rx_phy);
3126 - netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
3127 - priv->rx_skbuff[i], priv->rx_skbuff[i]->data,
3128 - (unsigned int)priv->rx_skbuff_dma[i]);
3129 + for (i = 0; i < DMA_RX_SIZE; i++) {
3130 + struct dma_desc *p;
3132 + if (priv->extend_desc)
3133 + p = &((rx_q->dma_erx + i)->basic);
3135 + p = rx_q->dma_rx + i;
3137 + ret = stmmac_init_rx_buffers(priv, p, i, flags,
3138 + queue);
3139 + if (ret)
3140 + goto err_init_rx_buffers;
3142 + netif_dbg(priv, probe, priv->dev, "[%p]\t[%p]\t[%x]\n",
3143 + rx_q->rx_skbuff[i], rx_q->rx_skbuff[i]->data,
3144 + (unsigned int)rx_q->rx_skbuff_dma[i]);
3148 + rx_q->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
3150 + stmmac_clear_rx_descriptors(priv, queue);
3152 + /* Setup the chained descriptor addresses */
3153 + if (priv->mode == STMMAC_CHAIN_MODE) {
3154 + if (priv->extend_desc)
3155 + priv->hw->mode->init(rx_q->dma_erx,
3156 + rx_q->dma_rx_phy,
3157 + DMA_RX_SIZE, 1);
3158 + else
3159 + priv->hw->mode->init(rx_q->dma_rx,
3160 + rx_q->dma_rx_phy,
3161 + DMA_RX_SIZE, 0);
3165 - priv->dirty_rx = (unsigned int)(i - DMA_RX_SIZE);
3169 - /* Setup the chained descriptor addresses */
3170 - if (priv->mode == STMMAC_CHAIN_MODE) {
3171 - if (priv->extend_desc) {
3172 - priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
3174 - priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
3177 - priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
3179 - priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
3184 +err_init_rx_buffers:
3185 + while (queue >= 0) {
3186 + while (--i >= 0)
3187 + stmmac_free_rx_buffer(priv, queue, i);
3189 + if (queue == 0)
3190 + break;
3192 + i = DMA_RX_SIZE;
3193 + queue--;
3194 + }
3196 - /* TX INITIALIZATION */
3197 - for (i = 0; i < DMA_TX_SIZE; i++) {
3198 - struct dma_desc *p;
3199 - if (priv->extend_desc)
3200 - p = &((priv->dma_etx + i)->basic);
3202 - p = priv->dma_tx + i;
3206 - if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3214 + * init_dma_tx_desc_rings - init the TX descriptor rings
3215 + * @dev: net device structure.
3216 + * Description: this function initializes the DMA TX descriptors.
3217 + * It supports the chained and ring
3220 +static int init_dma_tx_desc_rings(struct net_device *dev)
3222 + struct stmmac_priv *priv = netdev_priv(dev);
3223 + u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
3227 + for (queue = 0; queue < tx_queue_cnt; queue++) {
3228 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3230 + netif_dbg(priv, probe, priv->dev,
3231 + "(%s) dma_tx_phy=0x%08x\n", __func__,
3232 + (u32)tx_q->dma_tx_phy);
3234 + /* Setup the chained descriptor addresses */
3235 + if (priv->mode == STMMAC_CHAIN_MODE) {
3236 + if (priv->extend_desc)
3237 + priv->hw->mode->init(tx_q->dma_etx,
3238 + tx_q->dma_tx_phy,
3239 + DMA_TX_SIZE, 1);
3240 + else
3241 + priv->hw->mode->init(tx_q->dma_tx,
3242 + tx_q->dma_tx_phy,
3243 + DMA_TX_SIZE, 0);
3246 + for (i = 0; i < DMA_TX_SIZE; i++) {
3247 + struct dma_desc *p;
3248 + if (priv->extend_desc)
3249 + p = &((tx_q->dma_etx + i)->basic);
3251 + p = tx_q->dma_tx + i;
3253 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3254 + p->des0 = 0;
3255 + p->des1 = 0;
3256 + p->des2 = 0;
3257 + p->des3 = 0;
3258 + } else {
3259 + p->des2 = 0;
3260 + }
3262 + tx_q->tx_skbuff_dma[i].buf = 0;
3263 + tx_q->tx_skbuff_dma[i].map_as_page = false;
3264 + tx_q->tx_skbuff_dma[i].len = 0;
3265 + tx_q->tx_skbuff_dma[i].last_segment = false;
3266 + tx_q->tx_skbuff[i] = NULL;
3269 - priv->tx_skbuff_dma[i].buf = 0;
3270 - priv->tx_skbuff_dma[i].map_as_page = false;
3271 - priv->tx_skbuff_dma[i].len = 0;
3272 - priv->tx_skbuff_dma[i].last_segment = false;
3273 - priv->tx_skbuff[i] = NULL;
3274 + tx_q->dirty_tx = 0;
3277 + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
3280 - priv->dirty_tx = 0;
3282 - netdev_reset_queue(priv->dev);
3287 + * init_dma_desc_rings - init the RX/TX descriptor rings
3288 + * @dev: net device structure
3289 + * @flags: gfp flag.
3290 + * Description: this function initializes the DMA RX/TX descriptors
3291 + * and allocates the socket buffers. It supports the chained and ring
3294 +static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
3296 + struct stmmac_priv *priv = netdev_priv(dev);
3299 + ret = init_dma_rx_desc_rings(dev, flags);
3300 + if (ret)
3301 + return ret;
3303 + ret = init_dma_tx_desc_rings(dev);
3305 stmmac_clear_descriptors(priv);
3307 if (netif_msg_hw(priv))
3308 stmmac_display_rings(priv);
3311 -err_init_rx_buffers:
3313 - stmmac_free_rx_buffers(priv, i);
3317 -static void dma_free_rx_skbufs(struct stmmac_priv *priv)
3319 + * dma_free_rx_skbufs - free RX dma buffers
3320 + * @priv: private structure
3321 + * @queue: RX queue index
3323 +static void dma_free_rx_skbufs(struct stmmac_priv *priv, u32 queue)
3327 for (i = 0; i < DMA_RX_SIZE; i++)
3328 - stmmac_free_rx_buffers(priv, i);
3329 + stmmac_free_rx_buffer(priv, queue, i);
3332 -static void dma_free_tx_skbufs(struct stmmac_priv *priv)
3334 + * dma_free_tx_skbufs - free TX dma buffers
3335 + * @priv: private structure
3336 + * @queue: TX queue index
3338 +static void dma_free_tx_skbufs(struct stmmac_priv *priv, u32 queue)
3342 - for (i = 0; i < DMA_TX_SIZE; i++) {
3343 - if (priv->tx_skbuff_dma[i].buf) {
3344 - if (priv->tx_skbuff_dma[i].map_as_page)
3345 - dma_unmap_page(priv->device,
3346 - priv->tx_skbuff_dma[i].buf,
3347 - priv->tx_skbuff_dma[i].len,
3350 - dma_unmap_single(priv->device,
3351 - priv->tx_skbuff_dma[i].buf,
3352 - priv->tx_skbuff_dma[i].len,
3355 + for (i = 0; i < DMA_TX_SIZE; i++)
3356 + stmmac_free_tx_buffer(priv, queue, i);
3359 - if (priv->tx_skbuff[i]) {
3360 - dev_kfree_skb_any(priv->tx_skbuff[i]);
3361 - priv->tx_skbuff[i] = NULL;
3362 - priv->tx_skbuff_dma[i].buf = 0;
3363 - priv->tx_skbuff_dma[i].map_as_page = false;
3366 + * free_dma_rx_desc_resources - free RX dma desc resources
3367 + * @priv: private structure
3369 +static void free_dma_rx_desc_resources(struct stmmac_priv *priv)
3371 + u32 rx_count = priv->plat->rx_queues_to_use;
3374 + /* Free RX queue resources */
3375 + for (queue = 0; queue < rx_count; queue++) {
3376 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3378 + /* Release the DMA RX socket buffers */
3379 + dma_free_rx_skbufs(priv, queue);
3381 + /* Free DMA regions of consistent memory previously allocated */
3382 + if (!priv->extend_desc)
3383 + dma_free_coherent(priv->device,
3384 + DMA_RX_SIZE * sizeof(struct dma_desc),
3385 + rx_q->dma_rx, rx_q->dma_rx_phy);
3386 + else
3387 + dma_free_coherent(priv->device, DMA_RX_SIZE *
3388 + sizeof(struct dma_extended_desc),
3389 + rx_q->dma_erx, rx_q->dma_rx_phy);
3391 + kfree(rx_q->rx_skbuff_dma);
3392 + kfree(rx_q->rx_skbuff);
3397 - * alloc_dma_desc_resources - alloc TX/RX resources.
3398 + * free_dma_tx_desc_resources - free TX dma desc resources
3399 + * @priv: private structure
3401 +static void free_dma_tx_desc_resources(struct stmmac_priv *priv)
3403 + u32 tx_count = priv->plat->tx_queues_to_use;
3406 + /* Free TX queue resources */
3407 + for (queue = 0; queue < tx_count; queue++) {
3408 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3410 + /* Release the DMA TX socket buffers */
3411 + dma_free_tx_skbufs(priv, queue);
3413 + /* Free DMA regions of consistent memory previously allocated */
3414 + if (!priv->extend_desc)
3415 + dma_free_coherent(priv->device,
3416 + DMA_TX_SIZE * sizeof(struct dma_desc),
3417 + tx_q->dma_tx, tx_q->dma_tx_phy);
3418 + else
3419 + dma_free_coherent(priv->device, DMA_TX_SIZE *
3420 + sizeof(struct dma_extended_desc),
3421 + tx_q->dma_etx, tx_q->dma_tx_phy);
3423 + kfree(tx_q->tx_skbuff_dma);
3424 + kfree(tx_q->tx_skbuff);
3429 + * alloc_dma_rx_desc_resources - alloc RX resources.
3430 * @priv: private structure
3431 * Description: according to which descriptor can be used (extend or basic)
3432 + * this function allocates the resources for the RX path. For reception it
3433 + * pre-allocates the RX socket buffers in order to allow the zero-copy
3434 + * mechanism.
3436 -static int alloc_dma_desc_resources(struct stmmac_priv *priv)
3437 +static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv)
3439 + u32 rx_count = priv->plat->rx_queues_to_use;
3443 - priv->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE, sizeof(dma_addr_t),
3445 - if (!priv->rx_skbuff_dma)
3447 + /* RX queues buffers and DMA */
3448 + for (queue = 0; queue < rx_count; queue++) {
3449 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
3451 - priv->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *),
3453 - if (!priv->rx_skbuff)
3454 - goto err_rx_skbuff;
3456 - priv->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
3457 - sizeof(*priv->tx_skbuff_dma),
3459 - if (!priv->tx_skbuff_dma)
3460 - goto err_tx_skbuff_dma;
3462 - priv->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *),
3464 - if (!priv->tx_skbuff)
3465 - goto err_tx_skbuff;
3467 - if (priv->extend_desc) {
3468 - priv->dma_erx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
3470 - dma_extended_desc),
3471 - &priv->dma_rx_phy,
3473 - if (!priv->dma_erx)
3475 + rx_q->queue_index = queue;
3476 + rx_q->priv_data = priv;
3478 - priv->dma_etx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
3480 - dma_extended_desc),
3481 - &priv->dma_tx_phy,
3482 + rx_q->rx_skbuff_dma = kmalloc_array(DMA_RX_SIZE,
3483 + sizeof(dma_addr_t),
3484 + GFP_KERNEL);
3485 - if (!priv->dma_etx) {
3486 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3487 - sizeof(struct dma_extended_desc),
3488 - priv->dma_erx, priv->dma_rx_phy);
3492 - priv->dma_rx = dma_zalloc_coherent(priv->device, DMA_RX_SIZE *
3493 - sizeof(struct dma_desc),
3494 - &priv->dma_rx_phy,
3496 - if (!priv->dma_rx)
3498 + if (!rx_q->rx_skbuff_dma)
3499 + goto err_dma;
3501 - priv->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE *
3502 - sizeof(struct dma_desc),
3503 - &priv->dma_tx_phy,
3505 - if (!priv->dma_tx) {
3506 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3507 - sizeof(struct dma_desc),
3508 - priv->dma_rx, priv->dma_rx_phy);
3509 + rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE,
3510 + sizeof(struct sk_buff *),
3511 + GFP_KERNEL);
3512 + if (!rx_q->rx_skbuff)
3513 + goto err_dma;
3515 + if (priv->extend_desc) {
3516 + rx_q->dma_erx = dma_zalloc_coherent(priv->device,
3517 + DMA_RX_SIZE *
3518 + sizeof(struct
3519 + dma_extended_desc),
3520 + &rx_q->dma_rx_phy,
3521 + GFP_KERNEL);
3522 + if (!rx_q->dma_erx)
3523 + goto err_dma;
3526 + rx_q->dma_rx = dma_zalloc_coherent(priv->device,
3527 + DMA_RX_SIZE *
3528 + sizeof(struct
3529 + dma_desc),
3530 + &rx_q->dma_rx_phy,
3531 + GFP_KERNEL);
3532 + if (!rx_q->dma_rx)
3533 + goto err_dma;
3540 - kfree(priv->tx_skbuff);
3542 - kfree(priv->tx_skbuff_dma);
3544 - kfree(priv->rx_skbuff);
3546 - kfree(priv->rx_skbuff_dma);
3547 + free_dma_rx_desc_resources(priv);
3553 + * alloc_dma_tx_desc_resources - alloc TX resources.
3554 + * @priv: private structure
3555 + * Description: according to which descriptor can be used (extend or basic)
3556 + * this function allocates the resources for the TX path: the descriptor
3557 + * rings and the per-entry skb bookkeeping (no socket buffers are
3558 + * pre-allocated for transmission).
3560 +static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv)
3562 + u32 tx_count = priv->plat->tx_queues_to_use;
3563 + int ret = -ENOMEM;
3566 + /* TX queues buffers and DMA */
3567 + for (queue = 0; queue < tx_count; queue++) {
3568 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3570 + tx_q->queue_index = queue;
3571 + tx_q->priv_data = priv;
3573 + tx_q->tx_skbuff_dma = kmalloc_array(DMA_TX_SIZE,
3574 + sizeof(*tx_q->tx_skbuff_dma),
3575 + GFP_KERNEL);
3576 + if (!tx_q->tx_skbuff_dma)
3577 + goto err_dma_buffers;
3579 + tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE,
3580 + sizeof(struct sk_buff *),
3581 + GFP_KERNEL);
3582 + if (!tx_q->tx_skbuff)
3583 + goto err_dma_buffers;
3585 + if (priv->extend_desc) {
3586 + tx_q->dma_etx = dma_zalloc_coherent(priv->device,
3587 + DMA_TX_SIZE *
3588 + sizeof(struct
3589 + dma_extended_desc),
3590 + &tx_q->dma_tx_phy,
3591 + GFP_KERNEL);
3592 + if (!tx_q->dma_etx)
3593 + goto err_dma_buffers;
3595 + tx_q->dma_tx = dma_zalloc_coherent(priv->device,
3596 + DMA_TX_SIZE *
3597 + sizeof(struct
3598 + dma_desc),
3599 + &tx_q->dma_tx_phy,
3600 + GFP_KERNEL);
3601 + if (!tx_q->dma_tx)
3602 + goto err_dma_buffers;
3609 + free_dma_tx_desc_resources(priv);
3615 + * alloc_dma_desc_resources - alloc TX/RX resources.
3616 + * @priv: private structure
3617 + * Description: according to which descriptor can be used (extend or basic)
3618 + * this function allocates the resources for TX and RX paths. In case of
3619 + * reception, for example, it pre-allocated the RX socket buffer in order to
3620 + * allow zero-copy mechanism.
3622 +static int alloc_dma_desc_resources(struct stmmac_priv *priv)
3624 + /* RX Allocation */
3625 + int ret = alloc_dma_rx_desc_resources(priv);
3627 + if (ret)
3628 + return ret;
3630 + ret = alloc_dma_tx_desc_resources(priv);
3632 + return ret;
3636 + * free_dma_desc_resources - free dma desc resources
3637 + * @priv: private structure
3639 static void free_dma_desc_resources(struct stmmac_priv *priv)
3641 - /* Release the DMA TX/RX socket buffers */
3642 - dma_free_rx_skbufs(priv);
3643 - dma_free_tx_skbufs(priv);
3645 - /* Free DMA regions of consistent memory previously allocated */
3646 - if (!priv->extend_desc) {
3647 - dma_free_coherent(priv->device,
3648 - DMA_TX_SIZE * sizeof(struct dma_desc),
3649 - priv->dma_tx, priv->dma_tx_phy);
3650 - dma_free_coherent(priv->device,
3651 - DMA_RX_SIZE * sizeof(struct dma_desc),
3652 - priv->dma_rx, priv->dma_rx_phy);
3654 - dma_free_coherent(priv->device, DMA_TX_SIZE *
3655 - sizeof(struct dma_extended_desc),
3656 - priv->dma_etx, priv->dma_tx_phy);
3657 - dma_free_coherent(priv->device, DMA_RX_SIZE *
3658 - sizeof(struct dma_extended_desc),
3659 - priv->dma_erx, priv->dma_rx_phy);
3661 - kfree(priv->rx_skbuff_dma);
3662 - kfree(priv->rx_skbuff);
3663 - kfree(priv->tx_skbuff_dma);
3664 - kfree(priv->tx_skbuff);
3665 + /* Release the DMA RX socket buffers */
3666 + free_dma_rx_desc_resources(priv);
3668 + /* Release the DMA TX socket buffers */
3669 + free_dma_tx_desc_resources(priv);
3673 @@ -1271,19 +1646,104 @@ static void free_dma_desc_resources(stru
3675 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
3677 - int rx_count = priv->dma_cap.number_rx_queues;
3679 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
3683 - /* If GMAC does not have multiple queues, then this is not necessary*/
3684 - if (rx_count == 1)
3686 + for (queue = 0; queue < rx_queues_count; queue++) {
3687 + mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
3688 + priv->hw->mac->rx_queue_enable(priv->hw, mode, queue);
3693 - * If the core is synthesized with multiple rx queues / multiple
3694 - * dma channels, then rx queues will be disabled by default.
3695 - * For now only rx queue 0 is enabled.
3697 - priv->hw->mac->rx_queue_enable(priv->hw, queue);
3699 + * stmmac_start_rx_dma - start RX DMA channel
3700 + * @priv: driver private structure
3701 + * @chan: RX channel index
3703 + * This starts an RX DMA channel
3705 +static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
3707 + netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
3708 + priv->hw->dma->start_rx(priv->ioaddr, chan);
3712 + * stmmac_start_tx_dma - start TX DMA channel
3713 + * @priv: driver private structure
3714 + * @chan: TX channel index
3716 + * This starts a TX DMA channel
3718 +static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
3720 + netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
3721 + priv->hw->dma->start_tx(priv->ioaddr, chan);
3725 + * stmmac_stop_rx_dma - stop RX DMA channel
3726 + * @priv: driver private structure
3727 + * @chan: RX channel index
3729 + * This stops an RX DMA channel
3731 +static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
3733 + netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
3734 + priv->hw->dma->stop_rx(priv->ioaddr, chan);
3738 + * stmmac_stop_tx_dma - stop TX DMA channel
3739 + * @priv: driver private structure
3740 + * @chan: TX channel index
3742 + * This stops a TX DMA channel
3744 +static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
3746 + netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
3747 + priv->hw->dma->stop_tx(priv->ioaddr, chan);
3751 + * stmmac_start_all_dma - start all RX and TX DMA channels
3752 + * @priv: driver private structure
3754 + * This starts all the RX and TX DMA channels
3756 +static void stmmac_start_all_dma(struct stmmac_priv *priv)
3758 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3759 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3762 + for (chan = 0; chan < rx_channels_count; chan++)
3763 + stmmac_start_rx_dma(priv, chan);
3765 + for (chan = 0; chan < tx_channels_count; chan++)
3766 + stmmac_start_tx_dma(priv, chan);
3770 + * stmmac_stop_all_dma - stop all RX and TX DMA channels
3771 + * @priv: driver private structure
3773 + * This stops all the RX and TX DMA channels
3775 +static void stmmac_stop_all_dma(struct stmmac_priv *priv)
3777 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3778 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3781 + for (chan = 0; chan < rx_channels_count; chan++)
3782 + stmmac_stop_rx_dma(priv, chan);
3784 + for (chan = 0; chan < tx_channels_count; chan++)
3785 + stmmac_stop_tx_dma(priv, chan);
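A hedged usage sketch of these wrappers: callers that previously issued a single start/stop now quiesce every channel at once, e.g. around a ring reconfiguration (the middle step here is hypothetical):

	stmmac_stop_all_dma(priv);	/* halt every RX and TX channel */
	/* ... reprogram descriptor rings here (hypothetical step) ... */
	stmmac_start_all_dma(priv);	/* resume all channels */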
3789 @@ -1294,11 +1754,20 @@ static void stmmac_mac_enable_rx_queues(
3791 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
3793 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
3794 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
3795 int rxfifosz = priv->plat->rx_fifo_size;
3797 - if (priv->plat->force_thresh_dma_mode)
3798 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc, rxfifosz);
3799 - else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
3804 + if (rxfifosz == 0)
3805 + rxfifosz = priv->dma_cap.rx_fifo_size;
3807 + if (priv->plat->force_thresh_dma_mode) {
3810 + } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
3812 * In case of GMAC, SF mode can be enabled
3813 * to perform the TX COE in HW. This depends on:
3814 @@ -1306,37 +1775,53 @@ static void stmmac_dma_operation_mode(st
3815 * 2) There is no bugged Jumbo frame support
3816 * that needs to not insert csum in the TDES.
3818 - priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE,
3820 + txmode = SF_DMA_MODE;
3821 + rxmode = SF_DMA_MODE;
3822 priv->xstats.threshold = SF_DMA_MODE;
3824 - priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE,
3827 + rxmode = SF_DMA_MODE;
3830 + /* configure all channels */
3831 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
3832 + for (chan = 0; chan < rx_channels_count; chan++)
3833 + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
3836 + for (chan = 0; chan < tx_channels_count; chan++)
3837 + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
3839 + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
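Some context is elided in the hunk above; a condensed sketch of how txmode/rxmode are chosen before being applied per channel, reconstructed from the visible branches (tc is the module-level threshold value):

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;			/* threshold mode on both paths */
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		txmode = SF_DMA_MODE;		/* store-and-forward, enables TX COE */
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		txmode = tc;			/* threshold TX, store-and-forward RX */
		rxmode = SF_DMA_MODE;
	}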
3845 * stmmac_tx_clean - to manage the transmission completion
3846 * @priv: driver private structure
3847 + * @queue: TX queue index
3848 * Description: it reclaims the transmit resources after transmission completes.
3850 -static void stmmac_tx_clean(struct stmmac_priv *priv)
3851 +static void stmmac_tx_clean(struct stmmac_priv *priv, u32 queue)
3853 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
3854 unsigned int bytes_compl = 0, pkts_compl = 0;
3855 - unsigned int entry = priv->dirty_tx;
3856 + unsigned int entry = tx_q->dirty_tx;
3858 netif_tx_lock(priv->dev);
3860 priv->xstats.tx_clean++;
3862 - while (entry != priv->cur_tx) {
3863 - struct sk_buff *skb = priv->tx_skbuff[entry];
3864 + while (entry != tx_q->cur_tx) {
3865 + struct sk_buff *skb = tx_q->tx_skbuff[entry];
3869 if (priv->extend_desc)
3870 - p = (struct dma_desc *)(priv->dma_etx + entry);
3871 + p = (struct dma_desc *)(tx_q->dma_etx + entry);
3873 - p = priv->dma_tx + entry;
3874 + p = tx_q->dma_tx + entry;
3876 status = priv->hw->desc->tx_status(&priv->dev->stats,
3878 @@ -1357,48 +1842,51 @@ static void stmmac_tx_clean(struct stmma
3879 stmmac_get_tx_hwtstamp(priv, p, skb);
3882 - if (likely(priv->tx_skbuff_dma[entry].buf)) {
3883 - if (priv->tx_skbuff_dma[entry].map_as_page)
3884 + if (likely(tx_q->tx_skbuff_dma[entry].buf)) {
3885 + if (tx_q->tx_skbuff_dma[entry].map_as_page)
3886 dma_unmap_page(priv->device,
3887 - priv->tx_skbuff_dma[entry].buf,
3888 - priv->tx_skbuff_dma[entry].len,
3889 + tx_q->tx_skbuff_dma[entry].buf,
3890 + tx_q->tx_skbuff_dma[entry].len,
3893 dma_unmap_single(priv->device,
3894 - priv->tx_skbuff_dma[entry].buf,
3895 - priv->tx_skbuff_dma[entry].len,
3896 + tx_q->tx_skbuff_dma[entry].buf,
3897 + tx_q->tx_skbuff_dma[entry].len,
3899 - priv->tx_skbuff_dma[entry].buf = 0;
3900 - priv->tx_skbuff_dma[entry].len = 0;
3901 - priv->tx_skbuff_dma[entry].map_as_page = false;
3902 + tx_q->tx_skbuff_dma[entry].buf = 0;
3903 + tx_q->tx_skbuff_dma[entry].len = 0;
3904 + tx_q->tx_skbuff_dma[entry].map_as_page = false;
3907 if (priv->hw->mode->clean_desc3)
3908 - priv->hw->mode->clean_desc3(priv, p);
3909 + priv->hw->mode->clean_desc3(tx_q, p);
3911 - priv->tx_skbuff_dma[entry].last_segment = false;
3912 - priv->tx_skbuff_dma[entry].is_jumbo = false;
3913 + tx_q->tx_skbuff_dma[entry].last_segment = false;
3914 + tx_q->tx_skbuff_dma[entry].is_jumbo = false;
3916 if (likely(skb != NULL)) {
3918 bytes_compl += skb->len;
3919 dev_consume_skb_any(skb);
3920 - priv->tx_skbuff[entry] = NULL;
3921 + tx_q->tx_skbuff[entry] = NULL;
3924 priv->hw->desc->release_tx_desc(p, priv->mode);
3926 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
3928 - priv->dirty_tx = entry;
3929 + tx_q->dirty_tx = entry;
3931 + netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
3932 + pkts_compl, bytes_compl);
3934 - netdev_completed_queue(priv->dev, pkts_compl, bytes_compl);
3935 + if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
3937 + stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH) {
3939 - if (unlikely(netif_queue_stopped(priv->dev) &&
3940 - stmmac_tx_avail(priv) > STMMAC_TX_THRESH)) {
3941 netif_dbg(priv, tx_done, priv->dev,
3942 "%s: restart transmit\n", __func__);
3943 - netif_wake_queue(priv->dev);
3944 + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
3947 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
3948 @@ -1408,45 +1896,76 @@ static void stmmac_tx_clean(struct stmma
3949 netif_tx_unlock(priv->dev);
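With per-queue cleaning, byte-queue-limit accounting is paired per TX queue as well; the two halves of the pairing as established by this patch:

	/* at transmit time (stmmac_xmit/stmmac_tso_xmit) */
	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	/* at completion time (stmmac_tx_clean above) */
	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);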
3952 -static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
3953 +static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv, u32 chan)
3955 - priv->hw->dma->enable_dma_irq(priv->ioaddr);
3956 + priv->hw->dma->enable_dma_irq(priv->ioaddr, chan);
3959 -static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
3960 +static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv, u32 chan)
3962 - priv->hw->dma->disable_dma_irq(priv->ioaddr);
3963 + priv->hw->dma->disable_dma_irq(priv->ioaddr, chan);
3967 * stmmac_tx_err - to manage the tx error
3968 * @priv: driver private structure
3969 + * @chan: channel index
3970 * Description: it cleans the descriptors and restarts the transmission
3971 * in case of transmission errors.
3973 -static void stmmac_tx_err(struct stmmac_priv *priv)
3974 +static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
3976 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan];
3978 - netif_stop_queue(priv->dev);
3980 - priv->hw->dma->stop_tx(priv->ioaddr);
3981 - dma_free_tx_skbufs(priv);
3982 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
3984 + stmmac_stop_tx_dma(priv, chan);
3985 + dma_free_tx_skbufs(priv, chan);
3986 for (i = 0; i < DMA_TX_SIZE; i++)
3987 if (priv->extend_desc)
3988 - priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
3989 + priv->hw->desc->init_tx_desc(&tx_q->dma_etx[i].basic,
3991 (i == DMA_TX_SIZE - 1));
3993 - priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
3994 + priv->hw->desc->init_tx_desc(&tx_q->dma_tx[i],
3996 (i == DMA_TX_SIZE - 1));
3997 - priv->dirty_tx = 0;
3999 - netdev_reset_queue(priv->dev);
4000 - priv->hw->dma->start_tx(priv->ioaddr);
4001 + tx_q->dirty_tx = 0;
4003 + netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan));
4004 + stmmac_start_tx_dma(priv, chan);
4006 priv->dev->stats.tx_errors++;
4007 - netif_wake_queue(priv->dev);
4008 + netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
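In outline, the per-channel recovery sequence implemented above:

	/*
	 * stmmac_tx_err(priv, chan):
	 *   netif_tx_stop_queue()    - stop feeding this queue
	 *   stmmac_stop_tx_dma()     - halt the DMA channel
	 *   dma_free_tx_skbufs()     - drop in-flight buffers
	 *   init_tx_desc() loop      - rebuild the descriptor ring
	 *   netdev_tx_reset_queue()  - reset BQL state for the queue
	 *   stmmac_start_tx_dma()    - restart the channel
	 *   netif_tx_wake_queue()    - resume transmission
	 */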
4012 + * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
4013 + * @priv: driver private structure
4014 + * @txmode: TX operating mode
4015 + * @rxmode: RX operating mode
4016 + * @chan: channel index
4017 + * Description: it is used for configuring the DMA operation mode at
4018 + * runtime in order to program the TX/RX DMA thresholds or Store-And-Forward
4019 + * mode.
4021 +static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
4022 + u32 rxmode, u32 chan)
4024 + int rxfifosz = priv->plat->rx_fifo_size;
4026 + if (rxfifosz == 0)
4027 + rxfifosz = priv->dma_cap.rx_fifo_size;
4029 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4030 + priv->hw->dma->dma_rx_mode(priv->ioaddr, rxmode, chan,
4032 + priv->hw->dma->dma_tx_mode(priv->ioaddr, txmode, chan);
4034 + priv->hw->dma->dma_mode(priv->ioaddr, txmode, rxmode,
4040 @@ -1458,31 +1977,43 @@ static void stmmac_tx_err(struct stmmac_
4042 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
4044 + u32 tx_channel_count = priv->plat->tx_queues_to_use;
4046 - int rxfifosz = priv->plat->rx_fifo_size;
4049 + for (chan = 0; chan < tx_channel_count; chan++) {
4050 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[chan];
4052 - status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
4053 - if (likely((status & handle_rx)) || (status & handle_tx)) {
4054 - if (likely(napi_schedule_prep(&priv->napi))) {
4055 - stmmac_disable_dma_irq(priv);
4056 - __napi_schedule(&priv->napi);
4057 + status = priv->hw->dma->dma_interrupt(priv->ioaddr,
4058 + &priv->xstats, chan);
4059 + if (likely((status & handle_rx)) || (status & handle_tx)) {
4060 + if (likely(napi_schedule_prep(&rx_q->napi))) {
4061 + stmmac_disable_dma_irq(priv, chan);
4062 + __napi_schedule(&rx_q->napi);
4066 - if (unlikely(status & tx_hard_error_bump_tc)) {
4067 - /* Try to bump up the dma threshold on this failure */
4068 - if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4071 - if (priv->plat->force_thresh_dma_mode)
4072 - priv->hw->dma->dma_mode(priv->ioaddr, tc, tc,
4075 - priv->hw->dma->dma_mode(priv->ioaddr, tc,
4076 - SF_DMA_MODE, rxfifosz);
4077 - priv->xstats.threshold = tc;
4079 + if (unlikely(status & tx_hard_error_bump_tc)) {
4080 + /* Try to bump up the dma threshold on this failure */
4081 + if (unlikely(priv->xstats.threshold != SF_DMA_MODE) &&
4084 + if (priv->plat->force_thresh_dma_mode)
4085 + stmmac_set_dma_operation_mode(priv,
4090 + stmmac_set_dma_operation_mode(priv,
4094 + priv->xstats.threshold = tc;
4096 + } else if (unlikely(status == tx_hard_error)) {
4097 + stmmac_tx_err(priv, chan);
4099 - } else if (unlikely(status == tx_hard_error))
4100 - stmmac_tx_err(priv);
4105 @@ -1589,6 +2120,13 @@ static void stmmac_check_ether_addr(stru
4107 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
4109 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
4110 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
4111 + struct stmmac_rx_queue *rx_q;
4112 + struct stmmac_tx_queue *tx_q;
4113 + u32 dummy_dma_rx_phy = 0;
4114 + u32 dummy_dma_tx_phy = 0;
4119 @@ -1606,19 +2144,49 @@ static int stmmac_init_dma_engine(struct
4123 - priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4124 - priv->dma_tx_phy, priv->dma_rx_phy, atds);
4126 if (priv->synopsys_id >= DWMAC_CORE_4_00) {
4127 - priv->rx_tail_addr = priv->dma_rx_phy +
4128 - (DMA_RX_SIZE * sizeof(struct dma_desc));
4129 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr, priv->rx_tail_addr,
4132 - priv->tx_tail_addr = priv->dma_tx_phy +
4133 - (DMA_TX_SIZE * sizeof(struct dma_desc));
4134 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4136 + /* DMA Configuration */
4137 + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4138 + dummy_dma_tx_phy, dummy_dma_rx_phy, atds);
4140 + /* DMA RX Channel Configuration */
4141 + for (chan = 0; chan < rx_channels_count; chan++) {
4142 + rx_q = &priv->rx_queue[chan];
4144 + priv->hw->dma->init_rx_chan(priv->ioaddr,
4145 + priv->plat->dma_cfg,
4146 + rx_q->dma_rx_phy, chan);
4148 + rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4149 + (DMA_RX_SIZE * sizeof(struct dma_desc));
4150 + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
4151 + rx_q->rx_tail_addr,
4155 + /* DMA TX Channel Configuration */
4156 + for (chan = 0; chan < tx_channels_count; chan++) {
4157 + tx_q = &priv->tx_queue[chan];
4159 + priv->hw->dma->init_chan(priv->ioaddr,
4160 + priv->plat->dma_cfg,
4163 + priv->hw->dma->init_tx_chan(priv->ioaddr,
4164 + priv->plat->dma_cfg,
4165 + tx_q->dma_tx_phy, chan);
4167 + tx_q->tx_tail_addr = tx_q->dma_tx_phy +
4168 + (DMA_TX_SIZE * sizeof(struct dma_desc));
4169 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr,
4170 + tx_q->tx_tail_addr,
4174 + rx_q = &priv->rx_queue[chan];
4175 + tx_q = &priv->tx_queue[chan];
4176 + priv->hw->dma->init(priv->ioaddr, priv->plat->dma_cfg,
4177 + tx_q->dma_tx_phy, rx_q->dma_rx_phy, atds);
4180 if (priv->plat->axi && priv->hw->dma->axi)
4181 @@ -1636,8 +2204,12 @@ static int stmmac_init_dma_engine(struct
4182 static void stmmac_tx_timer(unsigned long data)
4184 struct stmmac_priv *priv = (struct stmmac_priv *)data;
4185 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4188 - stmmac_tx_clean(priv);
4189 + /* let's scan all the tx queues */
4190 + for (queue = 0; queue < tx_queues_count; queue++)
4191 + stmmac_tx_clean(priv, queue);
4195 @@ -1659,6 +2231,196 @@ static void stmmac_init_tx_coalesce(stru
4196 add_timer(&priv->txtimer);
4199 +static void stmmac_set_rings_length(struct stmmac_priv *priv)
4201 + u32 rx_channels_count = priv->plat->rx_queues_to_use;
4202 + u32 tx_channels_count = priv->plat->tx_queues_to_use;
4205 + /* set TX ring length */
4206 + if (priv->hw->dma->set_tx_ring_len) {
4207 + for (chan = 0; chan < tx_channels_count; chan++)
4208 + priv->hw->dma->set_tx_ring_len(priv->ioaddr,
4209 + (DMA_TX_SIZE - 1), chan);
4212 + /* set RX ring length */
4213 + if (priv->hw->dma->set_rx_ring_len) {
4214 + for (chan = 0; chan < rx_channels_count; chan++)
4215 + priv->hw->dma->set_rx_ring_len(priv->ioaddr,
4216 + (DMA_RX_SIZE - 1), chan);
4221 + * stmmac_set_tx_queue_weight - Set TX queue weight
4222 + * @priv: driver private structure
4223 + * Description: It is used for setting the TX queue weights
4225 +static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
4227 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4231 + for (queue = 0; queue < tx_queues_count; queue++) {
4232 + weight = priv->plat->tx_queues_cfg[queue].weight;
4233 + priv->hw->mac->set_mtl_tx_queue_weight(priv->hw, weight, queue);
4238 + * stmmac_configure_cbs - Configure CBS in TX queue
4239 + * @priv: driver private structure
4240 + * Description: It is used for configuring CBS in AVB TX queues
4242 +static void stmmac_configure_cbs(struct stmmac_priv *priv)
4244 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4248 + /* queue 0 is reserved for legacy traffic */
4249 + for (queue = 1; queue < tx_queues_count; queue++) {
4250 + mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
4251 + if (mode_to_use == MTL_QUEUE_DCB)
4252 + continue;
4254 + priv->hw->mac->config_cbs(priv->hw,
4255 + priv->plat->tx_queues_cfg[queue].send_slope,
4256 + priv->plat->tx_queues_cfg[queue].idle_slope,
4257 + priv->plat->tx_queues_cfg[queue].high_credit,
4258 + priv->plat->tx_queues_cfg[queue].low_credit,
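The CBS knobs consumed here come from per-queue platform data; a hypothetical configuration sketch (the slope/credit encodings are hardware-specific, so the values below are placeholders, not recommendations):

	/* hypothetical AVB setup for TX queue 1 (queue 0 stays legacy) */
	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	plat->tx_queues_cfg[1].send_slope  = 0x0;	/* placeholder */
	plat->tx_queues_cfg[1].idle_slope  = 0x0;	/* placeholder */
	plat->tx_queues_cfg[1].high_credit = 0x0;	/* placeholder */
	plat->tx_queues_cfg[1].low_credit  = 0x0;	/* placeholder */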
4264 + * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
4265 + * @priv: driver private structure
4266 + * Description: It is used for mapping RX queues to RX DMA channels
4268 +static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
4270 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4274 + for (queue = 0; queue < rx_queues_count; queue++) {
4275 + chan = priv->plat->rx_queues_cfg[queue].chan;
4276 + priv->hw->mac->map_mtl_to_dma(priv->hw, queue, chan);
4281 + * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
4282 + * @priv: driver private structure
4283 + * Description: It is used for configuring the RX Queue Priority
4285 +static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
4287 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4291 + for (queue = 0; queue < rx_queues_count; queue++) {
4292 + if (!priv->plat->rx_queues_cfg[queue].use_prio)
4295 + prio = priv->plat->rx_queues_cfg[queue].prio;
4296 + priv->hw->mac->rx_queue_prio(priv->hw, prio, queue);
4301 + * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
4302 + * @priv: driver private structure
4303 + * Description: It is used for configuring the TX Queue Priority
4305 +static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
4307 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4311 + for (queue = 0; queue < tx_queues_count; queue++) {
4312 + if (!priv->plat->tx_queues_cfg[queue].use_prio)
4315 + prio = priv->plat->tx_queues_cfg[queue].prio;
4316 + priv->hw->mac->tx_queue_prio(priv->hw, prio, queue);
4321 + * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
4322 + * @priv: driver private structure
4323 + * Description: It is used for configuring the RX queue routing
4325 +static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
4327 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4331 + for (queue = 0; queue < rx_queues_count; queue++) {
4332 + /* no specific packet type routing specified for the queue */
4333 + if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
4336 + packet = priv->plat->rx_queues_cfg[queue].pkt_route;
4337 + priv->hw->mac->rx_queue_routing(priv->hw, packet, queue);
4342 + * stmmac_mtl_configuration - Configure MTL
4343 + * @priv: driver private structure
4344 + * Description: It is used for configuring the MTL
4346 +static void stmmac_mtl_configuration(struct stmmac_priv *priv)
4348 + u32 rx_queues_count = priv->plat->rx_queues_to_use;
4349 + u32 tx_queues_count = priv->plat->tx_queues_to_use;
4351 + if (tx_queues_count > 1 && priv->hw->mac->set_mtl_tx_queue_weight)
4352 + stmmac_set_tx_queue_weight(priv);
4354 + /* Configure MTL RX algorithms */
4355 + if (rx_queues_count > 1 && priv->hw->mac->prog_mtl_rx_algorithms)
4356 + priv->hw->mac->prog_mtl_rx_algorithms(priv->hw,
4357 + priv->plat->rx_sched_algorithm);
4359 + /* Configure MTL TX algorithms */
4360 + if (tx_queues_count > 1 && priv->hw->mac->prog_mtl_tx_algorithms)
4361 + priv->hw->mac->prog_mtl_tx_algorithms(priv->hw,
4362 + priv->plat->tx_sched_algorithm);
4364 + /* Configure CBS in AVB TX queues */
4365 + if (tx_queues_count > 1 && priv->hw->mac->config_cbs)
4366 + stmmac_configure_cbs(priv);
4368 + /* Map RX MTL to DMA channels */
4369 + if (priv->hw->mac->map_mtl_to_dma)
4370 + stmmac_rx_queue_dma_chan_map(priv);
4372 + /* Enable MAC RX Queues */
4373 + if (priv->hw->mac->rx_queue_enable)
4374 + stmmac_mac_enable_rx_queues(priv);
4376 + /* Set RX priorities */
4377 + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_prio)
4378 + stmmac_mac_config_rx_queues_prio(priv);
4380 + /* Set TX priorities */
4381 + if (tx_queues_count > 1 && priv->hw->mac->tx_queue_prio)
4382 + stmmac_mac_config_tx_queues_prio(priv);
4384 + /* Set RX routing */
4385 + if (rx_queues_count > 1 && priv->hw->mac->rx_queue_routing)
4386 + stmmac_mac_config_rx_queues_routing(priv);
4390 * stmmac_hw_setup - setup mac in a usable state.
4391 * @dev : pointer to the device structure.
4392 @@ -1674,6 +2436,9 @@ static void stmmac_init_tx_coalesce(stru
4393 static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
4395 struct stmmac_priv *priv = netdev_priv(dev);
4396 + u32 rx_cnt = priv->plat->rx_queues_to_use;
4397 + u32 tx_cnt = priv->plat->tx_queues_to_use;
4401 /* DMA initialization and SW reset */
4402 @@ -1703,9 +2468,9 @@ static int stmmac_hw_setup(struct net_de
4403 /* Initialize the MAC Core */
4404 priv->hw->mac->core_init(priv->hw, dev->mtu);
4406 - /* Initialize MAC RX Queues */
4407 - if (priv->hw->mac->rx_queue_enable)
4408 - stmmac_mac_enable_rx_queues(priv);
4409 + /* Initialize MTL */
4410 + if (priv->synopsys_id >= DWMAC_CORE_4_00)
4411 + stmmac_mtl_configuration(priv);
4413 ret = priv->hw->mac->rx_ipc(priv->hw);
4415 @@ -1715,10 +2480,7 @@ static int stmmac_hw_setup(struct net_de
4418 /* Enable the MAC Rx/Tx */
4419 - if (priv->synopsys_id >= DWMAC_CORE_4_00)
4420 - stmmac_dwmac4_set_mac(priv->ioaddr, true);
4422 - stmmac_set_mac(priv->ioaddr, true);
4423 + priv->hw->mac->set_mac(priv->ioaddr, true);
4425 /* Set the HW DMA mode and the COE */
4426 stmmac_dma_operation_mode(priv);
4427 @@ -1726,6 +2488,10 @@ static int stmmac_hw_setup(struct net_de
4428 stmmac_mmc_setup(priv);
4431 + ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
4432 + if (ret < 0)
4433 + netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret);
4435 ret = stmmac_init_ptp(priv);
4436 if (ret == -EOPNOTSUPP)
4437 netdev_warn(priv->dev, "PTP not supported by HW\n");
4438 @@ -1740,35 +2506,37 @@ static int stmmac_hw_setup(struct net_de
4441 /* Start the ball rolling... */
4442 - netdev_dbg(priv->dev, "DMA RX/TX processes started...\n");
4443 - priv->hw->dma->start_tx(priv->ioaddr);
4444 - priv->hw->dma->start_rx(priv->ioaddr);
4445 + stmmac_start_all_dma(priv);
4447 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
4449 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
4450 priv->rx_riwt = MAX_DMA_RIWT;
4451 - priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
4452 + priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT, rx_cnt);
4455 if (priv->hw->pcs && priv->hw->mac->pcs_ctrl_ane)
4456 priv->hw->mac->pcs_ctrl_ane(priv->hw, 1, priv->hw->ps, 0);
4458 - /* set TX ring length */
4459 - if (priv->hw->dma->set_tx_ring_len)
4460 - priv->hw->dma->set_tx_ring_len(priv->ioaddr,
4461 - (DMA_TX_SIZE - 1));
4462 - /* set RX ring length */
4463 - if (priv->hw->dma->set_rx_ring_len)
4464 - priv->hw->dma->set_rx_ring_len(priv->ioaddr,
4465 - (DMA_RX_SIZE - 1));
4466 + /* set TX and RX rings length */
4467 + stmmac_set_rings_length(priv);
4471 - priv->hw->dma->enable_tso(priv->ioaddr, 1, STMMAC_CHAN0);
4473 + for (chan = 0; chan < tx_cnt; chan++)
4474 + priv->hw->dma->enable_tso(priv->ioaddr, 1, chan);
4480 +static void stmmac_hw_teardown(struct net_device *dev)
4482 + struct stmmac_priv *priv = netdev_priv(dev);
4484 + clk_disable_unprepare(priv->plat->clk_ptp_ref);
4488 * stmmac_open - open entry point of the driver
4489 * @dev : pointer to the device structure.
4490 @@ -1837,7 +2605,7 @@ static int stmmac_open(struct net_device
4491 netdev_err(priv->dev,
4492 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
4493 __func__, dev->irq, ret);
4498 /* Request the Wake IRQ in case of another line is used for WoL */
4499 @@ -1864,8 +2632,8 @@ static int stmmac_open(struct net_device
4503 - napi_enable(&priv->napi);
4504 - netif_start_queue(dev);
4505 + stmmac_enable_all_queues(priv);
4506 + stmmac_start_all_queues(priv);
4510 @@ -1874,7 +2642,12 @@ lpiirq_error:
4511 free_irq(priv->wol_irq, dev);
4513 free_irq(dev->irq, dev);
4516 + phy_stop(dev->phydev);
4518 + del_timer_sync(&priv->txtimer);
4519 + stmmac_hw_teardown(dev);
4521 free_dma_desc_resources(priv);
4523 @@ -1903,9 +2676,9 @@ static int stmmac_release(struct net_dev
4524 phy_disconnect(dev->phydev);
4527 - netif_stop_queue(dev);
4528 + stmmac_stop_all_queues(priv);
4530 - napi_disable(&priv->napi);
4531 + stmmac_disable_all_queues(priv);
4533 del_timer_sync(&priv->txtimer);
4535 @@ -1917,14 +2690,13 @@ static int stmmac_release(struct net_dev
4536 free_irq(priv->lpi_irq, dev);
4538 /* Stop TX/RX DMA and clear the descriptors */
4539 - priv->hw->dma->stop_tx(priv->ioaddr);
4540 - priv->hw->dma->stop_rx(priv->ioaddr);
4541 + stmmac_stop_all_dma(priv);
4543 /* Release and free the Rx/Tx resources */
4544 free_dma_desc_resources(priv);
4546 /* Disable the MAC Rx/Tx */
4547 - stmmac_set_mac(priv->ioaddr, false);
4548 + priv->hw->mac->set_mac(priv->ioaddr, false);
4550 netif_carrier_off(dev);
4552 @@ -1943,22 +2715,24 @@ static int stmmac_release(struct net_dev
4553 * @des: buffer start address
4554 * @total_len: total length to fill in descriptors
4555 * @last_segment: condition for the last descriptor
4556 + * @queue: TX queue index
4558 * This function fills the descriptor and requests new descriptors according to
4559 * the buffer length to fill
4561 static void stmmac_tso_allocator(struct stmmac_priv *priv, unsigned int des,
4562 - int total_len, bool last_segment)
4563 + int total_len, bool last_segment, u32 queue)
4565 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
4566 struct dma_desc *desc;
4571 tmp_len = total_len;
4573 while (tmp_len > 0) {
4574 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4575 - desc = priv->dma_tx + priv->cur_tx;
4576 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4577 + desc = tx_q->dma_tx + tx_q->cur_tx;
4579 desc->des0 = cpu_to_le32(des + (total_len - tmp_len));
4580 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4581 @@ -1966,7 +2740,7 @@ static void stmmac_tso_allocator(struct
4583 priv->hw->desc->prepare_tso_tx_desc(desc, 0, buff_size,
4585 - (last_segment) && (buff_size < TSO_MAX_BUFF_SIZE),
4586 + (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4589 tmp_len -= TSO_MAX_BUFF_SIZE;
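A worked example of the splitting loop, assuming for illustration a 4096-byte per-descriptor limit (the real TSO_MAX_BUFF_SIZE is hardware-defined):

	/*
	 * tmp_len = 10000 -> three descriptors: 4096 + 4096 + 1808.
	 * Only the final 1808-byte chunk satisfies tmp_len <= TSO_MAX_BUFF_SIZE,
	 * so only it can carry the last-segment flag -- which is why the fixed
	 * condition above tests tmp_len rather than the capped buff_size.
	 */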
4590 @@ -2002,23 +2776,28 @@ static void stmmac_tso_allocator(struct
4592 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4595 - int tmp_pay_len = 0;
4596 + struct dma_desc *desc, *first, *mss_desc = NULL;
4597 struct stmmac_priv *priv = netdev_priv(dev);
4598 int nfrags = skb_shinfo(skb)->nr_frags;
4599 + u32 queue = skb_get_queue_mapping(skb);
4600 unsigned int first_entry, des;
4601 - struct dma_desc *desc, *first, *mss_desc = NULL;
4602 + struct stmmac_tx_queue *tx_q;
4603 + int tmp_pay_len = 0;
4608 + tx_q = &priv->tx_queue[queue];
4610 /* Compute header lengths */
4611 proto_hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
4613 /* Desc availability based on threshold should be enough safe */
4614 - if (unlikely(stmmac_tx_avail(priv) <
4615 + if (unlikely(stmmac_tx_avail(priv, queue) <
4616 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4617 - if (!netif_queue_stopped(dev)) {
4618 - netif_stop_queue(dev);
4619 + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4620 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4622 /* This is a hard error, log it. */
4623 netdev_err(priv->dev,
4624 "%s: Tx Ring full when queue awake\n",
4625 @@ -2033,10 +2812,10 @@ static netdev_tx_t stmmac_tso_xmit(struc
4627 /* set new MSS value if needed */
4628 if (mss != priv->mss) {
4629 - mss_desc = priv->dma_tx + priv->cur_tx;
4630 + mss_desc = tx_q->dma_tx + tx_q->cur_tx;
4631 priv->hw->desc->set_mss(mss_desc, mss);
4633 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4634 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4637 if (netif_msg_tx_queued(priv)) {
4638 @@ -2046,9 +2825,9 @@ static netdev_tx_t stmmac_tso_xmit(struc
4642 - first_entry = priv->cur_tx;
4643 + first_entry = tx_q->cur_tx;
4645 - desc = priv->dma_tx + first_entry;
4646 + desc = tx_q->dma_tx + first_entry;
4649 /* first descriptor: fill Headers on Buf1 */
4650 @@ -2057,9 +2836,8 @@ static netdev_tx_t stmmac_tso_xmit(struc
4651 if (dma_mapping_error(priv->device, des))
4654 - priv->tx_skbuff_dma[first_entry].buf = des;
4655 - priv->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4656 - priv->tx_skbuff[first_entry] = skb;
4657 + tx_q->tx_skbuff_dma[first_entry].buf = des;
4658 + tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4660 first->des0 = cpu_to_le32(des);
4662 @@ -2070,7 +2848,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
4663 /* If needed take extra descriptors to fill the remaining payload */
4664 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4666 - stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0));
4667 + stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4669 /* Prepare fragments */
4670 for (i = 0; i < nfrags; i++) {
4671 @@ -2079,24 +2857,34 @@ static netdev_tx_t stmmac_tso_xmit(struc
4672 des = skb_frag_dma_map(priv->device, frag, 0,
4673 skb_frag_size(frag),
4675 + if (dma_mapping_error(priv->device, des))
4678 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4679 - (i == nfrags - 1));
4680 + (i == nfrags - 1), queue);
4682 - priv->tx_skbuff_dma[priv->cur_tx].buf = des;
4683 - priv->tx_skbuff_dma[priv->cur_tx].len = skb_frag_size(frag);
4684 - priv->tx_skbuff[priv->cur_tx] = NULL;
4685 - priv->tx_skbuff_dma[priv->cur_tx].map_as_page = true;
4686 + tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4687 + tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4688 + tx_q->tx_skbuff[tx_q->cur_tx] = NULL;
4689 + tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4692 - priv->tx_skbuff_dma[priv->cur_tx].last_segment = true;
4693 + tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4695 + /* Only the last descriptor gets to point to the skb. */
4696 + tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4698 - priv->cur_tx = STMMAC_GET_ENTRY(priv->cur_tx, DMA_TX_SIZE);
4699 + /* We've used all descriptors we need for this skb, however,
4700 + * advance cur_tx so that it references a fresh descriptor.
4701 + * ndo_start_xmit will fill this descriptor the next time it's
4702 + * called and stmmac_tx_clean may clean up to this descriptor.
4704 + tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, DMA_TX_SIZE);
4706 - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
4707 + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4708 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4710 - netif_stop_queue(dev);
4711 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4714 dev->stats.tx_bytes += skb->len;
4715 @@ -2128,7 +2916,7 @@ static netdev_tx_t stmmac_tso_xmit(struc
4716 priv->hw->desc->prepare_tso_tx_desc(first, 1,
4719 - 1, priv->tx_skbuff_dma[first_entry].last_segment,
4720 + 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4721 tcp_hdrlen(skb) / 4, (skb->len - proto_hdr_len));
4723 /* If context desc is used to change MSS */
4724 @@ -2143,20 +2931,20 @@ static netdev_tx_t stmmac_tso_xmit(struc
4726 if (netif_msg_pktdata(priv)) {
4727 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4728 - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
4729 - priv->cur_tx, first, nfrags);
4730 + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4731 + tx_q->cur_tx, first, nfrags);
4733 - priv->hw->desc->display_ring((void *)priv->dma_tx, DMA_TX_SIZE,
4734 + priv->hw->desc->display_ring((void *)tx_q->dma_tx, DMA_TX_SIZE,
4737 pr_info(">>> frame to be transmitted: ");
4738 print_pkt(skb->data, skb_headlen(skb));
4741 - netdev_sent_queue(dev, skb->len);
4742 + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4744 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4746 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
4749 return NETDEV_TX_OK;
4751 @@ -2180,21 +2968,27 @@ static netdev_tx_t stmmac_xmit(struct sk
4752 struct stmmac_priv *priv = netdev_priv(dev);
4753 unsigned int nopaged_len = skb_headlen(skb);
4754 int i, csum_insertion = 0, is_jumbo = 0;
4755 + u32 queue = skb_get_queue_mapping(skb);
4756 int nfrags = skb_shinfo(skb)->nr_frags;
4757 - unsigned int entry, first_entry;
4759 + unsigned int first_entry;
4760 struct dma_desc *desc, *first;
4761 + struct stmmac_tx_queue *tx_q;
4762 unsigned int enh_desc;
4765 + tx_q = &priv->tx_queue[queue];
4767 /* Manage oversized TCP frames for GMAC4 device */
4768 if (skb_is_gso(skb) && priv->tso) {
4769 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
4770 return stmmac_tso_xmit(skb, dev);
4773 - if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
4774 - if (!netif_queue_stopped(dev)) {
4775 - netif_stop_queue(dev);
4776 + if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4777 + if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4778 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4780 /* This is a hard error, log it. */
4781 netdev_err(priv->dev,
4782 "%s: Tx Ring full when queue awake\n",
4783 @@ -2206,20 +3000,18 @@ static netdev_tx_t stmmac_xmit(struct sk
4784 if (priv->tx_path_in_lpi_mode)
4785 stmmac_disable_eee_mode(priv);
4787 - entry = priv->cur_tx;
4788 + entry = tx_q->cur_tx;
4789 first_entry = entry;
4791 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4793 if (likely(priv->extend_desc))
4794 - desc = (struct dma_desc *)(priv->dma_etx + entry);
4795 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4797 - desc = priv->dma_tx + entry;
4798 + desc = tx_q->dma_tx + entry;
4802 - priv->tx_skbuff[first_entry] = skb;
4804 enh_desc = priv->plat->enh_desc;
4805 /* To program the descriptors according to the size of the frame */
4807 @@ -2227,7 +3019,7 @@ static netdev_tx_t stmmac_xmit(struct sk
4809 if (unlikely(is_jumbo) && likely(priv->synopsys_id <
4811 - entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
4812 + entry = priv->hw->mode->jumbo_frm(tx_q, skb, csum_insertion);
4813 if (unlikely(entry < 0))
4816 @@ -2240,48 +3032,56 @@ static netdev_tx_t stmmac_xmit(struct sk
4817 entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4819 if (likely(priv->extend_desc))
4820 - desc = (struct dma_desc *)(priv->dma_etx + entry);
4821 + desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4823 - desc = priv->dma_tx + entry;
4824 + desc = tx_q->dma_tx + entry;
4826 des = skb_frag_dma_map(priv->device, frag, 0, len,
4828 if (dma_mapping_error(priv->device, des))
4829 goto dma_map_err; /* should reuse desc w/o issues */
4831 - priv->tx_skbuff[entry] = NULL;
4832 + tx_q->tx_skbuff[entry] = NULL;
4834 - priv->tx_skbuff_dma[entry].buf = des;
4835 + tx_q->tx_skbuff_dma[entry].buf = des;
4836 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
4837 desc->des0 = cpu_to_le32(des);
4839 desc->des2 = cpu_to_le32(des);
4841 - priv->tx_skbuff_dma[entry].map_as_page = true;
4842 - priv->tx_skbuff_dma[entry].len = len;
4843 - priv->tx_skbuff_dma[entry].last_segment = last_segment;
4844 + tx_q->tx_skbuff_dma[entry].map_as_page = true;
4845 + tx_q->tx_skbuff_dma[entry].len = len;
4846 + tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4848 /* Prepare the descriptor and set the own bit too */
4849 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
4850 - priv->mode, 1, last_segment);
4851 + priv->mode, 1, last_segment,
4855 - entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4856 + /* Only the last descriptor gets to point to the skb. */
4857 + tx_q->tx_skbuff[entry] = skb;
4859 - priv->cur_tx = entry;
4860 + /* We've used all descriptors we need for this skb, however,
4861 + * advance cur_tx so that it references a fresh descriptor.
4862 + * ndo_start_xmit will fill this descriptor the next time it's
4863 + * called and stmmac_tx_clean may clean up to this descriptor.
4865 + entry = STMMAC_GET_ENTRY(entry, DMA_TX_SIZE);
4866 + tx_q->cur_tx = entry;
4868 if (netif_msg_pktdata(priv)) {
4871 netdev_dbg(priv->dev,
4872 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4873 - __func__, priv->cur_tx, priv->dirty_tx, first_entry,
4874 + __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4875 entry, first, nfrags);
4877 if (priv->extend_desc)
4878 - tx_head = (void *)priv->dma_etx;
4879 + tx_head = (void *)tx_q->dma_etx;
4881 - tx_head = (void *)priv->dma_tx;
4882 + tx_head = (void *)tx_q->dma_tx;
4884 priv->hw->desc->display_ring(tx_head, DMA_TX_SIZE, false);
4886 @@ -2289,10 +3089,10 @@ static netdev_tx_t stmmac_xmit(struct sk
4887 print_pkt(skb->data, skb->len);
4890 - if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
4891 + if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4892 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4894 - netif_stop_queue(dev);
4895 + netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4898 dev->stats.tx_bytes += skb->len;
4899 @@ -2327,14 +3127,14 @@ static netdev_tx_t stmmac_xmit(struct sk
4900 if (dma_mapping_error(priv->device, des))
4903 - priv->tx_skbuff_dma[first_entry].buf = des;
4904 + tx_q->tx_skbuff_dma[first_entry].buf = des;
4905 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00))
4906 first->des0 = cpu_to_le32(des);
4908 first->des2 = cpu_to_le32(des);
4910 - priv->tx_skbuff_dma[first_entry].len = nopaged_len;
4911 - priv->tx_skbuff_dma[first_entry].last_segment = last_segment;
4912 + tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4913 + tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4915 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4916 priv->hwts_tx_en)) {
4917 @@ -2346,7 +3146,7 @@ static netdev_tx_t stmmac_xmit(struct sk
4918 /* Prepare the first descriptor setting the OWN bit too */
4919 priv->hw->desc->prepare_tx_desc(first, 1, nopaged_len,
4920 csum_insertion, priv->mode, 1,
4922 + last_segment, skb->len);
4924 /* The own bit must be the latest setting done when prepare the
4925 * descriptor and then barrier is needed to make sure that
4926 @@ -2355,13 +3155,13 @@ static netdev_tx_t stmmac_xmit(struct sk
4930 - netdev_sent_queue(dev, skb->len);
4931 + netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4933 if (priv->synopsys_id < DWMAC_CORE_4_00)
4934 priv->hw->dma->enable_dma_transmission(priv->ioaddr);
4936 - priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, priv->tx_tail_addr,
4938 + priv->hw->dma->set_tx_tail_ptr(priv->ioaddr, tx_q->tx_tail_addr,
4941 return NETDEV_TX_OK;
4943 @@ -2389,9 +3189,9 @@ static void stmmac_rx_vlan(struct net_de
4947 -static inline int stmmac_rx_threshold_count(struct stmmac_priv *priv)
4948 +static inline int stmmac_rx_threshold_count(struct stmmac_rx_queue *rx_q)
4950 - if (priv->rx_zeroc_thresh < STMMAC_RX_THRESH)
4951 + if (rx_q->rx_zeroc_thresh < STMMAC_RX_THRESH)
4955 @@ -2400,30 +3200,33 @@ static inline int stmmac_rx_threshold_co
4957 * stmmac_rx_refill - refill used skb preallocated buffers
4958 * @priv: driver private structure
4959 + * @queue: RX queue index
4960 * Description : this is to reallocate the skb for the reception process
4961 * that is based on zero-copy.
4963 -static inline void stmmac_rx_refill(struct stmmac_priv *priv)
4964 +static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4966 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
4967 + int dirty = stmmac_rx_dirty(priv, queue);
4968 + unsigned int entry = rx_q->dirty_rx;
4970 int bfsize = priv->dma_buf_sz;
4971 - unsigned int entry = priv->dirty_rx;
4972 - int dirty = stmmac_rx_dirty(priv);
4974 while (dirty-- > 0) {
4977 if (priv->extend_desc)
4978 - p = (struct dma_desc *)(priv->dma_erx + entry);
4979 + p = (struct dma_desc *)(rx_q->dma_erx + entry);
4981 - p = priv->dma_rx + entry;
4982 + p = rx_q->dma_rx + entry;
4984 - if (likely(priv->rx_skbuff[entry] == NULL)) {
4985 + if (likely(!rx_q->rx_skbuff[entry])) {
4986 struct sk_buff *skb;
4988 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);
4989 if (unlikely(!skb)) {
4990 /* so for a while no zero-copy! */
4991 - priv->rx_zeroc_thresh = STMMAC_RX_THRESH;
4992 + rx_q->rx_zeroc_thresh = STMMAC_RX_THRESH;
4993 if (unlikely(net_ratelimit()))
4994 dev_err(priv->device,
4995 "fail to alloc skb entry %d\n",
4996 @@ -2431,28 +3234,28 @@ static inline void stmmac_rx_refill(stru
5000 - priv->rx_skbuff[entry] = skb;
5001 - priv->rx_skbuff_dma[entry] =
5002 + rx_q->rx_skbuff[entry] = skb;
5003 + rx_q->rx_skbuff_dma[entry] =
5004 dma_map_single(priv->device, skb->data, bfsize,
5006 if (dma_mapping_error(priv->device,
5007 - priv->rx_skbuff_dma[entry])) {
5008 + rx_q->rx_skbuff_dma[entry])) {
5009 netdev_err(priv->dev, "Rx DMA map failed\n");
5014 if (unlikely(priv->synopsys_id >= DWMAC_CORE_4_00)) {
5015 - p->des0 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
5016 + p->des0 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
5019 - p->des2 = cpu_to_le32(priv->rx_skbuff_dma[entry]);
5020 + p->des2 = cpu_to_le32(rx_q->rx_skbuff_dma[entry]);
5022 if (priv->hw->mode->refill_desc3)
5023 - priv->hw->mode->refill_desc3(priv, p);
5024 + priv->hw->mode->refill_desc3(rx_q, p);
5026 - if (priv->rx_zeroc_thresh > 0)
5027 - priv->rx_zeroc_thresh--;
5028 + if (rx_q->rx_zeroc_thresh > 0)
5029 + rx_q->rx_zeroc_thresh--;
5031 netif_dbg(priv, rx_status, priv->dev,
5032 "refill entry #%d\n", entry);
5033 @@ -2468,31 +3271,33 @@ static inline void stmmac_rx_refill(stru
5035 entry = STMMAC_GET_ENTRY(entry, DMA_RX_SIZE);
5037 - priv->dirty_rx = entry;
5038 + rx_q->dirty_rx = entry;
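The per-queue ring indices follow the usual producer/consumer pattern; in sketch form:

	/*
	 * rx_q->cur_rx   - next entry the driver will process (chases the DMA)
	 * rx_q->dirty_rx - next entry awaiting an skb refill (chases cur_rx)
	 *
	 * stmmac_rx_refill() walks the [dirty_rx, cur_rx) window whose size is
	 * returned by stmmac_rx_dirty(priv, queue).
	 */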
5042 * stmmac_rx - manage the receive process
5043 * @priv: driver private structure
5044 - * @limit: napi bugget.
5045 + * @limit: napi budget
5046 + * @queue: RX queue index.
5047 * Description : this is the function called by the napi poll method.
5048 * It gets all the frames inside the ring.
5050 -static int stmmac_rx(struct stmmac_priv *priv, int limit)
5051 +static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5053 - unsigned int entry = priv->cur_rx;
5054 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5055 + unsigned int entry = rx_q->cur_rx;
5056 + int coe = priv->hw->rx_csum;
5057 unsigned int next_entry;
5058 unsigned int count = 0;
5059 - int coe = priv->hw->rx_csum;
5061 if (netif_msg_rx_status(priv)) {
5064 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5065 if (priv->extend_desc)
5066 - rx_head = (void *)priv->dma_erx;
5067 + rx_head = (void *)rx_q->dma_erx;
5069 - rx_head = (void *)priv->dma_rx;
5070 + rx_head = (void *)rx_q->dma_rx;
5072 priv->hw->desc->display_ring(rx_head, DMA_RX_SIZE, true);
5074 @@ -2502,9 +3307,9 @@ static int stmmac_rx(struct stmmac_priv
5075 struct dma_desc *np;
5077 if (priv->extend_desc)
5078 - p = (struct dma_desc *)(priv->dma_erx + entry);
5079 + p = (struct dma_desc *)(rx_q->dma_erx + entry);
5081 - p = priv->dma_rx + entry;
5082 + p = rx_q->dma_rx + entry;
5084 /* read the status of the incoming frame */
5085 status = priv->hw->desc->rx_status(&priv->dev->stats,
5086 @@ -2515,20 +3320,20 @@ static int stmmac_rx(struct stmmac_priv
5090 - priv->cur_rx = STMMAC_GET_ENTRY(priv->cur_rx, DMA_RX_SIZE);
5091 - next_entry = priv->cur_rx;
5092 + rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, DMA_RX_SIZE);
5093 + next_entry = rx_q->cur_rx;
5095 if (priv->extend_desc)
5096 - np = (struct dma_desc *)(priv->dma_erx + next_entry);
5097 + np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5099 - np = priv->dma_rx + next_entry;
5100 + np = rx_q->dma_rx + next_entry;
5104 if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
5105 priv->hw->desc->rx_extended_status(&priv->dev->stats,
5110 if (unlikely(status == discard_frame)) {
5111 priv->dev->stats.rx_errors++;
5112 @@ -2538,9 +3343,9 @@ static int stmmac_rx(struct stmmac_priv
5113 * them in stmmac_rx_refill() function so that
5114 * device can reuse it.
5116 - priv->rx_skbuff[entry] = NULL;
5117 + rx_q->rx_skbuff[entry] = NULL;
5118 dma_unmap_single(priv->device,
5119 - priv->rx_skbuff_dma[entry],
5120 + rx_q->rx_skbuff_dma[entry],
5124 @@ -2588,7 +3393,7 @@ static int stmmac_rx(struct stmmac_priv
5126 if (unlikely(!priv->plat->has_gmac4 &&
5127 ((frame_len < priv->rx_copybreak) ||
5128 - stmmac_rx_threshold_count(priv)))) {
5129 + stmmac_rx_threshold_count(rx_q)))) {
5130 skb = netdev_alloc_skb_ip_align(priv->dev,
5132 if (unlikely(!skb)) {
5133 @@ -2600,21 +3405,21 @@ static int stmmac_rx(struct stmmac_priv
5136 dma_sync_single_for_cpu(priv->device,
5137 - priv->rx_skbuff_dma
5138 + rx_q->rx_skbuff_dma
5141 skb_copy_to_linear_data(skb,
5144 rx_skbuff[entry]->data,
5147 skb_put(skb, frame_len);
5148 dma_sync_single_for_device(priv->device,
5149 - priv->rx_skbuff_dma
5150 + rx_q->rx_skbuff_dma
5154 - skb = priv->rx_skbuff[entry];
5155 + skb = rx_q->rx_skbuff[entry];
5156 if (unlikely(!skb)) {
5157 netdev_err(priv->dev,
5158 "%s: Inconsistent Rx chain\n",
5159 @@ -2623,12 +3428,12 @@ static int stmmac_rx(struct stmmac_priv
5162 prefetch(skb->data - NET_IP_ALIGN);
5163 - priv->rx_skbuff[entry] = NULL;
5164 - priv->rx_zeroc_thresh++;
5165 + rx_q->rx_skbuff[entry] = NULL;
5166 + rx_q->rx_zeroc_thresh++;
5168 skb_put(skb, frame_len);
5169 dma_unmap_single(priv->device,
5170 - priv->rx_skbuff_dma[entry],
5171 + rx_q->rx_skbuff_dma[entry],
5175 @@ -2650,7 +3455,7 @@ static int stmmac_rx(struct stmmac_priv
5177 skb->ip_summed = CHECKSUM_UNNECESSARY;
5179 - napi_gro_receive(&priv->napi, skb);
5180 + napi_gro_receive(&rx_q->napi, skb);
5182 priv->dev->stats.rx_packets++;
5183 priv->dev->stats.rx_bytes += frame_len;
5184 @@ -2658,7 +3463,7 @@ static int stmmac_rx(struct stmmac_priv
5188 - stmmac_rx_refill(priv);
5189 + stmmac_rx_refill(priv, queue);
5191 priv->xstats.rx_pkt_n += count;
5193 @@ -2675,16 +3480,24 @@ static int stmmac_rx(struct stmmac_priv
5195 static int stmmac_poll(struct napi_struct *napi, int budget)
5197 - struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
5198 + struct stmmac_rx_queue *rx_q =
5199 + container_of(napi, struct stmmac_rx_queue, napi);
5200 + struct stmmac_priv *priv = rx_q->priv_data;
5201 + u32 tx_count = priv->plat->tx_queues_to_use;
5202 + u32 chan = rx_q->queue_index;
5206 priv->xstats.napi_poll++;
5207 - stmmac_tx_clean(priv);
5209 - work_done = stmmac_rx(priv, budget);
5210 + /* check all the queues */
5211 + for (queue = 0; queue < tx_count; queue++)
5212 + stmmac_tx_clean(priv, queue);
5214 + work_done = stmmac_rx(priv, budget, rx_q->queue_index);
5215 if (work_done < budget) {
5216 napi_complete_done(napi, work_done);
5217 - stmmac_enable_dma_irq(priv);
5218 + stmmac_enable_dma_irq(priv, chan);
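The interrupt-to-poll flow with per-RX-queue NAPI, in sketch:

	/*
	 * stmmac_dma_interrupt(), per channel:
	 *   status = dma_interrupt(ioaddr, &xstats, chan);
	 *   if (status & (handle_rx | handle_tx)) {
	 *       stmmac_disable_dma_irq(priv, chan);
	 *       __napi_schedule(&rx_q->napi);
	 *   }
	 *
	 * stmmac_poll() then cleans every TX queue, receives on this RX queue,
	 * and re-enables the channel IRQ via stmmac_enable_dma_irq(priv, chan).
	 */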
5222 @@ -2700,9 +3513,12 @@ static int stmmac_poll(struct napi_struc
5223 static void stmmac_tx_timeout(struct net_device *dev)
5225 struct stmmac_priv *priv = netdev_priv(dev);
5226 + u32 tx_count = priv->plat->tx_queues_to_use;
5229 /* Clear Tx resources and restart transmitting again */
5230 - stmmac_tx_err(priv);
5231 + for (chan = 0; chan < tx_count; chan++)
5232 + stmmac_tx_err(priv, chan);
5236 @@ -2825,6 +3641,12 @@ static irqreturn_t stmmac_interrupt(int
5238 struct net_device *dev = (struct net_device *)dev_id;
5239 struct stmmac_priv *priv = netdev_priv(dev);
5240 + u32 rx_cnt = priv->plat->rx_queues_to_use;
5241 + u32 tx_cnt = priv->plat->tx_queues_to_use;
5245 + queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5248 pm_wakeup_event(priv->device, 0);
5249 @@ -2838,16 +3660,30 @@ static irqreturn_t stmmac_interrupt(int
5250 if ((priv->plat->has_gmac) || (priv->plat->has_gmac4)) {
5251 int status = priv->hw->mac->host_irq_status(priv->hw,
5254 if (unlikely(status)) {
5255 /* For LPI we need to save the tx status */
5256 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
5257 priv->tx_path_in_lpi_mode = true;
5258 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
5259 priv->tx_path_in_lpi_mode = false;
5260 - if (status & CORE_IRQ_MTL_RX_OVERFLOW && priv->hw->dma->set_rx_tail_ptr)
5261 - priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
5262 - priv->rx_tail_addr,
5266 + if (priv->synopsys_id >= DWMAC_CORE_4_00) {
5267 + for (queue = 0; queue < queues_count; queue++) {
5268 + struct stmmac_rx_queue *rx_q =
5269 + &priv->rx_queue[queue];
5272 + priv->hw->mac->host_mtl_irq_status(priv->hw,
5275 + if (status & CORE_IRQ_MTL_RX_OVERFLOW &&
5276 + priv->hw->dma->set_rx_tail_ptr)
5277 + priv->hw->dma->set_rx_tail_ptr(priv->ioaddr,
5278 + rx_q->rx_tail_addr,
5283 /* PCS link status */
5284 @@ -2932,7 +3768,7 @@ static void sysfs_display_ring(void *hea
5287 seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
5288 - i, (unsigned int)virt_to_phys(ep),
5289 + i, (unsigned int)virt_to_phys(p),
5290 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
5291 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
5293 @@ -2945,17 +3781,40 @@ static int stmmac_sysfs_ring_read(struct
5295 struct net_device *dev = seq->private;
5296 struct stmmac_priv *priv = netdev_priv(dev);
5297 + u32 rx_count = priv->plat->rx_queues_to_use;
5298 + u32 tx_count = priv->plat->tx_queues_to_use;
5301 - if (priv->extend_desc) {
5302 - seq_printf(seq, "Extended RX descriptor ring:\n");
5303 - sysfs_display_ring((void *)priv->dma_erx, DMA_RX_SIZE, 1, seq);
5304 - seq_printf(seq, "Extended TX descriptor ring:\n");
5305 - sysfs_display_ring((void *)priv->dma_etx, DMA_TX_SIZE, 1, seq);
5307 - seq_printf(seq, "RX descriptor ring:\n");
5308 - sysfs_display_ring((void *)priv->dma_rx, DMA_RX_SIZE, 0, seq);
5309 - seq_printf(seq, "TX descriptor ring:\n");
5310 - sysfs_display_ring((void *)priv->dma_tx, DMA_TX_SIZE, 0, seq);
5311 + for (queue = 0; queue < rx_count; queue++) {
5312 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5314 + seq_printf(seq, "RX Queue %d:\n", queue);
5316 + if (priv->extend_desc) {
5317 + seq_printf(seq, "Extended descriptor ring:\n");
5318 + sysfs_display_ring((void *)rx_q->dma_erx,
5319 + DMA_RX_SIZE, 1, seq);
5321 + seq_printf(seq, "Descriptor ring:\n");
5322 + sysfs_display_ring((void *)rx_q->dma_rx,
5323 + DMA_RX_SIZE, 0, seq);
5327 + for (queue = 0; queue < tx_count; queue++) {
5328 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5330 + seq_printf(seq, "TX Queue %d:\n", queue);
5332 + if (priv->extend_desc) {
5333 + seq_printf(seq, "Extended descriptor ring:\n");
5334 + sysfs_display_ring((void *)tx_q->dma_etx,
5335 + DMA_TX_SIZE, 1, seq);
5337 + seq_printf(seq, "Descriptor ring:\n");
5338 + sysfs_display_ring((void *)tx_q->dma_tx,
5339 + DMA_TX_SIZE, 0, seq);
5344 @@ -3238,11 +4097,14 @@ int stmmac_dvr_probe(struct device *devi
5345 struct plat_stmmacenet_data *plat_dat,
5346 struct stmmac_resources *res)
5349 struct net_device *ndev = NULL;
5350 struct stmmac_priv *priv;
5354 - ndev = alloc_etherdev(sizeof(struct stmmac_priv));
5355 + ndev = alloc_etherdev_mqs(sizeof(struct stmmac_priv),
5356 + MTL_MAX_TX_QUEUES,
5357 + MTL_MAX_RX_QUEUES);
5361 @@ -3284,6 +4146,10 @@ int stmmac_dvr_probe(struct device *devi
5365 + /* Configure real RX and TX queues */
5366 + netif_set_real_num_rx_queues(ndev, priv->plat->rx_queues_to_use);
5367 + netif_set_real_num_tx_queues(ndev, priv->plat->tx_queues_to_use);
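The netdev is sized for the maximum MTL queue count at allocation and trimmed to the platform's actual counts here; a sketch with hypothetical counts:

	/* allocated with MTL_MAX_TX_QUEUES/MTL_MAX_RX_QUEUES, then e.g.: */
	netif_set_real_num_rx_queues(ndev, 4);	/* platform exposes 4 RX queues */
	netif_set_real_num_tx_queues(ndev, 4);	/* and 4 TX queues */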
5369 ndev->netdev_ops = &stmmac_netdev_ops;
5371 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
5372 @@ -3316,7 +4182,12 @@ int stmmac_dvr_probe(struct device *devi
5373 "Enable RX Mitigation via HW Watchdog Timer\n");
5376 - netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
5377 + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
5378 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5380 + netif_napi_add(ndev, &rx_q->napi, stmmac_poll,
5381 + (8 * priv->plat->rx_queues_to_use));
5384 spin_lock_init(&priv->lock);
5386 @@ -3361,7 +4232,11 @@ error_netdev_register:
5387 priv->hw->pcs != STMMAC_PCS_RTBI)
5388 stmmac_mdio_unregister(ndev);
5389 error_mdio_register:
5390 - netif_napi_del(&priv->napi);
5391 + for (queue = 0; queue < priv->plat->rx_queues_to_use; queue++) {
5392 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5394 + netif_napi_del(&rx_q->napi);
5399 @@ -3382,10 +4257,9 @@ int stmmac_dvr_remove(struct device *dev
5401 netdev_info(priv->dev, "%s: removing driver", __func__);
5403 - priv->hw->dma->stop_rx(priv->ioaddr);
5404 - priv->hw->dma->stop_tx(priv->ioaddr);
5405 + stmmac_stop_all_dma(priv);
5407 - stmmac_set_mac(priv->ioaddr, false);
5408 + priv->hw->mac->set_mac(priv->ioaddr, false);
5409 netif_carrier_off(ndev);
5410 unregister_netdev(ndev);
5411 if (priv->plat->stmmac_rst)
5412 @@ -3424,20 +4298,19 @@ int stmmac_suspend(struct device *dev)
5413 spin_lock_irqsave(&priv->lock, flags);
5415 netif_device_detach(ndev);
5416 - netif_stop_queue(ndev);
5417 + stmmac_stop_all_queues(priv);
5419 - napi_disable(&priv->napi);
5420 + stmmac_disable_all_queues(priv);
5422 /* Stop TX/RX DMA */
5423 - priv->hw->dma->stop_tx(priv->ioaddr);
5424 - priv->hw->dma->stop_rx(priv->ioaddr);
5425 + stmmac_stop_all_dma(priv);
5427 /* Enable Power down mode by programming the PMT regs */
5428 if (device_may_wakeup(priv->device)) {
5429 priv->hw->mac->pmt(priv->hw, priv->wolopts);
5432 - stmmac_set_mac(priv->ioaddr, false);
5433 + priv->hw->mac->set_mac(priv->ioaddr, false);
5434 pinctrl_pm_select_sleep_state(priv->device);
5435 /* Disable clock in case of PWM is off */
5436 clk_disable(priv->plat->pclk);
5437 @@ -3453,6 +4326,31 @@ int stmmac_suspend(struct device *dev)
5438 EXPORT_SYMBOL_GPL(stmmac_suspend);
5441 + * stmmac_reset_queues_param - reset queue parameters
5442 + * @priv: driver private structure
5444 +static void stmmac_reset_queues_param(struct stmmac_priv *priv)
5446 + u32 rx_cnt = priv->plat->rx_queues_to_use;
5447 + u32 tx_cnt = priv->plat->tx_queues_to_use;
5450 + for (queue = 0; queue < rx_cnt; queue++) {
5451 + struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
5454 + rx_q->dirty_rx = 0;
5457 + for (queue = 0; queue < tx_cnt; queue++) {
5458 + struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];
5461 + tx_q->dirty_tx = 0;
5466 * stmmac_resume - resume callback
5467 * @dev: device pointer
5468 * Description: when resume this function is invoked to setup the DMA and CORE
5469 @@ -3492,10 +4390,8 @@ int stmmac_resume(struct device *dev)
5471 spin_lock_irqsave(&priv->lock, flags);
5474 - priv->dirty_rx = 0;
5475 - priv->dirty_tx = 0;
5477 + stmmac_reset_queues_param(priv);
5479 /* reset private mss value to force mss context settings at
5480 * next tso xmit (only used for gmac4).
5482 @@ -3507,9 +4403,9 @@ int stmmac_resume(struct device *dev)
5483 stmmac_init_tx_coalesce(priv);
5484 stmmac_set_rx_mode(ndev);
5486 - napi_enable(&priv->napi);
5487 + stmmac_enable_all_queues(priv);
5489 - netif_start_queue(ndev);
5490 + stmmac_start_all_queues(priv);
5492 spin_unlock_irqrestore(&priv->lock, flags);
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
 struct stmmac_pci_dmi_data {
 	const char *name;
+	const char *asset_tag;
 	unsigned int func;
 	int phy_addr;
 };
@@ -46,6 +47,7 @@ struct stmmac_pci_info {
 static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
 {
 	const char *name = dmi_get_system_info(DMI_BOARD_NAME);
+	const char *asset_tag = dmi_get_system_info(DMI_BOARD_ASSET_TAG);
 	unsigned int func = PCI_FUNC(info->pdev->devfn);
 	struct stmmac_pci_dmi_data *dmi;
 
@@ -57,18 +59,19 @@ static int stmmac_pci_find_phy_addr(struct stmmac_pci_info *info)
 	for (dmi = info->dmi; dmi->name && *dmi->name; dmi++) {
-		if (!strcmp(dmi->name, name) && dmi->func == func)
+		if (!strcmp(dmi->name, name) && dmi->func == func) {
+			/* If an asset tag is provided, match on it as well. */
+			if (dmi->asset_tag && strcmp(dmi->asset_tag, asset_tag))
+				continue;
 			return dmi->phy_addr;
+		}
 	}
 
 	return -ENODEV;
 }
-static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+static void common_default_data(struct plat_stmmacenet_data *plat)
 {
-	plat->phy_addr = 0;
-	plat->interface = PHY_INTERFACE_MODE_GMII;
 	plat->clk_csr = 2;	/* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
 	plat->has_gmac = 1;
 	plat->force_sf_dma_mode = 1;
@@ -76,10 +79,6 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 	plat->mdio_bus_data->phy_reset = NULL;
 	plat->mdio_bus_data->phy_mask = 0;
 
-	plat->dma_cfg->pbl = 32;
-	plat->dma_cfg->pblx8 = true;
-
 	/* Set default value for multicast hash bins */
 	plat->multicast_filter_bins = HASH_TABLE_SIZE;
 
@@ -88,6 +87,31 @@ static void stmmac_default_data(struct plat_stmmacenet_data *plat)
 	/* Set the maxmtu to a default of JUMBO_LEN */
 	plat->maxmtu = JUMBO_LEN;
 
+	/* Set default number of RX and TX queues to use */
+	plat->tx_queues_to_use = 1;
+	plat->rx_queues_to_use = 1;
+
+	/* Disable Priority config by default */
+	plat->tx_queues_cfg[0].use_prio = false;
+	plat->rx_queues_cfg[0].use_prio = false;
+
+	/* Disable RX queues routing by default */
+	plat->rx_queues_cfg[0].pkt_route = 0x0;
+}
+
+static void stmmac_default_data(struct plat_stmmacenet_data *plat)
+{
+	/* Set common default data first */
+	common_default_data(plat);
+
+	plat->phy_addr = 0;
+	plat->interface = PHY_INTERFACE_MODE_GMII;
+
+	plat->dma_cfg->pbl = 32;
+	plat->dma_cfg->pblx8 = true;
+}
+
 static int quark_default_data(struct plat_stmmacenet_data *plat,
 			      struct stmmac_pci_info *info)
@@ -96,6 +120,9 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
 	struct pci_dev *pdev = info->pdev;
 	int ret;
 
+	/* Set common default data first */
+	common_default_data(plat);
+
 	/*
 	 * Refuse to load the driver and register net device if MAC controller
 	 * does not connect to any PHY interface.
@@ -107,27 +134,12 @@ static int quark_default_data(struct plat_stmmacenet_data *plat,
 	plat->bus_id = PCI_DEVID(pdev->bus->number, pdev->devfn);
 	plat->phy_addr = ret;
 	plat->interface = PHY_INTERFACE_MODE_RMII;
-	plat->clk_csr = 2;
-	plat->has_gmac = 1;
-	plat->force_sf_dma_mode = 1;
-
-	plat->mdio_bus_data->phy_reset = NULL;
-	plat->mdio_bus_data->phy_mask = 0;
 
 	plat->dma_cfg->pbl = 16;
 	plat->dma_cfg->pblx8 = true;
 	plat->dma_cfg->fixed_burst = 1;
 
-	/* Set default value for multicast hash bins */
-	plat->multicast_filter_bins = HASH_TABLE_SIZE;
-
-	/* Set default value for unicast filter entries */
-	plat->unicast_filter_entries = 1;
-
-	/* Set the maxmtu to a default of JUMBO_LEN */
-	plat->maxmtu = JUMBO_LEN;
-
 	return 0;
 }
@@ -142,6 +154,24 @@ static struct stmmac_pci_dmi_data quark_pci_dmi_data[] = {
 		.func = 6,
 		.phy_addr = 1,
 	},
+	{
+		.name = "SIMATIC IOT2000",
+		.asset_tag = "6ES7647-0AA00-0YA2",
+		.func = 6,
+		.phy_addr = 1,
+	},
+	{
+		.name = "SIMATIC IOT2000",
+		.asset_tag = "6ES7647-0AA00-1YA2",
+		.func = 6,
+		.phy_addr = 1,
+	},
+	{
+		.name = "SIMATIC IOT2000",
+		.asset_tag = "6ES7647-0AA00-1YA2",
+		.func = 7,
+		.phy_addr = 1,
+	},
 	{}
 };
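All three IOT2000 entries share one DMI board name; stmmac_pci_find_phy_addr() disambiguates them by asset tag (IOT2020 vs. IOT2040) and by PCI function, since the IOT2040 exposes two MACs. A hedged sketch of how such a table is wired up, assuming a stmmac_pci_info layout with only the .pdev and .dmi members used above:

	/* Sketch only: hooking the DMI table into the per-device info
	 * walked by stmmac_pci_find_phy_addr(); any other fields of
	 * struct stmmac_pci_info are assumptions.
	 */
	static struct stmmac_pci_info quark_pci_info = {
		.dmi = quark_pci_dmi_data,
	};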
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -108,7 +108,7 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
 	if (!np)
 		return NULL;
 
-	axi = kzalloc(sizeof(*axi), GFP_KERNEL);
+	axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
 
 	if (!axi)
 		return ERR_PTR(-ENOMEM);
@@ -132,6 +132,155 @@ static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
 }
 
+/**
+ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
+ * @pdev: platform device
+ * @plat: driver data platform structure
+ */
+static void stmmac_mtl_setup(struct platform_device *pdev,
+			     struct plat_stmmacenet_data *plat)
+{
+	struct device_node *q_node;
+	struct device_node *rx_node;
+	struct device_node *tx_node;
+	u8 queue = 0;
+
+	/* For backwards-compatibility with device trees that don't have any
+	 * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
+	 * to a single RX and a single TX queue.
+	 */
+	plat->rx_queues_to_use = 1;
+	plat->tx_queues_to_use = 1;
+
+	rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+	if (!rx_node)
+		return;
+
+	tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+	if (!tx_node) {
+		of_node_put(rx_node);
+		return;
+	}
+
+	/* Processing RX queues common config */
+	if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
+				&plat->rx_queues_to_use))
+		plat->rx_queues_to_use = 1;
+
+	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
+		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+	else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
+		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
+	else
+		plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+	/* Processing individual RX queue config */
+	for_each_child_of_node(rx_node, q_node) {
+		if (queue >= plat->rx_queues_to_use)
+			break;
+
+		if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
+			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+		else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
+			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+		else
+			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+
+		if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
+					&plat->rx_queues_cfg[queue].chan))
+			plat->rx_queues_cfg[queue].chan = queue;
+		/* TODO: Dynamic mapping to be included in the future */
+
+		if (of_property_read_u32(q_node, "snps,priority",
+					 &plat->rx_queues_cfg[queue].prio)) {
+			plat->rx_queues_cfg[queue].prio = 0;
+			plat->rx_queues_cfg[queue].use_prio = false;
+		} else {
+			plat->rx_queues_cfg[queue].use_prio = true;
+		}
+
+		/* RX queue specific packet type routing */
+		if (of_property_read_bool(q_node, "snps,route-avcp"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
+		else if (of_property_read_bool(q_node, "snps,route-ptp"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
+		else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
+		else if (of_property_read_bool(q_node, "snps,route-up"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
+		else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
+			plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
+		else
+			plat->rx_queues_cfg[queue].pkt_route = 0x0;
+
+		queue++;
+	}
+
+	/* Processing TX queues common config */
+	if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
+				&plat->tx_queues_to_use))
+		plat->tx_queues_to_use = 1;
+
+	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+	else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
+	else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
+	else if (of_property_read_bool(tx_node, "snps,tx-sched-sp"))
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+	else
+		plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+
+	queue = 0;
+
+	/* Processing individual TX queue config */
+	for_each_child_of_node(tx_node, q_node) {
+		if (queue >= plat->tx_queues_to_use)
+			break;
+
+		if (of_property_read_u8(q_node, "snps,weight",
+					&plat->tx_queues_cfg[queue].weight))
+			plat->tx_queues_cfg[queue].weight = 0x10 + queue;
+
+		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+		} else if (of_property_read_bool(q_node,
+						 "snps,avb-algorithm")) {
+			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+
+			/* Credit Based Shaper parameters used by AVB */
+			if (of_property_read_u32(q_node, "snps,send_slope",
+						 &plat->tx_queues_cfg[queue].send_slope))
+				plat->tx_queues_cfg[queue].send_slope = 0x0;
+			if (of_property_read_u32(q_node, "snps,idle_slope",
+						 &plat->tx_queues_cfg[queue].idle_slope))
+				plat->tx_queues_cfg[queue].idle_slope = 0x0;
+			if (of_property_read_u32(q_node, "snps,high_credit",
+						 &plat->tx_queues_cfg[queue].high_credit))
+				plat->tx_queues_cfg[queue].high_credit = 0x0;
+			if (of_property_read_u32(q_node, "snps,low_credit",
+						 &plat->tx_queues_cfg[queue].low_credit))
+				plat->tx_queues_cfg[queue].low_credit = 0x0;
+		} else {
+			plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+		}
+
+		if (of_property_read_u32(q_node, "snps,priority",
+					 &plat->tx_queues_cfg[queue].prio)) {
+			plat->tx_queues_cfg[queue].prio = 0;
+			plat->tx_queues_cfg[queue].use_prio = false;
+		} else {
+			plat->tx_queues_cfg[queue].use_prio = true;
+		}
+
+		queue++;
+	}
+
+	of_node_put(rx_node);
+	of_node_put(tx_node);
+	of_node_put(q_node);
+}
+
 /**
  * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
  * @plat: driver data platform structure
  * @np: device tree node
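One caveat with stmmac_mtl_setup() as written: snps,rx-queues-to-use and snps,tx-queues-to-use come straight from the device tree and later index the fixed-size rx_queues_cfg[]/tx_queues_cfg[] arrays (MTL_MAX_RX_QUEUES and MTL_MAX_TX_QUEUES entries, see the include/linux/stmmac.h hunk below). A defensive clamp, offered as a suggestion rather than as part of this patch:

	/* Suggested hardening (not in this patch): never let DT-provided
	 * queue counts index past the fixed-size config arrays.
	 */
	if (plat->rx_queues_to_use > MTL_MAX_RX_QUEUES)
		plat->rx_queues_to_use = MTL_MAX_RX_QUEUES;
	if (plat->tx_queues_to_use > MTL_MAX_TX_QUEUES)
		plat->tx_queues_to_use = MTL_MAX_TX_QUEUES;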
@@ -340,6 +489,8 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 
 	plat->axi = stmmac_axi_setup(pdev);
 
+	stmmac_mtl_setup(pdev, plat);
+
 	/* clock setup */
 	plat->stmmac_clk = devm_clk_get(&pdev->dev,
 					STMMAC_RESOURCE_NAME);
@@ -359,13 +510,12 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac)
 	clk_prepare_enable(plat->pclk);
 
 	/* Fall-back to main clock in case of no PTP ref is passed */
-	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "clk_ptp_ref");
+	plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
 	if (IS_ERR(plat->clk_ptp_ref)) {
 		plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
 		plat->clk_ptp_ref = NULL;
 		dev_warn(&pdev->dev, "PTP uses main clock\n");
 	} else {
-		clk_prepare_enable(plat->clk_ptp_ref);
 		plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
 		dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
 	}
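Note that clk_prepare_enable() on the PTP reference clock is dropped from stmmac_probe_config_dt(), so the clock now has to be enabled later, presumably once PTP is actually initialized. A sketch of the deferred enable at that hypothetical call site:

	/* Sketch only: enabling the PTP ref clock when the interface is
	 * brought up; the exact location of this call is an assumption.
	 */
	if (priv->plat->clk_ptp_ref)
		clk_prepare_enable(priv->plat->clk_ptp_ref);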
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,9 @@
 
 #include <linux/platform_device.h>
 
+#define MTL_MAX_RX_QUEUES	8
+#define MTL_MAX_TX_QUEUES	8
+
 #define STMMAC_RX_COE_NONE	0
 #define STMMAC_RX_COE_TYPE1	1
 #define STMMAC_RX_COE_TYPE2	2
@@ -44,6 +47,18 @@
 #define	STMMAC_CSR_150_250M	0x4	/* MDC = clk_scr_i/102 */
 #define	STMMAC_CSR_250_300M	0x5	/* MDC = clk_scr_i/122 */
 
+/* MTL algorithms identifiers */
+#define MTL_TX_ALGORITHM_WRR	0x0
+#define MTL_TX_ALGORITHM_WFQ	0x1
+#define MTL_TX_ALGORITHM_DWRR	0x2
+#define MTL_TX_ALGORITHM_SP	0x3
+#define MTL_RX_ALGORITHM_SP	0x4
+#define MTL_RX_ALGORITHM_WSP	0x5
+
+/* RX/TX Queue Mode */
+#define MTL_QUEUE_AVB		0x0
+#define MTL_QUEUE_DCB		0x1
+
 /* The MDC clock could be set higher than the IEEE 802.3
  * specified frequency limit of 2.5 MHz, by programming a clock divider
  * of value different than the above defined values. The resultant MDIO
@@ -109,6 +124,26 @@ struct stmmac_axi {
 	bool axi_rb;
 };
 
+struct stmmac_rxq_cfg {
+	u8 mode_to_use;
+	u8 chan;
+	u8 pkt_route;
+	bool use_prio;
+	u32 prio;
+};
+
+struct stmmac_txq_cfg {
+	u8 weight;
+	u8 mode_to_use;
+	/* Credit Based Shaper parameters */
+	u32 send_slope;
+	u32 idle_slope;
+	u32 high_credit;
+	u32 low_credit;
+	bool use_prio;
+	u32 prio;
+};
+
 struct plat_stmmacenet_data {
@@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
 	int unicast_filter_entries;
 	int tx_fifo_size;
 	int rx_fifo_size;
+	u8 rx_queues_to_use;
+	u8 tx_queues_to_use;
+	u8 rx_sched_algorithm;
+	u8 tx_sched_algorithm;
+	struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
+	struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
 	void (*fix_mac_speed)(void *priv, unsigned int speed);
 	int (*init)(struct platform_device *pdev, void *priv);
 	void (*exit)(struct platform_device *pdev, void *priv);
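With these fields in place, platform glue that does not use the device tree can describe its queue layout directly, just as common_default_data() above seeds a single-queue default. A minimal hedged example configuring two DCB TX queues under WRR scheduling, using only the fields added in this hunk:

	/* Usage sketch for a hypothetical glue driver. */
	plat->rx_queues_to_use = 2;
	plat->tx_queues_to_use = 2;
	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;

	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].weight = 0x10;
	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[1].weight = 0x11;

	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[0].chan = 0;
	plat->rx_queues_cfg[1].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[1].chan = 1;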