openwrt/staging/chunkeey.git: target/linux/mediatek/patches-4.9/0059-eth-fixes.patch
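
Assorted mtk_eth_soc fixes for the mediatek target: rework the QDMA RX
ring into dedicated mtk_rx_alloc_qdma()/mtk_rx_clean_qdma() helpers,
switch the BQL accounting in mtk_tx_map()/mtk_poll_tx() to per-netdev
counters, drop the mt7530 special-tag RX demux, and add a
RAETH_QDMA_QUEUE_MAPPING ioctl that fills M2Q_table[] and toggles
lan_wan_separate for the HW QoS code.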
Index: linux-4.9.44/drivers/net/ethernet/mediatek/mtk_eth_soc.c
===================================================================
--- linux-4.9.44.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ linux-4.9.44/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -24,6 +24,7 @@
 #include <linux/tcp.h>
 
 #if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+
 struct mtk_ioctl_reg {
 	unsigned int off;
 	unsigned int val;
@@ -32,6 +33,13 @@ struct mtk_ioctl_reg {
 #define REG_HQOS_MAX 0x3FFF
 #define RAETH_QDMA_REG_READ 0x89F8
 #define RAETH_QDMA_REG_WRITE 0x89F9
+#define RAETH_QDMA_QUEUE_MAPPING 0x89FA
+
+unsigned int M2Q_table[16] = {0};
+unsigned int lan_wan_separate = 0;
+
+EXPORT_SYMBOL_GPL(M2Q_table);
+
 #endif
 
 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
@@ -225,7 +233,7 @@ static void mtk_phy_link_adjust(struct n
 	if (flowctrl & FLOW_CTRL_RX)
 		mcr |= MAC_MCR_FORCE_RX_FC;
 
-	netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+	netif_info(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
		  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
		  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
 }
@@ -508,9 +516,9 @@ static struct rtnl_link_stats64 * mtk_ge
 	unsigned int start;
 
 	if (netif_running(dev) && netif_device_present(dev)) {
-		if (spin_trylock_bh(&hw_stats->stats_lock)) {
+		if (spin_trylock(&hw_stats->stats_lock)) {
 			mtk_stats_update_mac(mac);
-			spin_unlock_bh(&hw_stats->stats_lock);
+			spin_unlock(&hw_stats->stats_lock);
 		}
 	}
 
@@ -690,6 +698,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	txd3 |= skb->mark & 0x7;
 	if (mac->id)
 		txd3 += 8;
+	txd3 = 0;
 #endif
 
 	mapped_addr = dma_map_single(eth->dev, skb->data,
@@ -760,16 +769,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));
 
-	/* we have a single DMA ring so BQL needs to be updated for all devices
-	 * sitting on this ring
-	 */
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i])
-			continue;
-
-		netdev_sent_queue(eth->netdev[i], skb->len);
-	}
-
+	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
 
 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
@@ -980,20 +980,9 @@ static int mtk_poll_rx(struct napi_struc
 		if (!(trxd.rxd2 & RX_DMA_DONE))
			break;
 
-		/* find out which mac the packet comes from. If the special tag is
-		 * we can assume that the traffic is coming from the builtin mt7530
-		 * and the DSA driver has loaded. FPORT will be the physical switch
-		 * port in this case rather than the FE forward port id. */
-		if (!(trxd.rxd4 & RX_DMA_SP_TAG)) {
-			/* values start at 1 */
-			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
-			      RX_DMA_FPORT_MASK;
-			mac--;
-		}
-
-		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-			     !eth->netdev[mac]))
-			goto release_desc;
+		/* find out which mac the packet comes from. values start at 1 */
+		mac = (trxd.rxd4 >> 22) & 0x1;
+		mac = (mac + 1) % 2;
 
 		netdev = eth->netdev[mac];
 
@@ -1017,6 +1006,9 @@ static int mtk_poll_rx(struct napi_struc
 		}
 
 		/* receive data */
+		if (mac < 0 || mac >= MTK_MAC_COUNT)
+			mac = 0;
+
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
			skb_free_frag(new_data);
@@ -1076,18 +1068,21 @@ static int mtk_poll_tx(struct mtk_eth *e
 	struct mtk_tx_dma *desc;
 	struct sk_buff *skb;
 	struct mtk_tx_buf *tx_buf;
-	int total = 0, done = 0;
-	unsigned int bytes = 0;
+	unsigned int done[MTK_MAX_DEVS];
+	unsigned int bytes[MTK_MAX_DEVS];
 	u32 cpu, dma;
 	static int condition;
-	int i;
+	int total = 0, i;
+
+	memset(done, 0, sizeof(done));
+	memset(bytes, 0, sizeof(bytes));
 
 	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
 	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
 
 	desc = mtk_qdma_phys_to_virt(ring, cpu);
 
-	while ((cpu != dma) && done < budget) {
+	while ((cpu != dma) && budget) {
 		u32 next_cpu = desc->txd2;
 		int mac = 0;
 
@@ -1106,8 +1101,9 @@ static int mtk_poll_tx(struct mtk_eth *e
 		}
 
 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
-			bytes += skb->len;
-			done++;
+			bytes[mac] += skb->len;
+			done[mac]++;
+			budget--;
 		}
 		mtk_tx_unmap(eth, tx_buf);
 
@@ -1119,13 +1115,11 @@ static int mtk_poll_tx(struct mtk_eth *e
 
 	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
 
-	/* we have a single DMA ring so BQL needs to be updated for all devices
-	 * sitting on this ring
-	 */
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i])
+		if (!eth->netdev[i] || !done[i])
			continue;
-		netdev_completed_queue(eth->netdev[i], done, bytes);
+		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+		total += done[i];
 	}
 
 	if (mtk_queue_stopped(eth) &&
@@ -1286,21 +1280,11 @@ static void mtk_tx_clean(struct mtk_eth
 
 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
-	struct mtk_rx_ring *ring;
+	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
 	int rx_data_len, rx_dma_size;
 	int i;
-	u32 offset = 0;
-
-	if (rx_flag & MTK_RX_FLAGS_QDMA) {
-		if (ring_no)
-			return -EINVAL;
-		ring = &eth->rx_ring_qdma;
-		offset = 0x1000;
-	} else {
-		ring = &eth->rx_ring[ring_no];
-	}
 
-	if (rx_flag & MTK_RX_FLAGS_HWLRO) {
+	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
 	} else {
@@ -1348,16 +1332,104 @@ static int mtk_rx_alloc(struct mtk_eth *
 	 */
 	wmb();
 
-	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
-	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
-	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
-	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
+	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
 
 	return 0;
 }
 
-static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
+static int mtk_rx_alloc_qdma(struct mtk_eth *eth, int rx_flag)
 {
+	struct mtk_rx_ring *ring = &eth->rx_ring_qdma;
+	int rx_data_len, rx_dma_size;
+	int i;
+
+	rx_data_len = ETH_DATA_LEN;
+	rx_dma_size = MTK_DMA_SIZE;
+
+	ring->frag_size = mtk_max_frag_size(rx_data_len);
+	ring->buf_size = mtk_max_buf_size(ring->frag_size);
+	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
+			     GFP_KERNEL);
+	if (!ring->data)
+		return -ENOMEM;
+
+	for (i = 0; i < rx_dma_size; i++) {
+		ring->data[i] = netdev_alloc_frag(ring->frag_size);
+		if (!ring->data[i])
+			return -ENOMEM;
+	}
+
+	ring->dma = dma_alloc_coherent(eth->dev,
+				       rx_dma_size * sizeof(*ring->dma),
+				       &ring->phys,
+				       GFP_ATOMIC | __GFP_ZERO);
+	if (!ring->dma)
+		return -ENOMEM;
+
+	for (i = 0; i < rx_dma_size; i++) {
+		dma_addr_t dma_addr = dma_map_single(eth->dev,
+				ring->data[i] + NET_SKB_PAD,
+				ring->buf_size,
+				DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+			return -ENOMEM;
+		ring->dma[i].rxd1 = (unsigned int)dma_addr;
+
+		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+	}
+	ring->dma_size = rx_dma_size;
+	ring->calc_idx_update = false;
+	ring->calc_idx = rx_dma_size - 1;
+	ring->crx_idx_reg = MTK_QRX_CRX_IDX_CFG(0);
+	/* make sure that all changes to the dma ring are flushed before we
+	 * continue
+	 */
+	wmb();
+
+	mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(0));
+	mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(0));
+	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(0), MTK_QDMA_RST_IDX);
+
+	return 0;
+}
+
+static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
+{
+	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
+	int i;
+
+	if (ring->data && ring->dma) {
+		for (i = 0; i < ring->dma_size; i++) {
+			if (!ring->data[i])
+				continue;
+			if (!ring->dma[i].rxd1)
+				continue;
+			dma_unmap_single(eth->dev,
+					 ring->dma[i].rxd1,
+					 ring->buf_size,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(ring->data[i]);
+		}
+		kfree(ring->data);
+		ring->data = NULL;
+	}
+
+	if (ring->dma) {
+		dma_free_coherent(eth->dev,
+				  ring->dma_size * sizeof(*ring->dma),
+				  ring->dma,
+				  ring->phys);
+		ring->dma = NULL;
+	}
+}
+
+static void mtk_rx_clean_qdma(struct mtk_eth *eth)
+{
+	struct mtk_rx_ring *ring = &eth->rx_ring_qdma;
 	int i;
 
 	if (ring->data && ring->dma) {
@@ -1683,7 +1755,7 @@ static int mtk_dma_init(struct mtk_eth *
 	if (err)
		return err;
 
-	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+	err = mtk_rx_alloc_qdma(eth, MTK_RX_FLAGS_NORMAL);
 	if (err)
		return err;
 
@@ -1702,6 +1774,7 @@ static int mtk_dma_init(struct mtk_eth *
		return err;
 	}
 
+
 	/* Enable random early drop and set drop threshold automatically */
 	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
		MTK_QDMA_FC_THRES);
@@ -1726,13 +1799,13 @@ static void mtk_dma_free(struct mtk_eth
		eth->phy_scratch_ring = 0;
 	}
 	mtk_tx_clean(eth);
-	mtk_rx_clean(eth, &eth->rx_ring[0]);
-	mtk_rx_clean(eth, &eth->rx_ring_qdma);
+	mtk_rx_clean(eth, 0);
+	mtk_rx_clean_qdma(eth);
 
 	if (eth->hwlro) {
		mtk_hwlro_rx_uninit(eth);
		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
-			mtk_rx_clean(eth, &eth->rx_ring[i]);
+			mtk_rx_clean(eth, i);
 	}
 
 	kfree(eth->scratch_head);
@@ -1947,20 +2020,14 @@ static int mtk_hw_init(struct mtk_eth *e
 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
 
-	/* Indicates CDM to parse the MTK special tag from CPU
-	 * which also is working out for untag packets.
-	 */
-	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
-	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
-	val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
-	mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
-
 	/* Enable RX VLan Offloading */
 	if (MTK_HW_FEATURES & NETIF_F_HW_VLAN_CTAG_RX)
		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
 	else
		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
 
+	mtk_w32(eth, 0x81000001, MTK_CDMP_IG_CTRL);
+
 	/* disable delay and normal interrupt */
 #ifdef MTK_IRQ_DLY
 	mtk_w32(eth, 0x84048404, MTK_PDMA_DELAY_INT);
@@ -1990,6 +2057,9 @@ static int mtk_hw_init(struct mtk_eth *e
		/* Enable RX checksum */
		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
 
+		if (!i)
+			val |= BIT(24);
+
		/* setup the mac dma */
		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
 	}
@@ -2069,7 +2139,18 @@ static int mtk_do_ioctl(struct net_devic
		if (reg.off > REG_HQOS_MAX)
			return -EINVAL;
		mtk_w32(eth, reg.val, 0x1800 + reg.off);
-//		printk("write reg off:%x val:%x\n", reg.off, reg.val);
+		printk("write reg off:%x val:%x\n", reg.off, reg.val);
+		return 0;
+
+	case RAETH_QDMA_QUEUE_MAPPING:
+		copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
+		if ((reg.off & 0x100) == 0x100) {
+			lan_wan_separate = 1;
+			reg.off &= 0xff;
+		} else {
+			lan_wan_separate = 0;
+		}
+		M2Q_table[reg.off] = reg.val;
		return 0;
 #endif
 	case SIOCGMIIPHY:
@@ -2288,9 +2369,9 @@ static void mtk_get_ethtool_stats(struct
		return;
 
 	if (netif_running(dev) && netif_device_present(dev)) {
-		if (spin_trylock_bh(&hwstats->stats_lock)) {
+		if (spin_trylock(&hwstats->stats_lock)) {
			mtk_stats_update_mac(mac);
-			spin_unlock_bh(&hwstats->stats_lock);
+			spin_unlock(&hwstats->stats_lock);
		}
 	}
 
@@ -2443,7 +2524,7 @@ static int mtk_add_mac(struct mtk_eth *e
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-	eth->netdev[id]->watchdog_timeo = 30 * HZ;
+	eth->netdev[id]->watchdog_timeo = 15 * HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 
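Usage note (illustrative, not part of the diff): RAETH_QDMA_QUEUE_MAPPING
(0x89FA) sits in the SIOCDEVPRIVATE range, so the handler added above is
reached through the driver's ndo_do_ioctl and can be driven from any
AF_INET socket, addressing the port by interface name. A minimal
userspace sketch, assuming the driver was built with
CONFIG_NET_MEDIATEK_HW_QOS; the interface name "eth0" and the local
re-declaration of struct mtk_ioctl_reg are assumptions made for the
example:

	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/socket.h>
	#include <net/if.h>
	#include <unistd.h>

	#define RAETH_QDMA_QUEUE_MAPPING 0x89FA

	struct mtk_ioctl_reg {
		unsigned int off;
		unsigned int val;
	};

	int main(void)
	{
		/* map skb->mark 3 to hardware queue 1; bit 0x100 in .off
		 * additionally turns on lan_wan_separate in the driver */
		struct mtk_ioctl_reg reg = { .off = 0x100 | 3, .val = 1 };
		struct ifreq ifr;
		int fd = socket(AF_INET, SOCK_DGRAM, 0);

		if (fd < 0)
			return 1;
		memset(&ifr, 0, sizeof(ifr));
		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
		ifr.ifr_data = (char *)&reg;
		if (ioctl(fd, RAETH_QDMA_QUEUE_MAPPING, &ifr) < 0)
			perror("RAETH_QDMA_QUEUE_MAPPING");
		close(fd);
		return 0;
	}
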
Index: linux-4.9.44/drivers/net/ethernet/mediatek/mtk_eth_soc.h
===================================================================
--- linux-4.9.44.orig/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ linux-4.9.44/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -80,7 +80,6 @@
 
 /* CDMP Ingress Control Register */
 #define MTK_CDMP_IG_CTRL	0x400
-#define MTK_CDMP_STAG_EN	BIT(0)
 
 /* CDMP Exgress Control Register */
 #define MTK_CDMP_EG_CTRL	0x404
@@ -91,12 +90,27 @@
 #define MTK_GDMA_TCS_EN		BIT(21)
 #define MTK_GDMA_UCS_EN		BIT(20)
 
+/* GDMA Ingress Control Register */
+#define MTK_GDMA1_IG_CTRL(x)	(0x500 + (x * 0x1000))
+
 /* Unicast Filter MAC Address Register - Low */
 #define MTK_GDMA_MAC_ADRL(x)	(0x508 + (x * 0x1000))
 
 /* Unicast Filter MAC Address Register - High */
 #define MTK_GDMA_MAC_ADRH(x)	(0x50C + (x * 0x1000))
 
+/* QDMA RX Base Pointer Register */
+#define MTK_QRX_BASE_PTR0	0x1900
+#define MTK_QRX_BASE_PTR_CFG(x)	(MTK_QRX_BASE_PTR0 + (x * 0x10))
+
+/* QDMA RX Maximum Count Register */
+#define MTK_QRX_MAX_CNT0	0x1904
+#define MTK_QRX_MAX_CNT_CFG(x)	(MTK_QRX_MAX_CNT0 + (x * 0x10))
+
+/* QDMA RX CPU Pointer Register */
+#define MTK_QRX_CRX_IDX0	0x1908
+#define MTK_QRX_CRX_IDX_CFG(x)	(MTK_QRX_CRX_IDX0 + (x * 0x10))
+
 /* PDMA RX Base Pointer Register */
 #define MTK_PRX_BASE_PTR0	0x900
 #define MTK_PRX_BASE_PTR_CFG(x)	(MTK_PRX_BASE_PTR0 + (x * 0x10))
@@ -240,7 +254,10 @@
 #define MTK_QDMA_INT_MASK	0x1A1C
 
 /* QDMA Interrupt Mask Register */
+#define MTK_QDMA_HRED1		0x1A40
 #define MTK_QDMA_HRED2		0x1A44
+#define MTK_QDMA_SRED1		0x1A48
+#define MTK_QDMA_SRED2		0x1A4c
 
 /* QDMA TX Forward CPU Pointer Register */
 #define MTK_QTX_CTX_PTR		0x1B00
@@ -275,6 +292,7 @@
 #define TX_DMA_TSO		BIT(28)
 #define TX_DMA_FPORT_SHIFT	25
 #define TX_DMA_FPORT_MASK	0x7
+#define TX_DMA_VQID0		BIT(17)
 #define TX_DMA_INS_VLAN		BIT(16)
 
 /* QDMA descriptor txd3 */
@@ -294,7 +312,6 @@
 
 /* QDMA descriptor rxd4 */
 #define RX_DMA_L4_VALID		BIT(24)
-#define RX_DMA_SP_TAG		BIT(22)
 #define RX_DMA_FPORT_SHIFT	19
 #define RX_DMA_FPORT_MASK	0x7
 
@@ -310,6 +327,7 @@
 
 /* Mac control registers */
 #define MTK_MAC_MCR(x)		(0x10100 + (x * 0x100))
+#define MTK_MAC_MSR(x)		(0x10108 + (x * 0x100))
 #define MAC_MCR_MAX_RX_1536	BIT(24)
 #define MAC_MCR_IPG_CFG		(BIT(18) | BIT(16))
 #define MAC_MCR_FORCE_MODE	BIT(15)
@@ -495,7 +513,6 @@ struct mtk_tx_ring {
 enum mtk_rx_flags {
 	MTK_RX_FLAGS_NORMAL = 0,
 	MTK_RX_FLAGS_HWLRO,
-	MTK_RX_FLAGS_QDMA,
 };
 
 /* struct mtk_rx_ring - This struct holds info describing a RX ring
@@ -539,9 +556,9 @@ struct mtk_rx_ring {
  * @pctl:		The register map pointing at the range used to setup
  *			GMAC port drive/slew values
  * @dma_refcnt:		track how many netdevs are using the DMA engine
- * @tx_ring:		Pointer to the memory holding info about the TX ring
- * @rx_ring:		Pointer to the memory holding info about the RX ring
- * @rx_ring_qdma:	Pointer to the memory holding info about the QDMA RX ring
+ * @tx_ring:		Pointer to the memory holding info about the TX ring
+ * @rx_ring:		Pointer to the memory holding info about the RX ring
+ * @rx_ring_qdma:	Pointer to the memory holding info about the RX ring (QDMA)
  * @tx_napi:		The TX NAPI struct
  * @rx_napi:		The RX NAPI struct
  * @scratch_ring:	Newer SoCs need memory for a second HW managed TX ring
@@ -563,6 +580,7 @@ struct mtk_eth {
 	struct net_device	*netdev[MTK_MAX_DEVS];
 	struct mtk_mac		*mac[MTK_MAX_DEVS];
 	int			irq[3];
+	cpumask_t		affinity_mask[3];
 	u32			msg_enable;
 	unsigned long		sysclk;
 	struct regmap		*ethsys;
@@ -615,4 +633,6 @@ void mtk_stats_update_mac(struct mtk_mac
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
 u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
 
+extern unsigned int M2Q_table[16];
+
 #endif /* MTK_ETH_H */
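
Side note (illustrative, outside the patch): the new MTK_QRX_*_CFG(x)
macros group each QDMA RX ring's base, max-count, and cpu-index
registers into one 0x10-byte block per ring, mirroring the existing
MTK_PRX_* layout. A standalone sketch with the macro bodies copied from
the header above:

	#include <stdio.h>

	#define MTK_QRX_BASE_PTR0	0x1900
	#define MTK_QRX_BASE_PTR_CFG(x)	(MTK_QRX_BASE_PTR0 + (x * 0x10))
	#define MTK_QRX_MAX_CNT0	0x1904
	#define MTK_QRX_MAX_CNT_CFG(x)	(MTK_QRX_MAX_CNT0 + (x * 0x10))
	#define MTK_QRX_CRX_IDX0	0x1908
	#define MTK_QRX_CRX_IDX_CFG(x)	(MTK_QRX_CRX_IDX0 + (x * 0x10))

	int main(void)
	{
		int ring;

		/* mtk_rx_alloc_qdma() only ever programs ring 0 */
		for (ring = 0; ring < 2; ring++)
			printf("ring %d: base 0x%04x max_cnt 0x%04x crx_idx 0x%04x\n",
			       ring,
			       MTK_QRX_BASE_PTR_CFG(ring),
			       MTK_QRX_MAX_CNT_CFG(ring),
			       MTK_QRX_CRX_IDX_CFG(ring));
		return 0;
	}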