target/linux/mediatek/patches-4.9/0059-eth-fixes.patch
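For reference, the sketch below shows how the RAETH_QDMA_QUEUE_MAPPING private ioctl added by this patch might be exercised from user space. The ioctl number and the layout of struct mtk_ioctl_reg are taken from the patch itself; the interface name "eth0", the AF_INET socket, and the example offset/value are assumptions made purely for illustration and are not part of the driver change.

/*
 * Illustrative only -- not part of the patch. Stores an example value in
 * M2Q_table[3]; setting bit 8 of .off would additionally set
 * lan_wan_separate in the driver (see mtk_do_ioctl below).
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

#define RAETH_QDMA_QUEUE_MAPPING 0x89FA	/* SIOCDEVPRIVATE range, as in the patch */

struct mtk_ioctl_reg {
	unsigned int off;
	unsigned int val;
};

int main(void)
{
	struct mtk_ioctl_reg reg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	reg.off = 3;	/* M2Q_table index (OR with 0x100 to enable lan/wan separation) */
	reg.val = 1;	/* queue value written into M2Q_table[reg.off] */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&reg;

	if (ioctl(fd, RAETH_QDMA_QUEUE_MAPPING, &ifr) < 0)
		perror("RAETH_QDMA_QUEUE_MAPPING");

	close(fd);
	return 0;
}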
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -24,6 +24,7 @@
 #include <linux/tcp.h>
 
 #if defined(CONFIG_NET_MEDIATEK_HW_QOS)
+
 struct mtk_ioctl_reg {
 	unsigned int off;
 	unsigned int val;
@@ -32,6 +33,13 @@ struct mtk_ioctl_reg {
 #define REG_HQOS_MAX 0x3FFF
 #define RAETH_QDMA_REG_READ 0x89F8
 #define RAETH_QDMA_REG_WRITE 0x89F9
+#define RAETH_QDMA_QUEUE_MAPPING 0x89FA
+
+unsigned int M2Q_table[16] = {0};
+unsigned int lan_wan_separate = 0;
+
+EXPORT_SYMBOL_GPL(M2Q_table);
+
 #endif
 
 #if defined(CONFIG_NET_MEDIATEK_HNAT) || defined(CONFIG_NET_MEDIATEK_HNAT_MODULE)
@@ -225,7 +233,7 @@ static void mtk_phy_link_adjust(struct n
 	if (flowctrl & FLOW_CTRL_RX)
 		mcr |= MAC_MCR_FORCE_RX_FC;
 
-	netif_dbg(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
+	netif_info(mac->hw, link, dev, "rx pause %s, tx pause %s\n",
 		  flowctrl & FLOW_CTRL_RX ? "enabled" : "disabled",
 		  flowctrl & FLOW_CTRL_TX ? "enabled" : "disabled");
 }
@@ -508,9 +516,9 @@ static struct rtnl_link_stats64 * mtk_ge
 	unsigned int start;
 
 	if (netif_running(dev) && netif_device_present(dev)) {
-		if (spin_trylock_bh(&hw_stats->stats_lock)) {
+		if (spin_trylock(&hw_stats->stats_lock)) {
 			mtk_stats_update_mac(mac);
-			spin_unlock_bh(&hw_stats->stats_lock);
+			spin_unlock(&hw_stats->stats_lock);
 		}
 	}
 
@@ -690,6 +698,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	txd3 |= skb->mark & 0x7;
 	if (mac->id)
 		txd3 += 8;
+	txd3 = 0;
 #endif
 
 	mapped_addr = dma_map_single(eth->dev, skb->data,
@@ -760,16 +769,7 @@ static int mtk_tx_map(struct sk_buff *sk
 	WRITE_ONCE(itxd->txd3, (TX_DMA_SWC | TX_DMA_PLEN0(skb_headlen(skb)) |
				(!nr_frags * TX_DMA_LS0)));
 
-	/* we have a single DMA ring so BQL needs to be updated for all devices
-	 * sitting on this ring
-	 */
-	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i])
-			continue;
-
-		netdev_sent_queue(eth->netdev[i], skb->len);
-	}
-
+	netdev_sent_queue(dev, skb->len);
 	skb_tx_timestamp(skb);
 
 	ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
@@ -980,20 +980,9 @@ static int mtk_poll_rx(struct napi_struc
 		if (!(trxd.rxd2 & RX_DMA_DONE))
 			break;
 
-		/* find out which mac the packet comes from. If the special tag is
-		 * we can assume that the traffic is coming from the builtin mt7530
-		 * and the DSA driver has loaded. FPORT will be the physical switch
-		 * port in this case rather than the FE forward port id. */
-		if (!(trxd.rxd4 & RX_DMA_SP_TAG)) {
-			/* values start at 1 */
-			mac = (trxd.rxd4 >> RX_DMA_FPORT_SHIFT) &
-			      RX_DMA_FPORT_MASK;
-			mac--;
-		}
-
-		if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT ||
-			     !eth->netdev[mac]))
-			goto release_desc;
+		/* find out which mac the packet comes from. values start at 1 */
+		mac = (trxd.rxd4 >> 22) & 0x1;
+		mac = (mac + 1) % 2;
 
 		netdev = eth->netdev[mac];
 
@@ -1017,6 +1006,9 @@ static int mtk_poll_rx(struct napi_struc
 		}
 
 		/* receive data */
+		if (mac < 0 || mac > 2)
+			mac = 0;
+
 		skb = build_skb(data, ring->frag_size);
 		if (unlikely(!skb)) {
 			skb_free_frag(new_data);
@@ -1076,18 +1068,21 @@ static int mtk_poll_tx(struct mtk_eth *e
 	struct mtk_tx_dma *desc;
 	struct sk_buff *skb;
 	struct mtk_tx_buf *tx_buf;
-	int total = 0, done = 0;
-	unsigned int bytes = 0;
+	unsigned int done[MTK_MAX_DEVS];
+	unsigned int bytes[MTK_MAX_DEVS];
 	u32 cpu, dma;
 	static int condition;
-	int i;
+	int total = 0, i;
+
+	memset(done, 0, sizeof(done));
+	memset(bytes, 0, sizeof(bytes));
 
 	cpu = mtk_r32(eth, MTK_QTX_CRX_PTR);
 	dma = mtk_r32(eth, MTK_QTX_DRX_PTR);
 
 	desc = mtk_qdma_phys_to_virt(ring, cpu);
 
-	while ((cpu != dma) && done < budget) {
+	while ((cpu != dma) && budget) {
 		u32 next_cpu = desc->txd2;
 		int mac = 0;
 
@@ -1106,8 +1101,9 @@ static int mtk_poll_tx(struct mtk_eth *e
 		}
 
 		if (skb != (struct sk_buff *)MTK_DMA_DUMMY_DESC) {
-			bytes += skb->len;
-			done++;
+			bytes[mac] += skb->len;
+			done[mac]++;
+			budget--;
 		}
 		mtk_tx_unmap(eth, tx_buf);
 
@@ -1119,13 +1115,11 @@ static int mtk_poll_tx(struct mtk_eth *e
 
 	mtk_w32(eth, cpu, MTK_QTX_CRX_PTR);
 
-	/* we have a single DMA ring so BQL needs to be updated for all devices
-	 * sitting on this ring
-	 */
 	for (i = 0; i < MTK_MAC_COUNT; i++) {
-		if (!eth->netdev[i])
+		if (!eth->netdev[i] || !done[i])
 			continue;
-		netdev_completed_queue(eth->netdev[i], done, bytes);
+		netdev_completed_queue(eth->netdev[i], done[i], bytes[i]);
+		total += done[i];
 	}
 
 	if (mtk_queue_stopped(eth) &&
@@ -1286,21 +1280,11 @@ static void mtk_tx_clean(struct mtk_eth
 
 static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
 {
-	struct mtk_rx_ring *ring;
+	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
 	int rx_data_len, rx_dma_size;
 	int i;
-	u32 offset = 0;
-
-	if (rx_flag & MTK_RX_FLAGS_QDMA) {
-		if (ring_no)
-			return -EINVAL;
-		ring = &eth->rx_ring_qdma;
-		offset = 0x1000;
-	} else {
-		ring = &eth->rx_ring[ring_no];
-	}
 
-	if (rx_flag & MTK_RX_FLAGS_HWLRO) {
+	if (rx_flag == MTK_RX_FLAGS_HWLRO) {
 		rx_data_len = MTK_MAX_LRO_RX_LENGTH;
 		rx_dma_size = MTK_HW_LRO_DMA_SIZE;
 	} else {
@@ -1348,16 +1332,104 @@ static int mtk_rx_alloc(struct mtk_eth *
 	 */
 	wmb();
 
-	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no) + offset);
-	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no) + offset);
-	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg + offset);
-	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX + offset);
+	mtk_w32(eth, ring->phys, MTK_PRX_BASE_PTR_CFG(ring_no));
+	mtk_w32(eth, rx_dma_size, MTK_PRX_MAX_CNT_CFG(ring_no));
+	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), MTK_PDMA_RST_IDX);
 
 	return 0;
 }
 
-static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring)
+static int mtk_rx_alloc_qdma(struct mtk_eth *eth, int rx_flag)
 {
+	struct mtk_rx_ring *ring = &eth->rx_ring_qdma;
+	int rx_data_len, rx_dma_size;
+	int i;
+
+	rx_data_len = ETH_DATA_LEN;
+	rx_dma_size = MTK_DMA_SIZE;
+
+	ring->frag_size = mtk_max_frag_size(rx_data_len);
+	ring->buf_size = mtk_max_buf_size(ring->frag_size);
+	ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
+			     GFP_KERNEL);
+	if (!ring->data)
+		return -ENOMEM;
+
+	for (i = 0; i < rx_dma_size; i++) {
+		ring->data[i] = netdev_alloc_frag(ring->frag_size);
+		if (!ring->data[i])
+			return -ENOMEM;
+	}
+
+	ring->dma = dma_alloc_coherent(eth->dev,
+				       rx_dma_size * sizeof(*ring->dma),
+				       &ring->phys,
+				       GFP_ATOMIC | __GFP_ZERO);
+	if (!ring->dma)
+		return -ENOMEM;
+
+	for (i = 0; i < rx_dma_size; i++) {
+		dma_addr_t dma_addr = dma_map_single(eth->dev,
+				ring->data[i] + NET_SKB_PAD,
+				ring->buf_size,
+				DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(eth->dev, dma_addr)))
+			return -ENOMEM;
+		ring->dma[i].rxd1 = (unsigned int)dma_addr;
+
+		ring->dma[i].rxd2 = RX_DMA_PLEN0(ring->buf_size);
+	}
+	ring->dma_size = rx_dma_size;
+	ring->calc_idx_update = false;
+	ring->calc_idx = rx_dma_size - 1;
+	ring->crx_idx_reg = MTK_QRX_CRX_IDX_CFG(0);
+	/* make sure that all changes to the dma ring are flushed before we
+	 * continue
+	 */
+	wmb();
+
+	mtk_w32(eth, ring->phys, MTK_QRX_BASE_PTR_CFG(0));
+	mtk_w32(eth, rx_dma_size, MTK_QRX_MAX_CNT_CFG(0));
+	mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+	mtk_w32(eth, MTK_PST_DRX_IDX_CFG(0), MTK_QDMA_RST_IDX);
+
+	return 0;
+}
+
+static void mtk_rx_clean(struct mtk_eth *eth, int ring_no)
+{
+	struct mtk_rx_ring *ring = &eth->rx_ring[ring_no];
+	int i;
+
+	if (ring->data && ring->dma) {
+		for (i = 0; i < ring->dma_size; i++) {
+			if (!ring->data[i])
+				continue;
+			if (!ring->dma[i].rxd1)
+				continue;
+			dma_unmap_single(eth->dev,
+					 ring->dma[i].rxd1,
+					 ring->buf_size,
+					 DMA_FROM_DEVICE);
+			skb_free_frag(ring->data[i]);
+		}
+		kfree(ring->data);
+		ring->data = NULL;
+	}
+
+	if (ring->dma) {
+		dma_free_coherent(eth->dev,
+				  ring->dma_size * sizeof(*ring->dma),
+				  ring->dma,
+				  ring->phys);
+		ring->dma = NULL;
+	}
+}
+
+static void mtk_rx_clean_qdma(struct mtk_eth *eth)
+{
+	struct mtk_rx_ring *ring = &eth->rx_ring_qdma;
 	int i;
 
 	if (ring->data && ring->dma) {
@@ -1683,7 +1755,7 @@ static int mtk_dma_init(struct mtk_eth *
 	if (err)
 		return err;
 
-	err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+	err = mtk_rx_alloc_qdma(eth, MTK_RX_FLAGS_NORMAL);
 	if (err)
 		return err;
 
@@ -1702,6 +1774,7 @@ static int mtk_dma_init(struct mtk_eth *
 			return err;
 	}
 
+
 	/* Enable random early drop and set drop threshold automatically */
 	mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | FC_THRES_MIN,
 		MTK_QDMA_FC_THRES);
@@ -1726,13 +1799,13 @@ static void mtk_dma_free(struct mtk_eth
 		eth->phy_scratch_ring = 0;
 	}
 	mtk_tx_clean(eth);
-	mtk_rx_clean(eth, &eth->rx_ring[0]);
-	mtk_rx_clean(eth, &eth->rx_ring_qdma);
+	mtk_rx_clean(eth, 0);
+	mtk_rx_clean_qdma(eth);
 
 	if (eth->hwlro) {
 		mtk_hwlro_rx_uninit(eth);
 		for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
-			mtk_rx_clean(eth, &eth->rx_ring[i]);
+			mtk_rx_clean(eth, i);
 	}
 
 	kfree(eth->scratch_head);
@@ -1947,20 +2020,14 @@ static int mtk_hw_init(struct mtk_eth *e
 	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
 	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
 
-	/* Indicates CDM to parse the MTK special tag from CPU
-	 * which also is working out for untag packets.
-	 */
-	val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
-	mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
-	val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
-	mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
-
 	/* Enable RX VLan Offloading */
 	if (MTK_HW_FEATURES & NETIF_F_HW_VLAN_CTAG_RX)
 		mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
 	else
 		mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
 
+	mtk_w32(eth, 0x81000001, MTK_CDMP_IG_CTRL);
+
 	/* disable delay and normal interrupt */
 #ifdef MTK_IRQ_DLY
 	mtk_w32(eth, 0x84048404, MTK_PDMA_DELAY_INT);
@@ -1990,6 +2057,9 @@ static int mtk_hw_init(struct mtk_eth *e
 		/* Enable RX checksum */
 		val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
 
+		if (!i)
+			val |= BIT(24);
+
 		/* setup the mac dma */
 		mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
 	}
@@ -2069,7 +2139,18 @@ static int mtk_do_ioctl(struct net_devic
 		if (reg.off > REG_HQOS_MAX)
 			return -EINVAL;
 		mtk_w32(eth, reg.val, 0x1800 + reg.off);
-//		printk("write reg off:%x val:%x\n", reg.off, reg.val);
+		printk("write reg off:%x val:%x\n", reg.off, reg.val);
 		return 0;
 
+	case RAETH_QDMA_QUEUE_MAPPING:
+		copy_from_user(&reg, ifr->ifr_data, sizeof(reg));
+		if ((reg.off & 0x100) == 0x100) {
+			lan_wan_separate = 1;
+			reg.off &= 0xff;
+		} else {
+			lan_wan_separate = 0;
+		}
+		M2Q_table[reg.off] = reg.val;
 		return 0;
 #endif
 	case SIOCGMIIPHY:
@@ -2288,9 +2369,9 @@ static void mtk_get_ethtool_stats(struct
 		return;
 
 	if (netif_running(dev) && netif_device_present(dev)) {
-		if (spin_trylock_bh(&hwstats->stats_lock)) {
+		if (spin_trylock(&hwstats->stats_lock)) {
 			mtk_stats_update_mac(mac);
-			spin_unlock_bh(&hwstats->stats_lock);
+			spin_unlock(&hwstats->stats_lock);
 		}
 	}
 
@@ -2443,7 +2524,7 @@ static int mtk_add_mac(struct mtk_eth *e
 	mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
 
 	SET_NETDEV_DEV(eth->netdev[id], eth->dev);
-	eth->netdev[id]->watchdog_timeo = 30 * HZ;
+	eth->netdev[id]->watchdog_timeo = 15 * HZ;
 	eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
 	eth->netdev[id]->base_addr = (unsigned long)eth->base;
 
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -80,7 +80,6 @@
 
 /* CDMP Ingress Control Register */
 #define MTK_CDMP_IG_CTRL 0x400
-#define MTK_CDMP_STAG_EN BIT(0)
 
 /* CDMP Exgress Control Register */
 #define MTK_CDMP_EG_CTRL 0x404
@@ -91,12 +90,27 @@
 #define MTK_GDMA_TCS_EN BIT(21)
 #define MTK_GDMA_UCS_EN BIT(20)
 
+/* GDMA Ingress Control Register */
+#define MTK_GDMA1_IG_CTRL(x) (0x500 + (x * 0x1000))
+
 /* Unicast Filter MAC Address Register - Low */
 #define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000))
 
 /* Unicast Filter MAC Address Register - High */
 #define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000))
 
+/* QDMA RX Base Pointer Register */
+#define MTK_QRX_BASE_PTR0 0x1900
+#define MTK_QRX_BASE_PTR_CFG(x) (MTK_QRX_BASE_PTR0 + (x * 0x10))
+
+/* QDMA RX Maximum Count Register */
+#define MTK_QRX_MAX_CNT0 0x1904
+#define MTK_QRX_MAX_CNT_CFG(x) (MTK_QRX_MAX_CNT0 + (x * 0x10))
+
+/* QDMA RX CPU Pointer Register */
+#define MTK_QRX_CRX_IDX0 0x1908
+#define MTK_QRX_CRX_IDX_CFG(x) (MTK_QRX_CRX_IDX0 + (x * 0x10))
+
 /* PDMA RX Base Pointer Register */
 #define MTK_PRX_BASE_PTR0 0x900
 #define MTK_PRX_BASE_PTR_CFG(x) (MTK_PRX_BASE_PTR0 + (x * 0x10))
@@ -240,7 +254,10 @@
 #define MTK_QDMA_INT_MASK 0x1A1C
 
 /* QDMA Interrupt Mask Register */
+#define MTK_QDMA_HRED1 0x1A40
 #define MTK_QDMA_HRED2 0x1A44
+#define MTK_QDMA_SRED1 0x1A48
+#define MTK_QDMA_SRED2 0x1A4c
 
 /* QDMA TX Forward CPU Pointer Register */
 #define MTK_QTX_CTX_PTR 0x1B00
@@ -275,6 +292,7 @@
 #define TX_DMA_TSO BIT(28)
 #define TX_DMA_FPORT_SHIFT 25
 #define TX_DMA_FPORT_MASK 0x7
+#define TX_DMA_VQID0 BIT(17)
 #define TX_DMA_INS_VLAN BIT(16)
 
 /* QDMA descriptor txd3 */
@@ -294,7 +312,6 @@
 
 /* QDMA descriptor rxd4 */
 #define RX_DMA_L4_VALID BIT(24)
-#define RX_DMA_SP_TAG BIT(22)
 #define RX_DMA_FPORT_SHIFT 19
 #define RX_DMA_FPORT_MASK 0x7
 
@@ -310,6 +327,7 @@
 
 /* Mac control registers */
 #define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
+#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
 #define MAC_MCR_MAX_RX_1536 BIT(24)
 #define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
 #define MAC_MCR_FORCE_MODE BIT(15)
@@ -495,7 +513,6 @@ struct mtk_tx_ring {
 enum mtk_rx_flags {
 	MTK_RX_FLAGS_NORMAL = 0,
 	MTK_RX_FLAGS_HWLRO,
-	MTK_RX_FLAGS_QDMA,
 };
 
 /* struct mtk_rx_ring - This struct holds info describing a RX ring
@@ -539,9 +556,9 @@ struct mtk_rx_ring {
  * @pctl: The register map pointing at the range used to setup
  *        GMAC port drive/slew values
  * @dma_refcnt: track how many netdevs are using the DMA engine
- * @tx_ring: Pointer to the memory holding info about the TX ring
- * @rx_ring: Pointer to the memory holding info about the RX ring
- * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
+ * @tx_ring: Pointer to the memory holding info about the TX ring
+ * @rx_ring: Pointer to the memory holding info about the RX ring
+ * @rx_ring_qdma: Pointer to the memory holding info about the RX ring (QDMA)
  * @tx_napi: The TX NAPI struct
  * @rx_napi: The RX NAPI struct
  * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
@@ -563,6 +580,7 @@ struct mtk_eth {
 	struct net_device *netdev[MTK_MAX_DEVS];
 	struct mtk_mac *mac[MTK_MAX_DEVS];
 	int irq[3];
+	cpumask_t affinity_mask[3];
 	u32 msg_enable;
 	unsigned long sysclk;
 	struct regmap *ethsys;
@@ -615,4 +633,6 @@ void mtk_stats_update_mac(struct mtk_mac
 void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
 u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
 
+extern unsigned int M2Q_table[16];
+
 #endif /* MTK_ETH_H */