/*
 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/cpu_rmap.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/timer.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/clk.h>
#include <linux/string.h>
#include <linux/reset.h>
#include <linux/version.h>
#include "edma.h"
#include "ess_edma.h"

/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16

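/* The two sysctls that take a packed {id, value} pair (queue weight and
 * virtual-queue mapping, see the handlers below) encode the pair as
 *
 *	packed = (value << EDMA_WRR_VID_SCTL_SHIFT) | (id & EDMA_WRR_VID_SCTL_MASK)
 *
 * so, for example, weight 2 for hardware queue 3 is written as
 * (2 << 16) | 3 = 0x20003.
 */
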
char edma_axi_driver_name[] = "ess_edma";
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static u32 edma_hw_addr;

char edma_tx_irq[16][64];
char edma_rx_irq[8][64];
struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
			EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
			EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};

static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
static u32 edma_rss_idt_idx;

static int edma_weight_assigned_to_q __read_mostly;
static int edma_queue_to_virtual_q __read_mostly;
/* int, not bool: the sysctl entry below hands this to proc_dointvec()
 * with maxlen = sizeof(int)
 */
static int edma_enable_rstp __read_mostly;
static int edma_athr_hdr_eth_type __read_mostly;

static int page_mode;
module_param(page_mode, int, 0);
MODULE_PARM_DESC(page_mode, "enable page mode");

static int overwrite_mode;
module_param(overwrite_mode, int, 0);
MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");

static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
module_param(jumbo_mru, int, 0);
MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");

static int num_rxq = 4;
module_param(num_rxq, int, 0);
MODULE_PARM_DESC(num_rxq, "change the number of rx queues");

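/* Register accessors. Note that edma_hw_addr caches the ioremapped EDMA
 * base address in a u32, so these helpers are only valid on 32-bit
 * targets such as IPQ40xx; the ESS switch accessors take the base from
 * the driver context instead.
 */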
void edma_write_reg(u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
}

void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
}

static void ess_write_reg(struct edma_common_info *edma, u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr)));
}

static void ess_read_reg(struct edma_common_info *edma, u16 reg_addr,
			 volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr));
}

static int ess_reset(struct edma_common_info *edma)
{
	struct device_node *switch_node = NULL;
	struct reset_control *ess_rst;
	u32 regval;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
	if (!switch_node) {
		pr_err("switch-node not found\n");
		return -EINVAL;
	}

	ess_rst = of_reset_control_get(switch_node, "ess_rst");
	of_node_put(switch_node);

	if (IS_ERR(ess_rst)) {
		pr_err("failed to find ess_rst!\n");
		return -ENOENT;
	}

	reset_control_assert(ess_rst);
	msleep(10);
	reset_control_deassert(ess_rst);
	msleep(100);
	reset_control_put(ess_rst);

	/* Enable only port 5 <--> port 0:
	 * bits [5:0] are the bitmap of ports a port may forward to
	 */
#define SET_PORT_BMP(r, v) do { \
	ess_read_reg(edma, r, &regval); \
	ess_write_reg(edma, r, ((regval & ~0x3F) | v)); \
} while (0)

	SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL, 0x20);
	SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL, 0x01);
	ess_write_reg(edma, ESS_RGMII_CTRL, 0x400);
	ess_write_reg(edma, ESS_PORT0_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT5_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT0_HEADER_CTRL, 0);
#undef SET_PORT_BMP

	/* flood unicast, multicast and broadcast frames to all ports */
	ess_write_reg(edma, ESS_FWD_CTRL1,
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_UC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_MC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_BC_FLOOD_S));

	return 0;
}

void ess_set_port_status_speed(struct edma_common_info *edma,
			       struct phy_device *phydev, uint8_t port_id)
{
	uint16_t reg_off = ESS_PORT0_STATUS + (4 * port_id);
	uint32_t reg_val = 0;

	ess_read_reg(edma, reg_off, &reg_val);

	/* reset the speed bits [1:0] */
	reg_val &= ~ESS_PORT_STATUS_SPEED_INV;

	/* set the new speed */
	switch (phydev->speed) {
	case SPEED_1000: reg_val |= ESS_PORT_STATUS_SPEED_1000; break;
	case SPEED_100:  reg_val |= ESS_PORT_STATUS_SPEED_100;  break;
	case SPEED_10:   reg_val |= ESS_PORT_STATUS_SPEED_10;   break;
	default:         reg_val |= ESS_PORT_STATUS_SPEED_INV;  break;
	}

	/* set full/half duplex */
	if (phydev->duplex)
		reg_val |= ESS_PORT_STATUS_DUPLEX_MODE;
	else
		reg_val &= ~ESS_PORT_STATUS_DUPLEX_MODE;

	ess_write_reg(edma, reg_off, reg_val);
}

/* edma_change_tx_coalesce()
 *	change tx interrupt moderation timer
 */
void edma_change_tx_coalesce(int usecs)
{
	u32 reg_value;

	/* The value from the user is right-shifted by 1 because the
	 * IMT has a resolution of 2 usecs: one count of this register
	 * corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

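/* Worked example: edma_change_tx_coalesce(100) writes 100 >> 1 = 50
 * into bits [31:16] of EDMA_REG_IRQ_MODRT_TIMER_INIT, i.e. 50 counts
 * of the 2-usec moderation timer; the rx timer lives in bits [15:0].
 */
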
/* edma_change_rx_coalesce()
 *	change rx interrupt moderation timer
 */
void edma_change_rx_coalesce(int usecs)
{
	u32 reg_value;

	/* The value from the user is right-shifted by 1 because the
	 * IMT has a resolution of 2 usecs: one count of this register
	 * corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

/* edma_get_tx_rx_coalesce()
 *	Get tx/rx interrupt moderation value
 */
void edma_get_tx_rx_coalesce(u32 *reg_val)
{
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
}

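/* To recover microseconds from the raw register value returned above:
 * rx_usecs = (reg_val & 0xffff) << 1 and tx_usecs = (reg_val >> 16) << 1,
 * the inverse of the encoding used by the two change functions.
 */
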
void edma_read_append_stats(struct edma_common_info *edma_cinfo)
{
	uint32_t *p;
	int i;
	u32 stat;

	spin_lock_bh(&edma_cinfo->stats_lock);
	p = (uint32_t *)&(edma_cinfo->edma_ethstats);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	spin_unlock_bh(&edma_cinfo->stats_lock);
}

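/* Note: the pointer walk above assumes struct edma_ethstats lays out its
 * u32 counters as per-queue tx-packet, tx-byte, rx-packet and rx-byte
 * arrays, in exactly that order; the one-second timer below keeps
 * accumulating the hardware counters into it.
 */
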
static void edma_statistics_timer(struct timer_list *t)
{
	struct edma_common_info *edma_cinfo =
		from_timer(edma_cinfo, t, edma_stats_timer);

	edma_read_append_stats(edma_cinfo);

	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
}

static int edma_enable_stp_rstp(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_set_stp_rstp(edma_enable_rstp);

	return ret;
}

static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);

	return ret;
}

static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for default_lan does not exist\n");
		return -EINVAL;
	}

	adapter = netdev_priv(edma_netdev[1]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_ltag;

	return ret;
}

static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for default_wan does not exist\n");
		return -EINVAL;
	}

	adapter = netdev_priv(edma_netdev[0]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_wtag;

	return ret;
}

static int edma_change_group1_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for Group 1 does not exist\n");
		return -EINVAL;
	}

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group1_vtag;

	return ret;
}

static int edma_change_group2_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for Group 2 does not exist\n");
		return -EINVAL;
	}

	adapter = netdev_priv(edma_netdev[1]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group2_vtag;

	return ret;
}

static int edma_change_group3_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[2]) {
		pr_err("Netdevice for Group 3 does not exist\n");
		return -EINVAL;
	}

	adapter = netdev_priv(edma_netdev[2]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group3_vtag;

	return ret;
}

static int edma_change_group4_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[3]) {
		pr_err("Netdevice for Group 4 does not exist\n");
		return -EINVAL;
	}

	adapter = netdev_priv(edma_netdev[3]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group4_vtag;

	return ret;
}

static int edma_change_group5_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[4]) {
		pr_err("Netdevice for Group 5 does not exist\n");
		return -EINVAL;
	}

	adapter = netdev_priv(edma_netdev[4]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group5_vtag;

	return ret;
}

static int edma_set_rss_idt_value(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write && !ret)
		edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
			       edma_rss_idt_val);
	return ret;
}

static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;
	u32 old_value = edma_rss_idt_idx;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write || ret)
		return ret;

	if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
		pr_err("Invalid RSS indirection table index %u\n",
		       edma_rss_idt_idx);
		edma_rss_idt_idx = old_value;
		return -EINVAL;
	}
	return ret;
}

static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret, queue_id, weight;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
		if (weight < 0 || weight > 0xF) {
			pr_err("weight not within desired range\n");
			return -EINVAL;
		}

		data = weight << EDMA_WRR_SHIFT(queue_id);

		reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
		edma_read_reg(reg_addr, &reg_data);
		/* clear the whole 4-bit weight field before OR-ing in
		 * the new value (weights range from 0 to 0xF)
		 */
		reg_data &= ~(0xF << EDMA_WRR_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}

static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, queue_id, virtual_qid;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		virtual_qid = edma_queue_to_virtual_q >>
			EDMA_WRR_VID_SCTL_SHIFT;
		if (virtual_qid < 0 || virtual_qid > 8) {
			pr_err("virtual queue id not within desired range\n");
			return -EINVAL;
		}

		data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);

		reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
		edma_read_reg(reg_addr, &reg_data);
		/* clear the whole virtual-queue id field before OR-ing
		 * in the new value
		 */
		reg_data &= ~(0xF << EDMA_VQ_ID_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}

static struct ctl_table edma_table[] = {
	{
		.procname = "default_lan_tag",
		.data = &edma_default_ltag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_lan_vlan
	},
	{
		.procname = "default_wan_tag",
		.data = &edma_default_wtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_wan_vlan
	},
	{
		.procname = "weight_assigned_to_queues",
		.data = &edma_weight_assigned_to_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_weight_assigned_to_queues
	},
	{
		.procname = "queue_to_virtual_queue_map",
		.data = &edma_queue_to_virtual_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_queue_to_virtual_queue_map
	},
	{
		.procname = "enable_stp_rstp",
		.data = &edma_enable_rstp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_enable_stp_rstp
	},
	{
		.procname = "athr_hdr_eth_type",
		.data = &edma_athr_hdr_eth_type,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_ath_hdr_eth_type
	},
	{
		.procname = "default_group1_vlan_tag",
		.data = &edma_default_group1_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group1_vtag
	},
	{
		.procname = "default_group2_vlan_tag",
		.data = &edma_default_group2_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group2_vtag
	},
	{
		.procname = "default_group3_vlan_tag",
		.data = &edma_default_group3_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group3_vtag
	},
	{
		.procname = "default_group4_vlan_tag",
		.data = &edma_default_group4_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group4_vtag
	},
	{
		.procname = "default_group5_vlan_tag",
		.data = &edma_default_group5_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group5_vtag
	},
	{
		.procname = "edma_rss_idt_value",
		.data = &edma_rss_idt_val,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_set_rss_idt_value
	},
	{
		.procname = "edma_rss_idt_idx",
		.data = &edma_rss_idt_idx,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_set_rss_idt_idx
	},
	{}
};

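/* Illustrative usage (the table is registered under "net/edma" in
 * edma_axi_probe() below, so the entries appear in /proc/sys/net/edma/):
 *
 *	sysctl -w net.edma.default_wan_tag=2
 *	sysctl -w net.edma.weight_assigned_to_queues=$(( (2 << 16) | 3 ))
 *
 * The second line gives hardware queue 3 a WRR weight of 2, using the
 * packed encoding described near EDMA_WRR_VID_SCTL_SHIFT above.
 */
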
static int ess_parse(struct edma_common_info *edma)
{
	struct device_node *switch_node;
	int ret = -EINVAL;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
	if (!switch_node) {
		pr_err("cannot find ess-switch node\n");
		goto out;
	}

	edma->ess_hw_addr = of_io_request_and_map(switch_node,
						  0, KBUILD_MODNAME);
	/* of_io_request_and_map() returns an ERR_PTR(), not NULL, on failure */
	if (IS_ERR(edma->ess_hw_addr)) {
		ret = PTR_ERR(edma->ess_hw_addr);
		pr_err("%s ioremap fail.", __func__);
		goto out;
	}

	edma->ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
	ret = clk_prepare_enable(edma->ess_clk);
out:
	of_node_put(switch_node);
	return ret;
}

/* edma_axi_netdev_ops
 *	Describe the operations supported by registered netdevices
 */
static const struct net_device_ops edma_axi_netdev_ops = {
	.ndo_open = edma_open,
	.ndo_stop = edma_close,
	.ndo_start_xmit = edma_xmit,
	.ndo_set_mac_address = edma_set_mac_addr,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = edma_rx_flow_steer,
	.ndo_register_rfs_filter = edma_register_rfs_filter,
	.ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
#endif
	.ndo_get_stats = edma_get_stats,
};

/* edma_axi_probe()
 *	Initialise an adapter identified by a platform_device structure.
 *
 *	The OS initialization, configuring of the adapter private structure,
 *	and a hardware reset occur in the probe.
 */
static int edma_axi_probe(struct platform_device *pdev)
{
	struct edma_common_info *edma_cinfo;
	struct edma_hw *hw;
	struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pnp;
	struct device_node *mdio_node = NULL;
	struct mii_bus *miibus = NULL;
	int i, j, k, err = 0;
	int portid_bmp;
	int idx = 0, idx_mac = 0;

	if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
		dev_err(&pdev->dev, "Invalid CPU Cores\n");
		return -EINVAL;
	}

	if ((num_rxq != 4) && (num_rxq != 8)) {
		dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
		return -EINVAL;
	}
	edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
	if (!edma_cinfo) {
		err = -ENOMEM;
		goto err_alloc;
	}

	edma_cinfo->pdev = pdev;

	of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
	if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
		pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
		err = -EINVAL;
		goto err_cinfo;
	}

	/* Initialize the netdev array before allocation
	 * to avoid double free
	 */
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		edma_netdev[i] = NULL;

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
				EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);

		if (!edma_netdev[i]) {
			dev_err(&pdev->dev,
				"net device alloc fails for index=%d\n", i);
			err = -ENODEV;
			goto err_ioremap;
		}

		SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
		platform_set_drvdata(pdev, edma_netdev[i]);
		edma_cinfo->netdev[i] = edma_netdev[i];
	}

	/* Fill ring details */
	edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
	edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
	edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;

	/* Update num rx queues based on module parameter */
	edma_cinfo->num_rx_queues = num_rxq;
	edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);

	edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;

	hw = &edma_cinfo->hw;

	/* Fill HW defaults */
	hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
	hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;

	of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
	of_property_read_u32(np, "qcom,rx_head_buf_size",
			     &hw->rx_head_buff_size);

	if (overwrite_mode) {
		dev_info(&pdev->dev, "page mode overwritten");
		edma_cinfo->page_mode = page_mode;
	}

	if (jumbo_mru)
		edma_cinfo->fraglist_mode = 1;

	if (edma_cinfo->page_mode)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
	else if (edma_cinfo->fraglist_mode)
		hw->rx_head_buff_size = jumbo_mru;
	else if (!hw->rx_head_buff_size)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;

	hw->misc_intr_mask = 0;
	hw->wol_intr_mask = 0;

	hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
	hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

	/* Configure the RSS types for the different protocols that can
	 * be supported
	 */
	hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
		EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
		EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma_cinfo->hw.hw_addr)) {
		err = PTR_ERR(edma_cinfo->hw.hw_addr);
		goto err_ioremap;
	}

	edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;

	/* Parse tx queue interrupt number from device tree */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);

	/* Parse rx queue interrupt number from device tree.
	 * Here j starts where tx interrupt parsing left off (i.e., 16)
	 * and the loop runs from 0 to 7 to parse the rx interrupt numbers.
	 */
	for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
	     i < edma_cinfo->num_rx_queues; i++) {
		edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
		k += ((num_rxq == 4) ? 2 : 1);
		j += ((num_rxq == 4) ? 2 : 1);
	}
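
	/* e.g. with the default num_rxq == 4 the loop above binds platform
	 * irqs 16, 18, 20 and 22 to rx_irq[0], rx_irq[2], rx_irq[4] and
	 * rx_irq[6], skipping every other hardware rx queue.
	 */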

	edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
	edma_cinfo->rx_page_buffer_len = PAGE_SIZE;

	err = edma_alloc_queues_tx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX queue failed\n");
		goto err_tx_qinit;
	}

	err = edma_alloc_queues_rx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX queue failed\n");
		goto err_rx_qinit;
	}

	err = edma_alloc_tx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX resources failed\n");
		goto err_tx_rinit;
	}

	err = edma_alloc_rx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX resources failed\n");
		goto err_rx_rinit;
	}

	/* Initialize netdev and netdev bitmap for transmit descriptor rings */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
		int j;

		etdr->netdev_bmp = 0;
		for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
			etdr->netdev[j] = NULL;
			etdr->nq[j] = NULL;
		}
	}

	if (of_property_read_bool(np, "qcom,mdio_supported")) {
		mdio_node = of_find_compatible_node(NULL, NULL,
						    "qcom,ipq4019-mdio");
		if (!mdio_node) {
			dev_err(&pdev->dev, "cannot find mdio node by phandle");
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		miibus = of_mdio_find_bus(mdio_node);
		if (!miibus) {
			/* don't leak the queues/rings allocated above */
			err = -EINVAL;
			goto err_mdiobus_init_fail;
		}
	}

	if (of_property_read_bool(np, "qcom,single-phy") &&
	    edma_cinfo->num_gmac == 1) {
		err = ess_parse(edma_cinfo);
		if (!err)
			err = ess_reset(edma_cinfo);
		if (err)
			goto err_single_phy_init;
		else
			edma_cinfo->is_single_phy = true;
	}

	for_each_available_child_of_node(np, pnp) {
		/* this check is needed if the parent and daughter dts have
		 * different numbers of gmac nodes
		 */
		if (idx_mac == edma_cinfo->num_gmac) {
			of_node_put(np);
			break;
		}

		of_get_mac_address(pnp, edma_netdev[idx_mac]->dev_addr);

		idx_mac++;
	}

	/* Populate the adapter structures and register the netdevices */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		int k, m;

		adapter[i] = netdev_priv(edma_netdev[i]);
		adapter[i]->netdev = edma_netdev[i];
		adapter[i]->pdev = pdev;
		for (j = 0; j < CONFIG_NR_CPUS; j++) {
			m = i % 2;
			adapter[i]->tx_start_offset[j] =
				((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with available net-devices.
			 * For instance, with 5 net-devices
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining.
			 */
			for (k = adapter[i]->tx_start_offset[j]; k <
			     (adapter[i]->tx_start_offset[j] + 2); k++) {
				if (edma_fill_netdev(edma_cinfo, k, i, j)) {
					pr_err("Netdev overflow Error\n");
					goto err_register;
				}
			}
		}

		adapter[i]->edma_cinfo = edma_cinfo;
		edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
		edma_netdev[i]->max_mtu = 9000;
		edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
				| NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX;
		edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_RX
				| NETIF_F_SG | NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;

#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->hw_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->vlan_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->wanted_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
#endif
		edma_set_ethtool_ops(edma_netdev[i]);

		/* Fall back to a random MAC address if the device tree did
		 * not provide a valid one
		 */
		if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
			random_ether_addr(edma_netdev[i]->dev_addr);
			pr_info("EDMA using random MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
				*(edma_netdev[i]->dev_addr),
				*(edma_netdev[i]->dev_addr + 1),
				*(edma_netdev[i]->dev_addr + 2),
				*(edma_netdev[i]->dev_addr + 3),
				*(edma_netdev[i]->dev_addr + 4),
				*(edma_netdev[i]->dev_addr + 5));
		}

		err = register_netdev(edma_netdev[i]);
		if (err)
			goto err_register;

		/* carrier off reporting is important to
		 * ethtool even BEFORE open
		 */
		netif_carrier_off(edma_netdev[i]);

		/* Allocate reverse irq cpu mapping structure for
		 * receive queues
		 */
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->rx_cpu_rmap =
			alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
		if (!edma_netdev[i]->rx_cpu_rmap) {
			err = -ENOMEM;
			goto err_rmap_alloc_fail;
		}
#endif
	}

	for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
		edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;

	for_each_available_child_of_node(np, pnp) {
		const uint32_t *vlan_tag = NULL;
		int len;

		/* this check is needed if the parent and daughter dts have
		 * different numbers of gmac nodes
		 */
		if (idx == edma_cinfo->num_gmac)
			break;

		/* Populate port-id to netdev lookup table */
		vlan_tag = of_get_property(pnp, "vlan_tag", &len);
		if (!vlan_tag) {
			pr_err("Vlan tag parsing Failed.\n");
			err = -EINVAL;
			goto err_rmap_alloc_fail;
		}

		adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
		vlan_tag++;
		portid_bmp = of_read_number(vlan_tag, 1);
		adapter[idx]->dp_bitmap = portid_bmp;

		portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
		while (portid_bmp) {
			int port_bit = ffs(portid_bmp);

			if (port_bit > EDMA_MAX_PORTID_SUPPORTED) {
				err = -EINVAL;
				goto err_rmap_alloc_fail;
			}
			edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
				edma_netdev[idx];
			portid_bmp &= ~(1 << (port_bit - 1));
		}

		if (!of_property_read_u32(pnp, "qcom,poll_required",
					  &adapter[idx]->poll_required)) {
			if (adapter[idx]->poll_required) {
				of_property_read_u32(pnp, "qcom,phy_mdio_addr",
						     &adapter[idx]->phy_mdio_addr);
				of_property_read_u32(pnp, "qcom,forced_speed",
						     &adapter[idx]->forced_speed);
				of_property_read_u32(pnp, "qcom,forced_duplex",
						     &adapter[idx]->forced_duplex);

				/* create a phyid using MDIO bus id
				 * and MDIO bus address
				 */
				snprintf(adapter[idx]->phy_id,
					 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
					 miibus->id,
					 adapter[idx]->phy_mdio_addr);
			}
		} else {
			adapter[idx]->poll_required = 0;
			adapter[idx]->forced_speed = SPEED_1000;
			adapter[idx]->forced_duplex = DUPLEX_FULL;
		}

		idx++;
	}

	edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
							     "net/edma",
							     edma_table);
	if (!edma_cinfo->edma_ctl_table_hdr) {
		dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
		err = -ENOMEM;
		goto err_unregister_sysctl_tbl;
	}

	/* Disable all 16 tx and 8 rx irqs */
	edma_irq_disable(edma_cinfo);

	err = edma_reset(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_reset;
	}

	/* populate per_core_info, do a napi_add, request 16 TX irqs,
	 * 8 RX irqs, do a napi enable
	 */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		u8 rx_start;

		edma_cinfo->edma_percpu_info[i].napi.state = 0;

		netif_napi_add(edma_netdev[0],
			       &edma_cinfo->edma_percpu_info[i].napi,
			       edma_poll, 64);
		napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
		edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
		edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
			<< (i << EDMA_RX_PER_CPU_MASK_SHIFT);
		edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
		edma_cinfo->edma_percpu_info[i].rx_start =
			i << EDMA_RX_CPU_START_SHIFT;
		rx_start = i << EDMA_RX_CPU_START_SHIFT;
		edma_cinfo->edma_percpu_info[i].tx_status = 0;
		edma_cinfo->edma_percpu_info[i].rx_status = 0;
		edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;

		/* Request irq per core */
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < tx_start[i] + 4; j++) {
			sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
			err = request_irq(edma_cinfo->tx_irq[j],
					  edma_interrupt,
					  0,
					  &edma_tx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < (rx_start +
		     ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
		     j++) {
			sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
			err = request_irq(edma_cinfo->rx_irq[j],
					  edma_interrupt,
					  0,
					  &edma_rx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

#ifdef CONFIG_RFS_ACCEL
		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < rx_start + 2; j += 2) {
			err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
					       edma_cinfo->rx_irq[j]);
			if (err)
				goto err_rmap_add_fail;
		}
#endif
	}

	/* Used to clear interrupt status, allocate rx buffers and
	 * configure edma descriptor registers
	 */
	err = edma_configure(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_configure;
	}

	/* Configure the RSS indirection table.
	 * The 128 hash entries are configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
	 * and so on
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
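
	/* With 128 table entries spread over EDMA_NUM_IDT 32-bit registers,
	 * each register packs eight 4-bit queue numbers (assuming the usual
	 * 16-register layout); EDMA_RSS_IDT_VALUE is the packed default and
	 * can be changed at runtime via the edma_rss_idt_value sysctl.
	 */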

	/* Configure the load balance mapping table.
	 * The 4 table entries are configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 * respectively.
	 */
	edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);

	/* Configure the virtual queues for the Tx rings.
	 * This value can also be changed at runtime through
	 * a sysctl.
	 */
	edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	/* Configure the max AXI burst write size to 128 bytes */
	edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
		       EDMA_AXIW_MAXWRSIZE_VALUE);

	/* Enable all 16 tx and 8 rx irq masks */
	edma_irq_enable(edma_cinfo);
	edma_enable_tx_ctrl(&edma_cinfo->hw);
	edma_enable_rx_ctrl(&edma_cinfo->hw);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (adapter[i]->poll_required) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0)
			phy_interface_t phy_mode;

			err = of_get_phy_mode(np, &phy_mode);
			if (err)
				phy_mode = PHY_INTERFACE_MODE_SGMII;
#else
			int phy_mode = of_get_phy_mode(np);

			if (phy_mode < 0)
				phy_mode = PHY_INTERFACE_MODE_SGMII;
#endif
			adapter[i]->phydev =
				phy_connect(edma_netdev[i],
					    (const char *)adapter[i]->phy_id,
					    &edma_adjust_link,
					    phy_mode);
			if (IS_ERR(adapter[i]->phydev)) {
				dev_dbg(&pdev->dev, "PHY attach FAIL");
				err = -EIO;
				goto edma_phy_attach_fail;
			} else {
				linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
						 adapter[i]->phydev->advertising);
				linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
						 adapter[i]->phydev->advertising);
				linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
						 adapter[i]->phydev->supported);
				linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
						 adapter[i]->phydev->supported);
			}
		} else {
			adapter[i]->phydev = NULL;
		}
	}

	spin_lock_init(&edma_cinfo->stats_lock);

	timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);

	return 0;

edma_phy_attach_fail:
	miibus = NULL;
err_configure:
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
		adapter[i]->netdev->rx_cpu_rmap = NULL;
	}
#endif
err_rmap_add_fail:
	edma_free_irqs(adapter[0]);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
err_reset:
err_unregister_sysctl_tbl:
err_rmap_alloc_fail:
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);
err_register:
err_single_phy_init:
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
err_mdiobus_init_fail:
	edma_free_rx_rings(edma_cinfo);
err_rx_rinit:
	edma_free_tx_rings(edma_cinfo);
err_tx_rinit:
	edma_free_queues(edma_cinfo);
err_rx_qinit:
err_tx_qinit:
	iounmap(edma_cinfo->hw.hw_addr);
err_ioremap:
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (edma_netdev[i])
			free_netdev(edma_netdev[i]);
	}
err_cinfo:
	kfree(edma_cinfo);
err_alloc:
	return err;
}

/* edma_axi_remove()
 *	Device Removal Routine
 *
 * edma_axi_remove is called by the platform subsystem to alert the driver
 * that it should release a platform device.
 */
static int edma_axi_remove(struct platform_device *pdev)
{
	struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_hw *hw = &edma_cinfo->hw;
	int i;

	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

	edma_stop_rx_tx(hw);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

	edma_irq_disable(edma_cinfo);
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
		edma_netdev[i]->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);

		if (adapter->phydev)
			phy_disconnect(adapter->phydev);
	}

	del_timer_sync(&edma_cinfo->edma_stats_timer);
	edma_free_irqs(adapter);
	unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
	edma_free_tx_resources(edma_cinfo);
	edma_free_rx_resources(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_rx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		free_netdev(edma_netdev[i]);

	kfree(edma_cinfo);

	return 0;
}

static const struct of_device_id edma_of_mtable[] = {
	{ .compatible = "qcom,ess-edma" },
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_mtable);

static struct platform_driver edma_axi_driver = {
	.driver = {
		.name = edma_axi_driver_name,
		.of_match_table = edma_of_mtable,
	},
	.probe = edma_axi_probe,
	.remove = edma_axi_remove,
};

module_platform_driver(edma_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("GPL");