ipq40xx: add v5.4 support
[openwrt/openwrt.git] target/linux/ipq40xx/files-5.4/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
/*
 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/cpu_rmap.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/timer.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/string.h>
#include <linux/reset.h>
#include "edma.h"
#include "ess_edma.h"

/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16

char edma_axi_driver_name[] = "ess_edma";
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static u32 edma_hw_addr;

char edma_tx_irq[16][64];
char edma_rx_irq[8][64];
struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
			  EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
			 EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};

static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
static u32 edma_rss_idt_idx;

static int edma_weight_assigned_to_q __read_mostly;
static int edma_queue_to_virtual_q __read_mostly;
/* int, not bool: proc_dointvec() writes a full int into this */
static int edma_enable_rstp __read_mostly;
static int edma_athr_hdr_eth_type __read_mostly;

static int page_mode;
module_param(page_mode, int, 0);
MODULE_PARM_DESC(page_mode, "enable page mode");

static int overwrite_mode;
module_param(overwrite_mode, int, 0);
MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");

static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
module_param(jumbo_mru, int, 0);
MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");

static int num_rxq = 4;
module_param(num_rxq, int, 0);
MODULE_PARM_DESC(num_rxq, "change the number of rx queues");

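/* Register accessors.
 * edma_write_reg()/edma_read_reg() go through the EDMA base mapped in
 * edma_axi_probe(); ess_write_reg()/ess_read_reg() go through the ESS
 * switch base mapped in ess_parse().
 */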
void edma_write_reg(u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
}

void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
}

static void ess_write_reg(struct edma_common_info *edma, u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr)));
}

static void ess_read_reg(struct edma_common_info *edma, u16 reg_addr,
			 volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr));
}

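/* ess_reset()
 * Pulse the "ess_rst" reset line of the switch, then program a minimal
 * single-PHY forwarding setup (CPU port 0 <--> port 5).
 */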
static int ess_reset(struct edma_common_info *edma)
{
	struct device_node *switch_node = NULL;
	struct reset_control *ess_rst;
	u32 regval;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
	if (!switch_node) {
		pr_err("ess-switch node not found\n");
		return -EINVAL;
	}

	ess_rst = of_reset_control_get(switch_node, "ess_rst");
	of_node_put(switch_node);

	if (IS_ERR(ess_rst)) {
		pr_err("failed to find ess_rst!\n");
		return -ENOENT;
	}

	reset_control_assert(ess_rst);
	msleep(10);
	reset_control_deassert(ess_rst);
	msleep(100);
	reset_control_put(ess_rst);

	/* Enable only port 5 <--> port 0.
	 * Bits 0:6 are a bitmap of the ports a port may forward to.
	 */
#define SET_PORT_BMP(r, v) \
	ess_read_reg(edma, r, &regval); \
	ess_write_reg(edma, r, ((regval & ~0x3F) | v));

	SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL, 0x20);
	SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL, 0x01);
	ess_write_reg(edma, ESS_RGMII_CTRL, 0x400);
	ess_write_reg(edma, ESS_PORT0_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT5_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT0_HEADER_CTRL, 0);
#undef SET_PORT_BMP

	/* flood unicast, multicast and broadcast frames to all ports,
	 * including the CPU port
	 */
	ess_write_reg(edma, ESS_FWD_CTRL1,
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_UC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_MC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_BC_FLOOD_S));

	return 0;
}

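/* ess_set_port_status_speed()
 * Mirror the PHY's negotiated speed and duplex into the ESS per-port
 * status register.
 */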
void ess_set_port_status_speed(struct edma_common_info *edma,
			       struct phy_device *phydev, uint8_t port_id)
{
	uint16_t reg_off = ESS_PORT0_STATUS + (4 * port_id);
	uint32_t reg_val = 0;

	ess_read_reg(edma, reg_off, &reg_val);

	/* reset the speed bits [0:1] */
	reg_val &= ~ESS_PORT_STATUS_SPEED_INV;

	/* set the new speed */
	switch (phydev->speed) {
	case SPEED_1000: reg_val |= ESS_PORT_STATUS_SPEED_1000; break;
	case SPEED_100: reg_val |= ESS_PORT_STATUS_SPEED_100; break;
	case SPEED_10: reg_val |= ESS_PORT_STATUS_SPEED_10; break;
	default: reg_val |= ESS_PORT_STATUS_SPEED_INV; break;
	}

	/* check full/half duplex */
	if (phydev->duplex) {
		reg_val |= ESS_PORT_STATUS_DUPLEX_MODE;
	} else {
		reg_val &= ~ESS_PORT_STATUS_DUPLEX_MODE;
	}

	ess_write_reg(edma, reg_off, reg_val);
}

/* edma_change_tx_coalesce()
 * change tx interrupt moderation timer
 */
void edma_change_tx_coalesce(int usecs)
{
	u32 reg_value;

	/* Right-shift the user-supplied value by one because the IMT
	 * timer resolution is 2 usecs: one count of this register
	 * corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

/* edma_change_rx_coalesce()
 * change rx interrupt moderation timer
 */
void edma_change_rx_coalesce(int usecs)
{
	u32 reg_value;

	/* Right-shift the user-supplied value by one because the IMT
	 * timer resolution is 2 usecs: one count of this register
	 * corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

/* edma_get_tx_rx_coalesce()
 * Get tx/rx interrupt moderation value
 */
void edma_get_tx_rx_coalesce(u32 *reg_val)
{
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
}

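/* edma_read_append_stats()
 * Read the per-queue packet and byte counters from hardware and
 * accumulate them into the counters in edma_ethstats.
 */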
void edma_read_append_stats(struct edma_common_info *edma_cinfo)
{
	uint32_t *p;
	int i;
	u32 stat;

	spin_lock_bh(&edma_cinfo->stats_lock);
	p = (uint32_t *)&(edma_cinfo->edma_ethstats);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	spin_unlock_bh(&edma_cinfo->stats_lock);
}

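/* edma_statistics_timer()
 * Periodic (1 Hz) timer callback that folds the hardware queue
 * counters into the driver's cumulative statistics.
 */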
static void edma_statistics_timer(struct timer_list *t)
{
	struct edma_common_info *edma_cinfo =
		from_timer(edma_cinfo, t, edma_stats_timer);

	edma_read_append_stats(edma_cinfo);

	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
}

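/* Sysctl write handlers. Each handler lets proc_dointvec() update the
 * backing variable and, on a successful write, pushes the new value to
 * the driver or the hardware.
 */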
static int edma_enable_stp_rstp(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_set_stp_rstp(edma_enable_rstp);

	return ret;
}

static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);

	return ret;
}

static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for default_lan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_ltag;

	return ret;
}

static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for default_wan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_wtag;

	return ret;
}

static int edma_change_group1_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for Group 1 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group1_vtag;

	return ret;
}

static int edma_change_group2_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for Group 2 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group2_vtag;

	return ret;
}

static int edma_change_group3_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[2]) {
		pr_err("Netdevice for Group 3 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[2]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group3_vtag;

	return ret;
}

static int edma_change_group4_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[3]) {
		pr_err("Netdevice for Group 4 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[3]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group4_vtag;

	return ret;
}

static int edma_change_group5_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[4]) {
		pr_err("Netdevice for Group 5 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[4]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group5_vtag;

	return ret;
}

static int edma_set_rss_idt_value(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write && !ret)
		edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
			       edma_rss_idt_val);
	return ret;
}

static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;
	u32 old_value = edma_rss_idt_idx;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write || ret)
		return ret;

	if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
		pr_err("Invalid RSS indirection table index %d\n",
		       edma_rss_idt_idx);
		edma_rss_idt_idx = old_value;
		return -EINVAL;
	}
	return ret;
}

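/* edma_weight_assigned_to_queues()
 * The written value packs two fields: bits 15:0 select the TX queue
 * and bits 31:16 carry the WRR weight. As an illustrative example,
 *
 *   echo $((2 << 16 | 3)) > /proc/sys/net/edma/weight_assigned_to_queues
 *
 * would assign weight 2 to queue 3.
 */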
static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret, queue_id, weight;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
		if (weight < 0 || weight > 0xF) {
			pr_err("weight not within desired range\n");
			return -EINVAL;
		}

		data = weight << EDMA_WRR_SHIFT(queue_id);

		reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
		edma_read_reg(reg_addr, &reg_data);
		reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}

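/* edma_queue_to_virtual_queue_map()
 * Same packed encoding as above: bits 15:0 select the TX queue,
 * bits 31:16 the virtual queue it maps to.
 */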
static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, queue_id, virtual_qid;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write) {
		queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
		if (queue_id < 0 || queue_id > 15) {
			pr_err("queue_id not within desired range\n");
			return -EINVAL;
		}

		virtual_qid = edma_queue_to_virtual_q >>
			EDMA_WRR_VID_SCTL_SHIFT;
		if (virtual_qid < 0 || virtual_qid > 8) {
			pr_err("virtual_qid not within desired range\n");
			return -EINVAL;
		}

		data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);

		reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
		edma_read_reg(reg_addr, &reg_data);
		reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
		edma_write_reg(reg_addr, data | reg_data);
	}

	return ret;
}

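/* Sysctl entries exported under /proc/sys/net/edma/ (registered in
 * edma_axi_probe() via register_net_sysctl()).
 */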
static struct ctl_table edma_table[] = {
	{
		.procname = "default_lan_tag",
		.data = &edma_default_ltag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_lan_vlan
	},
	{
		.procname = "default_wan_tag",
		.data = &edma_default_wtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_wan_vlan
	},
	{
		.procname = "weight_assigned_to_queues",
		.data = &edma_weight_assigned_to_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_weight_assigned_to_queues
	},
	{
		.procname = "queue_to_virtual_queue_map",
		.data = &edma_queue_to_virtual_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_queue_to_virtual_queue_map
	},
	{
		.procname = "enable_stp_rstp",
		.data = &edma_enable_rstp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_enable_stp_rstp
	},
	{
		.procname = "athr_hdr_eth_type",
		.data = &edma_athr_hdr_eth_type,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_ath_hdr_eth_type
	},
	{
		.procname = "default_group1_vlan_tag",
		.data = &edma_default_group1_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group1_vtag
	},
	{
		.procname = "default_group2_vlan_tag",
		.data = &edma_default_group2_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group2_vtag
	},
	{
		.procname = "default_group3_vlan_tag",
		.data = &edma_default_group3_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group3_vtag
	},
	{
		.procname = "default_group4_vlan_tag",
		.data = &edma_default_group4_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group4_vtag
	},
	{
		.procname = "default_group5_vlan_tag",
		.data = &edma_default_group5_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group5_vtag
	},
	{
		.procname = "edma_rss_idt_value",
		.data = &edma_rss_idt_val,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_set_rss_idt_value
	},
	{
		.procname = "edma_rss_idt_idx",
		.data = &edma_rss_idt_idx,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_set_rss_idt_idx
	},
	{}
};

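/* ess_parse()
 * Map the ess-switch register space and enable its clock; only used
 * for the qcom,single-phy configuration.
 */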
static int ess_parse(struct edma_common_info *edma)
{
	struct device_node *switch_node;
	int ret = -EINVAL;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
	if (!switch_node) {
		pr_err("cannot find ess-switch node\n");
		goto out;
	}

	edma->ess_hw_addr = of_io_request_and_map(switch_node,
						  0, KBUILD_MODNAME);
	if (IS_ERR(edma->ess_hw_addr)) {
		pr_err("%s ioremap fail.\n", __func__);
		ret = PTR_ERR(edma->ess_hw_addr);
		goto out;
	}

	edma->ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
	if (IS_ERR(edma->ess_clk)) {
		pr_err("cannot get ess_clk\n");
		ret = PTR_ERR(edma->ess_clk);
		goto out;
	}

	ret = clk_prepare_enable(edma->ess_clk);
out:
	of_node_put(switch_node);
	return ret;
}

/* edma_axi_netdev_ops
 * Describe the operations supported by registered netdevices
 */
static const struct net_device_ops edma_axi_netdev_ops = {
	.ndo_open = edma_open,
	.ndo_stop = edma_close,
	.ndo_start_xmit = edma_xmit,
	.ndo_set_mac_address = edma_set_mac_addr,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = edma_rx_flow_steer,
	.ndo_register_rfs_filter = edma_register_rfs_filter,
	.ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
#endif
	.ndo_get_stats = edma_get_stats,
};

/* edma_axi_probe()
 * Initialise an adapter identified by a platform_device structure.
 *
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur in the probe.
 */
static int edma_axi_probe(struct platform_device *pdev)
{
	struct edma_common_info *edma_cinfo;
	struct edma_hw *hw;
	struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pnp;
	struct device_node *mdio_node = NULL;
	struct platform_device *mdio_plat = NULL;
	struct mii_bus *miibus = NULL;
	struct edma_mdio_data *mdio_data = NULL;
	int i, j, k, err = 0;
	int portid_bmp;
	int idx = 0, idx_mac = 0;

	if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
		dev_err(&pdev->dev, "Invalid CPU Cores\n");
		return -EINVAL;
	}

	if ((num_rxq != 4) && (num_rxq != 8)) {
		dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
		return -EINVAL;
	}
	edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
	if (!edma_cinfo) {
		err = -ENOMEM;
		goto err_alloc;
	}

	edma_cinfo->pdev = pdev;

	of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
	if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
		pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
		err = -EINVAL;
		goto err_cinfo;
	}

	/* Initialize the netdev array before allocation
	 * to avoid double free
	 */
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		edma_netdev[i] = NULL;

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
				EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);

		if (!edma_netdev[i]) {
			dev_err(&pdev->dev,
				"net device alloc fails for index=%d\n", i);
			err = -ENODEV;
			goto err_ioremap;
		}

		SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
		platform_set_drvdata(pdev, edma_netdev[i]);
		edma_cinfo->netdev[i] = edma_netdev[i];
	}

	/* Fill ring details */
	edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
	edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
	edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;

	/* Update num rx queues based on module parameter */
	edma_cinfo->num_rx_queues = num_rxq;
	edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);

	edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;

	hw = &edma_cinfo->hw;

	/* Fill HW defaults */
	hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
	hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;

	of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
	of_property_read_u32(np, "qcom,rx_head_buf_size",
			     &hw->rx_head_buff_size);

	if (overwrite_mode) {
		dev_info(&pdev->dev, "page mode overwritten\n");
		edma_cinfo->page_mode = page_mode;
	}

	if (jumbo_mru)
		edma_cinfo->fraglist_mode = 1;

	if (edma_cinfo->page_mode)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
	else if (edma_cinfo->fraglist_mode)
		hw->rx_head_buff_size = jumbo_mru;
	else if (!hw->rx_head_buff_size)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;

	hw->misc_intr_mask = 0;
	hw->wol_intr_mask = 0;

	hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
	hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

	/* configure RSS type for the different protocols that can be
	 * supported
	 */
	hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
		EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
		EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma_cinfo->hw.hw_addr)) {
		err = PTR_ERR(edma_cinfo->hw.hw_addr);
		goto err_ioremap;
	}

	edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;

	/* Parse tx queue interrupt number from device tree */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);

	/* Parse rx queue interrupt number from device tree.
	 * Here we set j to the point where tx interrupt parsing
	 * left off (i.e. 16) and run the loop from 0 to 7 to parse
	 * the rx interrupt numbers.
	 */
	for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
	     i < edma_cinfo->num_rx_queues; i++) {
		edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
		k += ((num_rxq == 4) ? 2 : 1);
		j += ((num_rxq == 4) ? 2 : 1);
	}

	edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
	edma_cinfo->rx_page_buffer_len = PAGE_SIZE;

	err = edma_alloc_queues_tx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX queue failed\n");
		goto err_tx_qinit;
	}

	err = edma_alloc_queues_rx(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX queue failed\n");
		goto err_rx_qinit;
	}

	err = edma_alloc_tx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of TX resources failed\n");
		goto err_tx_rinit;
	}

	err = edma_alloc_rx_rings(edma_cinfo);
	if (err) {
		dev_err(&pdev->dev, "Allocation of RX resources failed\n");
		goto err_rx_rinit;
	}

	/* Initialize netdev and netdev bitmap for transmit descriptor rings */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
		int j;

		etdr->netdev_bmp = 0;
		for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
			etdr->netdev[j] = NULL;
			etdr->nq[j] = NULL;
		}
	}

	if (of_property_read_bool(np, "qcom,mdio_supported")) {
		mdio_node = of_find_compatible_node(NULL, NULL,
						    "qcom,ipq4019-mdio");
		if (!mdio_node) {
			dev_err(&pdev->dev, "cannot find mdio node\n");
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		mdio_plat = of_find_device_by_node(mdio_node);
		if (!mdio_plat) {
			dev_err(&pdev->dev,
				"cannot find platform device from mdio node\n");
			of_node_put(mdio_node);
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		mdio_data = dev_get_drvdata(&mdio_plat->dev);
		if (!mdio_data) {
			dev_err(&pdev->dev,
				"cannot get mii bus reference from device data\n");
			of_node_put(mdio_node);
			err = -EIO;
			goto err_mdiobus_init_fail;
		}

		miibus = mdio_data->mii_bus;
	}

	if (of_property_read_bool(np, "qcom,single-phy") &&
	    edma_cinfo->num_gmac == 1) {
		err = ess_parse(edma_cinfo);
		if (!err)
			err = ess_reset(edma_cinfo);
		if (err)
			goto err_single_phy_init;
		else
			edma_cinfo->is_single_phy = true;
	}

	for_each_available_child_of_node(np, pnp) {
		const char *mac_addr;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx_mac == edma_cinfo->num_gmac) {
			of_node_put(pnp);
			break;
		}

		mac_addr = of_get_mac_address(pnp);
		if (!IS_ERR(mac_addr))
			memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);

		idx_mac++;
	}

	/* Populate the adapter structure and register the netdevice */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		int k, m;

		adapter[i] = netdev_priv(edma_netdev[i]);
		adapter[i]->netdev = edma_netdev[i];
		adapter[i]->pdev = pdev;
		for (j = 0; j < CONFIG_NR_CPUS; j++) {
			m = i % 2;
			adapter[i]->tx_start_offset[j] =
				((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with available net-devices.
			 * For instance, with 5 net-devices
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining.
			 */
			for (k = adapter[i]->tx_start_offset[j]; k <
			     (adapter[i]->tx_start_offset[j] + 2); k++) {
				if (edma_fill_netdev(edma_cinfo, k, i, j)) {
					pr_err("Netdev overflow Error\n");
					goto err_register;
				}
			}
		}

		adapter[i]->edma_cinfo = edma_cinfo;
		edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
		edma_netdev[i]->max_mtu = 9000;
		edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
				| NETIF_F_HW_VLAN_CTAG_TX
				| NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GRO;
		edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_RX
				| NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
		edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;
		edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_TSO6 |
				NETIF_F_GRO;

#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
#endif
		edma_set_ethtool_ops(edma_netdev[i]);

		/* If the DT did not supply a valid MAC address, fall back
		 * to a random one
		 */
		if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
			random_ether_addr(edma_netdev[i]->dev_addr);
			pr_info("EDMA using random MAC address %pM\n",
				edma_netdev[i]->dev_addr);
		}

		err = register_netdev(edma_netdev[i]);
		if (err)
			goto err_register;

		/* carrier off reporting is important to
		 * ethtool even BEFORE open
		 */
		netif_carrier_off(edma_netdev[i]);

		/* Allocate reverse irq cpu mapping structure for
		 * receive queues
		 */
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->rx_cpu_rmap =
			alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
		if (!edma_netdev[i]->rx_cpu_rmap) {
			err = -ENOMEM;
			goto err_rmap_alloc_fail;
		}
#endif
	}

	for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
		edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;

	for_each_available_child_of_node(np, pnp) {
		const uint32_t *vlan_tag = NULL;
		int len;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx == edma_cinfo->num_gmac)
			break;

		/* Populate port-id to netdev lookup table */
		vlan_tag = of_get_property(pnp, "vlan_tag", &len);
		if (!vlan_tag) {
			pr_err("Vlan tag parsing Failed.\n");
			goto err_rmap_alloc_fail;
		}

		adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
		vlan_tag++;
		portid_bmp = of_read_number(vlan_tag, 1);
		adapter[idx]->dp_bitmap = portid_bmp;

		portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
		while (portid_bmp) {
			int port_bit = ffs(portid_bmp);

			if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
				goto err_rmap_alloc_fail;
			edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
				edma_netdev[idx];
			portid_bmp &= ~(1 << (port_bit - 1));
		}

		if (!of_property_read_u32(pnp, "qcom,poll_required",
					  &adapter[idx]->poll_required)) {
			if (adapter[idx]->poll_required) {
				of_property_read_u32(pnp, "qcom,phy_mdio_addr",
						     &adapter[idx]->phy_mdio_addr);
				of_property_read_u32(pnp, "qcom,forced_speed",
						     &adapter[idx]->forced_speed);
				of_property_read_u32(pnp, "qcom,forced_duplex",
						     &adapter[idx]->forced_duplex);

				/* create a phyid using MDIO bus id
				 * and MDIO bus address
				 */
				snprintf(adapter[idx]->phy_id,
					 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
					 miibus->id,
					 adapter[idx]->phy_mdio_addr);
			}
		} else {
			adapter[idx]->poll_required = 0;
			adapter[idx]->forced_speed = SPEED_1000;
			adapter[idx]->forced_duplex = DUPLEX_FULL;
		}

		idx++;
	}

	edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
							     "net/edma",
							     edma_table);
	if (!edma_cinfo->edma_ctl_table_hdr) {
		dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
		goto err_unregister_sysctl_tbl;
	}

	/* Disable all 16 Tx and 8 rx irqs */
	edma_irq_disable(edma_cinfo);

	err = edma_reset(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_reset;
	}

	/* populate per_core_info, do a napi_add, request 16 TX irqs,
	 * 8 RX irqs, do a napi enable
	 */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		u8 rx_start;

		edma_cinfo->edma_percpu_info[i].napi.state = 0;

		netif_napi_add(edma_netdev[0],
			       &edma_cinfo->edma_percpu_info[i].napi,
			       edma_poll, 64);
		napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
		edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
		edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
			<< (i << EDMA_RX_PER_CPU_MASK_SHIFT);
		edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
		edma_cinfo->edma_percpu_info[i].rx_start =
			i << EDMA_RX_CPU_START_SHIFT;
		rx_start = i << EDMA_RX_CPU_START_SHIFT;
		edma_cinfo->edma_percpu_info[i].tx_status = 0;
		edma_cinfo->edma_percpu_info[i].rx_status = 0;
		edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;

		/* Request irq per core */
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < tx_start[i] + 4; j++) {
			sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
			err = request_irq(edma_cinfo->tx_irq[j],
					  edma_interrupt,
					  0,
					  &edma_tx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < (rx_start +
			  ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
		     j++) {
			sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
			err = request_irq(edma_cinfo->rx_irq[j],
					  edma_interrupt,
					  0,
					  &edma_rx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
			if (err)
				goto err_reset;
		}

#ifdef CONFIG_RFS_ACCEL
		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < rx_start + 2; j += 2) {
			err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
					       edma_cinfo->rx_irq[j]);
			if (err)
				goto err_rmap_add_fail;
		}
#endif
	}

	/* Used to clear interrupt status, allocate rx buffers,
	 * configure edma descriptor registers
	 */
	err = edma_configure(edma_cinfo);
	if (err) {
		err = -EIO;
		goto err_configure;
	}

	/* Configure RSS indirection table.
	 * 128 hash entries will be configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively,
	 * and so on
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);

	/* Configure load balance mapping table.
	 * 4 table entries will be configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 * respectively.
	 */
	edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings.
	 * Users can also change this value at runtime through
	 * a sysctl
	 */
	edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	/* Configure max AXI burst write size to 128 bytes */
	edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
		       EDMA_AXIW_MAXWRSIZE_VALUE);

	/* Enable all 16 tx and 8 rx irq masks */
	edma_irq_enable(edma_cinfo);
	edma_enable_tx_ctrl(&edma_cinfo->hw);
	edma_enable_rx_ctrl(&edma_cinfo->hw);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (adapter[i]->poll_required) {
			int phy_mode = of_get_phy_mode(np);

			if (phy_mode < 0)
				phy_mode = PHY_INTERFACE_MODE_SGMII;
			adapter[i]->phydev =
				phy_connect(edma_netdev[i],
					    (const char *)adapter[i]->phy_id,
					    &edma_adjust_link,
					    phy_mode);
			if (IS_ERR(adapter[i]->phydev)) {
				dev_dbg(&pdev->dev, "PHY attach FAIL\n");
				err = -EIO;
				goto edma_phy_attach_fail;
			} else {
				linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
						 adapter[i]->phydev->advertising);
				linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
						 adapter[i]->phydev->advertising);
				linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
						 adapter[i]->phydev->supported);
				linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
						 adapter[i]->phydev->supported);
			}
		} else {
			adapter[i]->phydev = NULL;
		}
	}

	spin_lock_init(&edma_cinfo->stats_lock);

	timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);

	return 0;

edma_phy_attach_fail:
	miibus = NULL;
err_configure:
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
		adapter[i]->netdev->rx_cpu_rmap = NULL;
	}
#endif
err_rmap_add_fail:
	edma_free_irqs(adapter[0]);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
err_reset:
err_unregister_sysctl_tbl:
err_rmap_alloc_fail:
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);
err_register:
err_single_phy_init:
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
err_mdiobus_init_fail:
	edma_free_rx_rings(edma_cinfo);
err_rx_rinit:
	edma_free_tx_rings(edma_cinfo);
err_tx_rinit:
	edma_free_queues(edma_cinfo);
err_rx_qinit:
err_tx_qinit:
	iounmap(edma_cinfo->hw.hw_addr);
err_ioremap:
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (edma_netdev[i])
			free_netdev(edma_netdev[i]);
	}
err_cinfo:
	kfree(edma_cinfo);
err_alloc:
	return err;
}

/* edma_axi_remove()
 * Device Removal Routine
 *
 * edma_axi_remove is called by the platform subsystem to alert the driver
 * that it should release a platform device.
 */
static int edma_axi_remove(struct platform_device *pdev)
{
	struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_hw *hw = &edma_cinfo->hw;
	int i;

	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

	edma_stop_rx_tx(hw);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

	edma_irq_disable(edma_cinfo);
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
		edma_netdev[i]->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);

		if (adapter->phydev)
			phy_disconnect(adapter->phydev);
	}

	del_timer_sync(&edma_cinfo->edma_stats_timer);
	edma_free_irqs(adapter);
	unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
	edma_free_tx_resources(edma_cinfo);
	edma_free_rx_resources(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_rx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		free_netdev(edma_netdev[i]);

	kfree(edma_cinfo);

	return 0;
}

static const struct of_device_id edma_of_mtable[] = {
	{.compatible = "qcom,ess-edma" },
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_mtable);

static struct platform_driver edma_axi_driver = {
	.driver = {
		.name = edma_axi_driver_name,
		.of_match_table = edma_of_mtable,
	},
	.probe = edma_axi_probe,
	.remove = edma_axi_remove,
};

module_platform_driver(edma_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("GPL");