treewide: backport support for nvmem on non platform devices
[openwrt/staging/wigyori.git] / target / linux / ipq40xx / files / drivers / net / ethernet / qualcomm / essedma / edma_axi.c
1 /*
2 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 */
15
16 #include <linux/cpu_rmap.h>
17 #include <linux/of.h>
18 #include <linux/of_net.h>
19 #include <linux/timer.h>
20 #include <linux/of_platform.h>
21 #include <linux/of_address.h>
22 #include <linux/of_mdio.h>
23 #include <linux/clk.h>
24 #include <linux/string.h>
25 #include <linux/reset.h>
26 #include "edma.h"
27 #include "ess_edma.h"
28
29 /* Weight round robin and virtual QID mask */
30 #define EDMA_WRR_VID_SCTL_MASK 0xffff
31
32 /* Weight round robin and virtual QID shift */
33 #define EDMA_WRR_VID_SCTL_SHIFT 16
34
35 char edma_axi_driver_name[] = "ess_edma";
36 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
37 NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;
38
39 static u32 edma_hw_addr;
40
41 char edma_tx_irq[16][64];
42 char edma_rx_irq[8][64];
43 struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
44 static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
45 EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
46 static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
47 EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
48
49 static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
50 static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
51 static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
52 static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
53 static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
54 static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
55 static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
56 static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
57 static u32 edma_rss_idt_idx;
58
59 static int edma_weight_assigned_to_q __read_mostly;
60 static int edma_queue_to_virtual_q __read_mostly;
61 static bool edma_enable_rstp __read_mostly;
62 static int edma_athr_hdr_eth_type __read_mostly;
63
64 static int page_mode;
65 module_param(page_mode, int, 0);
66 MODULE_PARM_DESC(page_mode, "enable page mode");
67
68 static int overwrite_mode;
69 module_param(overwrite_mode, int, 0);
70 MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");
71
72 static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
73 module_param(jumbo_mru, int, 0);
74 MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");
75
76 static int num_rxq = 4;
77 module_param(num_rxq, int, 0);
78 MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
79
80 void edma_write_reg(u16 reg_addr, u32 reg_value)
81 {
82 writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
83 }
84
85 void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
86 {
87 *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
88 }
89
90 static void ess_write_reg(struct edma_common_info *edma, u16 reg_addr, u32 reg_value)
91 {
92 writel(reg_value, ((void __iomem *)
93 ((unsigned long)edma->ess_hw_addr + reg_addr)));
94 }
95
96 static void ess_read_reg(struct edma_common_info *edma, u16 reg_addr,
97 volatile u32 *reg_value)
98 {
99 *reg_value = readl((void __iomem *)
100 ((unsigned long)edma->ess_hw_addr + reg_addr));
101 }
102
103 static int ess_reset(struct edma_common_info *edma)
104 {
105 struct device_node *switch_node = NULL;
106 struct reset_control *ess_rst;
107 u32 regval;
108
109 switch_node = of_find_node_by_name(NULL, "ess-switch");
110 if (!switch_node) {
111 pr_err("switch-node not found\n");
112 return -EINVAL;
113 }
114
115 ess_rst = of_reset_control_get(switch_node, "ess_rst");
116 of_node_put(switch_node);
117
118 if (IS_ERR(ess_rst)) {
119 pr_err("failed to find ess_rst!\n");
120 return -ENOENT;
121 }
122
123 reset_control_assert(ess_rst);
124 msleep(10);
125 reset_control_deassert(ess_rst);
126 msleep(100);
127 reset_control_put(ess_rst);
128
129 /* Enable only port 5 <--> port 0
130 * bits 0:6 bitmap of ports it can fwd to */
131 #define SET_PORT_BMP(r,v) \
132 ess_read_reg(edma, r, &regval); \
133 ess_write_reg(edma, r, ((regval & ~0x3F) | v));
134
135 SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL,0x20);
136 SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL,0x00);
137 SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL,0x00);
138 SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL,0x00);
139 SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL,0x00);
140 SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL,0x01);
141 ess_write_reg(edma, ESS_RGMII_CTRL, 0x400);
142 ess_write_reg(edma, ESS_PORT0_STATUS, ESS_PORT_1G_FDX);
143 ess_write_reg(edma, ESS_PORT5_STATUS, ESS_PORT_1G_FDX);
144 ess_write_reg(edma, ESS_PORT0_HEADER_CTRL, 0);
145 #undef SET_PORT_BMP
146
147 /* forward multicast and broadcast frames to CPU */
148 ess_write_reg(edma, ESS_FWD_CTRL1,
149 (ESS_PORTS_ALL << ESS_FWD_CTRL1_UC_FLOOD_S) |
150 (ESS_PORTS_ALL << ESS_FWD_CTRL1_MC_FLOOD_S) |
151 (ESS_PORTS_ALL << ESS_FWD_CTRL1_BC_FLOOD_S));
152
153 return 0;
154 }
155
156 void ess_set_port_status_speed(struct edma_common_info *edma,
157 struct phy_device *phydev, uint8_t port_id)
158 {
159 uint16_t reg_off = ESS_PORT0_STATUS + (4 * port_id);
160 uint32_t reg_val = 0;
161
162 ess_read_reg(edma, reg_off, &reg_val);
163
164 /* reset the speed bits [0:1] */
165 reg_val &= ~ESS_PORT_STATUS_SPEED_INV;
166
167 /* set the new speed */
168 switch(phydev->speed) {
169 case SPEED_1000: reg_val |= ESS_PORT_STATUS_SPEED_1000; break;
170 case SPEED_100: reg_val |= ESS_PORT_STATUS_SPEED_100; break;
171 case SPEED_10: reg_val |= ESS_PORT_STATUS_SPEED_10; break;
172 default: reg_val |= ESS_PORT_STATUS_SPEED_INV; break;
173 }
174
175 /* check full/half duplex */
176 if (phydev->duplex) {
177 reg_val |= ESS_PORT_STATUS_DUPLEX_MODE;
178 } else {
179 reg_val &= ~ESS_PORT_STATUS_DUPLEX_MODE;
180 }
181
182 ess_write_reg(edma, reg_off, reg_val);
183 }
184
185 /* edma_change_tx_coalesce()
186 * change tx interrupt moderation timer
187 */
188 void edma_change_tx_coalesce(int usecs)
189 {
190 u32 reg_value;
191
192 /* Here, we right shift the value from the user by 1, this is
193 * done because IMT resolution timer is 2usecs. 1 count
194 * of this register corresponds to 2 usecs.
195 */
196 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
197 reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
198 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
199 }
200
201 /* edma_change_rx_coalesce()
202 * change rx interrupt moderation timer
203 */
204 void edma_change_rx_coalesce(int usecs)
205 {
206 u32 reg_value;
207
208 /* Here, we right shift the value from the user by 1, this is
209 * done because IMT resolution timer is 2usecs. 1 count
210 * of this register corresponds to 2 usecs.
211 */
212 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
213 reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
214 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
215 }
216
/* edma_get_tx_rx_coalesce()
 *	Get tx/rx interrupt moderation value.
 *	Returns the raw moderation register: TX timer in the upper 16
 *	bits, RX timer in the lower 16 bits, both in 2us units (see the
 *	edma_change_*_coalesce() helpers above).
 */
void edma_get_tx_rx_coalesce(u32 *reg_val)
{
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
}
224
225 void edma_read_append_stats(struct edma_common_info *edma_cinfo)
226 {
227 uint32_t *p;
228 int i;
229 u32 stat;
230
231 spin_lock_bh(&edma_cinfo->stats_lock);
232 p = (uint32_t *)&(edma_cinfo->edma_ethstats);
233
234 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
235 edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
236 *p += stat;
237 p++;
238 }
239
240 for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
241 edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
242 *p += stat;
243 p++;
244 }
245
246 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
247 edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
248 *p += stat;
249 p++;
250 }
251
252 for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
253 edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
254 *p += stat;
255 p++;
256 }
257
258 spin_unlock_bh(&edma_cinfo->stats_lock);
259 }
260
261 static void edma_statistics_timer(struct timer_list *t)
262 {
263 struct edma_common_info *edma_cinfo =
264 from_timer(edma_cinfo, t, edma_stats_timer);
265
266 edma_read_append_stats(edma_cinfo);
267
268 mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
269 }
270
271 static int edma_enable_stp_rstp(struct ctl_table *table, int write,
272 void __user *buffer, size_t *lenp,
273 loff_t *ppos)
274 {
275 int ret;
276
277 ret = proc_dointvec(table, write, buffer, lenp, ppos);
278 if (write)
279 edma_set_stp_rstp(edma_enable_rstp);
280
281 return ret;
282 }
283
284 static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
285 void __user *buffer, size_t *lenp,
286 loff_t *ppos)
287 {
288 int ret;
289
290 ret = proc_dointvec(table, write, buffer, lenp, ppos);
291 if (write)
292 edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
293
294 return ret;
295 }
296
297 static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
298 void __user *buffer, size_t *lenp,
299 loff_t *ppos)
300 {
301 struct edma_adapter *adapter;
302 int ret;
303
304 if (!edma_netdev[1]) {
305 pr_err("Netdevice for default_lan does not exist\n");
306 return -1;
307 }
308
309 adapter = netdev_priv(edma_netdev[1]);
310
311 ret = proc_dointvec(table, write, buffer, lenp, ppos);
312
313 if (write)
314 adapter->default_vlan_tag = edma_default_ltag;
315
316 return ret;
317 }
318
319 static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
320 void __user *buffer, size_t *lenp,
321 loff_t *ppos)
322 {
323 struct edma_adapter *adapter;
324 int ret;
325
326 if (!edma_netdev[0]) {
327 pr_err("Netdevice for default_wan does not exist\n");
328 return -1;
329 }
330
331 adapter = netdev_priv(edma_netdev[0]);
332
333 ret = proc_dointvec(table, write, buffer, lenp, ppos);
334
335 if (write)
336 adapter->default_vlan_tag = edma_default_wtag;
337
338 return ret;
339 }
340
341 static int edma_change_group1_vtag(struct ctl_table *table, int write,
342 void __user *buffer, size_t *lenp,
343 loff_t *ppos)
344 {
345 struct edma_adapter *adapter;
346 struct edma_common_info *edma_cinfo;
347 int ret;
348
349 if (!edma_netdev[0]) {
350 pr_err("Netdevice for Group 1 does not exist\n");
351 return -1;
352 }
353
354 adapter = netdev_priv(edma_netdev[0]);
355 edma_cinfo = adapter->edma_cinfo;
356
357 ret = proc_dointvec(table, write, buffer, lenp, ppos);
358
359 if (write)
360 adapter->default_vlan_tag = edma_default_group1_vtag;
361
362 return ret;
363 }
364
365 static int edma_change_group2_vtag(struct ctl_table *table, int write,
366 void __user *buffer, size_t *lenp,
367 loff_t *ppos)
368 {
369 struct edma_adapter *adapter;
370 struct edma_common_info *edma_cinfo;
371 int ret;
372
373 if (!edma_netdev[1]) {
374 pr_err("Netdevice for Group 2 does not exist\n");
375 return -1;
376 }
377
378 adapter = netdev_priv(edma_netdev[1]);
379 edma_cinfo = adapter->edma_cinfo;
380
381 ret = proc_dointvec(table, write, buffer, lenp, ppos);
382
383 if (write)
384 adapter->default_vlan_tag = edma_default_group2_vtag;
385
386 return ret;
387 }
388
389 static int edma_change_group3_vtag(struct ctl_table *table, int write,
390 void __user *buffer, size_t *lenp,
391 loff_t *ppos)
392 {
393 struct edma_adapter *adapter;
394 struct edma_common_info *edma_cinfo;
395 int ret;
396
397 if (!edma_netdev[2]) {
398 pr_err("Netdevice for Group 3 does not exist\n");
399 return -1;
400 }
401
402 adapter = netdev_priv(edma_netdev[2]);
403 edma_cinfo = adapter->edma_cinfo;
404
405 ret = proc_dointvec(table, write, buffer, lenp, ppos);
406
407 if (write)
408 adapter->default_vlan_tag = edma_default_group3_vtag;
409
410 return ret;
411 }
412
413 static int edma_change_group4_vtag(struct ctl_table *table, int write,
414 void __user *buffer, size_t *lenp,
415 loff_t *ppos)
416 {
417 struct edma_adapter *adapter;
418 struct edma_common_info *edma_cinfo;
419 int ret;
420
421 if (!edma_netdev[3]) {
422 pr_err("Netdevice for Group 4 does not exist\n");
423 return -1;
424 }
425
426 adapter = netdev_priv(edma_netdev[3]);
427 edma_cinfo = adapter->edma_cinfo;
428
429 ret = proc_dointvec(table, write, buffer, lenp, ppos);
430
431 if (write)
432 adapter->default_vlan_tag = edma_default_group4_vtag;
433
434 return ret;
435 }
436
437 static int edma_change_group5_vtag(struct ctl_table *table, int write,
438 void __user *buffer, size_t *lenp,
439 loff_t *ppos)
440 {
441 struct edma_adapter *adapter;
442 struct edma_common_info *edma_cinfo;
443 int ret;
444
445 if (!edma_netdev[4]) {
446 pr_err("Netdevice for Group 5 does not exist\n");
447 return -1;
448 }
449
450 adapter = netdev_priv(edma_netdev[4]);
451 edma_cinfo = adapter->edma_cinfo;
452
453 ret = proc_dointvec(table, write, buffer, lenp, ppos);
454
455 if (write)
456 adapter->default_vlan_tag = edma_default_group5_vtag;
457
458 return ret;
459 }
460
461 static int edma_set_rss_idt_value(struct ctl_table *table, int write,
462 void __user *buffer, size_t *lenp,
463 loff_t *ppos)
464 {
465 int ret;
466
467 ret = proc_dointvec(table, write, buffer, lenp, ppos);
468 if (write && !ret)
469 edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
470 edma_rss_idt_val);
471 return ret;
472 }
473
474 static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
475 void __user *buffer, size_t *lenp,
476 loff_t *ppos)
477 {
478 int ret;
479 u32 old_value = edma_rss_idt_idx;
480
481 ret = proc_dointvec(table, write, buffer, lenp, ppos);
482 if (!write || ret)
483 return ret;
484
485 if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
486 pr_err("Invalid RSS indirection table index %d\n",
487 edma_rss_idt_idx);
488 edma_rss_idt_idx = old_value;
489 return -EINVAL;
490 }
491 return ret;
492 }
493
494 static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
495 void __user *buffer, size_t *lenp,
496 loff_t *ppos)
497 {
498 int ret, queue_id, weight;
499 u32 reg_data, data, reg_addr;
500
501 ret = proc_dointvec(table, write, buffer, lenp, ppos);
502 if (write) {
503 queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
504 if (queue_id < 0 || queue_id > 15) {
505 pr_err("queue_id not within desired range\n");
506 return -EINVAL;
507 }
508
509 weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
510 if (weight < 0 || weight > 0xF) {
511 pr_err("queue_id not within desired range\n");
512 return -EINVAL;
513 }
514
515 data = weight << EDMA_WRR_SHIFT(queue_id);
516
517 reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
518 edma_read_reg(reg_addr, &reg_data);
519 reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
520 edma_write_reg(reg_addr, data | reg_data);
521 }
522
523 return ret;
524 }
525
526 static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
527 void __user *buffer, size_t *lenp,
528 loff_t *ppos)
529 {
530 int ret, queue_id, virtual_qid;
531 u32 reg_data, data, reg_addr;
532
533 ret = proc_dointvec(table, write, buffer, lenp, ppos);
534 if (write) {
535 queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
536 if (queue_id < 0 || queue_id > 15) {
537 pr_err("queue_id not within desired range\n");
538 return -EINVAL;
539 }
540
541 virtual_qid = edma_queue_to_virtual_q >>
542 EDMA_WRR_VID_SCTL_SHIFT;
543 if (virtual_qid < 0 || virtual_qid > 8) {
544 pr_err("queue_id not within desired range\n");
545 return -EINVAL;
546 }
547
548 data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);
549
550 reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
551 edma_read_reg(reg_addr, &reg_data);
552 reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
553 edma_write_reg(reg_addr, data | reg_data);
554 }
555
556 return ret;
557 }
558
/* sysctl table registered under net/edma by edma_axi_probe() via
 * register_net_sysctl(). All entries are plain integers handled through
 * proc_dointvec-based handlers above; each .data target must therefore
 * be at least sizeof(int) wide.
 * NOTE(review): verify edma_enable_rstp's declaration is int-sized —
 * proc_dointvec writes a full int through .data.
 */
static struct ctl_table edma_table[] = {
	{
		.procname = "default_lan_tag",
		.data = &edma_default_ltag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_lan_vlan
	},
	{
		.procname = "default_wan_tag",
		.data = &edma_default_wtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_default_wan_vlan
	},
	{
		.procname = "weight_assigned_to_queues",
		.data = &edma_weight_assigned_to_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_weight_assigned_to_queues
	},
	{
		.procname = "queue_to_virtual_queue_map",
		.data = &edma_queue_to_virtual_q,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_queue_to_virtual_queue_map
	},
	{
		.procname = "enable_stp_rstp",
		.data = &edma_enable_rstp,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_enable_stp_rstp
	},
	{
		.procname = "athr_hdr_eth_type",
		.data = &edma_athr_hdr_eth_type,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_ath_hdr_eth_type
	},
	{
		.procname = "default_group1_vlan_tag",
		.data = &edma_default_group1_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group1_vtag
	},
	{
		.procname = "default_group2_vlan_tag",
		.data = &edma_default_group2_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group2_vtag
	},
	{
		.procname = "default_group3_vlan_tag",
		.data = &edma_default_group3_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group3_vtag
	},
	{
		.procname = "default_group4_vlan_tag",
		.data = &edma_default_group4_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group4_vtag
	},
	{
		.procname = "default_group5_vlan_tag",
		.data = &edma_default_group5_vtag,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_change_group5_vtag
	},
	{
		.procname = "edma_rss_idt_value",
		.data = &edma_rss_idt_val,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_set_rss_idt_value
	},
	{
		.procname = "edma_rss_idt_idx",
		.data = &edma_rss_idt_idx,
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = edma_set_rss_idt_idx
	},
	{}
};
653
654 static int ess_parse(struct edma_common_info *edma)
655 {
656 struct device_node *switch_node;
657 int ret = -EINVAL;
658
659 switch_node = of_find_node_by_name(NULL, "ess-switch");
660 if (!switch_node) {
661 pr_err("cannot find ess-switch node\n");
662 goto out;
663 }
664
665 edma->ess_hw_addr = of_io_request_and_map(switch_node,
666 0, KBUILD_MODNAME);
667 if (!edma->ess_hw_addr) {
668 pr_err("%s ioremap fail.", __func__);
669 goto out;
670 }
671
672 edma->ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
673 ret = clk_prepare_enable(edma->ess_clk);
674 out:
675 of_node_put(switch_node);
676 return ret;
677 }
678
/* edma_axi_netdev_ops
 *	Operations supported by the registered EDMA net devices:
 *	open/stop, frame transmission, MAC address changes, statistics,
 *	and (with RFS acceleration) receive flow steering hooks.
 */
static const struct net_device_ops edma_axi_netdev_ops = {
	.ndo_open = edma_open,
	.ndo_stop = edma_close,
	.ndo_start_xmit = edma_xmit,
	.ndo_set_mac_address = edma_set_mac_addr,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = edma_rx_flow_steer,
	.ndo_register_rfs_filter = edma_register_rfs_filter,
	.ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
#endif
	.ndo_get_stats = edma_get_stats,
};
701
702 /* edma_axi_probe()
703 * Initialise an adapter identified by a platform_device structure.
704 *
705 * The OS initialization, configuring of the adapter private structure,
706 * and a hardware reset occur in the probe.
707 */
708 static int edma_axi_probe(struct platform_device *pdev)
709 {
710 struct edma_common_info *edma_cinfo;
711 struct edma_hw *hw;
712 struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
713 struct resource *res;
714 struct device_node *np = pdev->dev.of_node;
715 struct device_node *pnp;
716 struct device_node *mdio_node = NULL;
717 struct mii_bus *miibus = NULL;
718 int i, j, k, err = 0;
719 int portid_bmp;
720 int idx = 0, idx_mac = 0;
721
722 if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
723 dev_err(&pdev->dev, "Invalid CPU Cores\n");
724 return -EINVAL;
725 }
726
727 if ((num_rxq != 4) && (num_rxq != 8)) {
728 dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
729 return -EINVAL;
730 }
731 edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
732 if (!edma_cinfo) {
733 err = -ENOMEM;
734 goto err_alloc;
735 }
736
737 edma_cinfo->pdev = pdev;
738
739 of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
740 if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
741 pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
742 err = -EINVAL;
743 goto err_cinfo;
744 }
745
746 /* Initialize the netdev array before allocation
747 * to avoid double free
748 */
749 for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
750 edma_netdev[i] = NULL;
751
752 for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
753 edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
754 EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);
755
756 if (!edma_netdev[i]) {
757 dev_err(&pdev->dev,
758 "net device alloc fails for index=%d\n", i);
759 err = -ENODEV;
760 goto err_ioremap;
761 }
762
763 SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
764 platform_set_drvdata(pdev, edma_netdev[i]);
765 edma_cinfo->netdev[i] = edma_netdev[i];
766 }
767
768 /* Fill ring details */
769 edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
770 edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
771 edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;
772
773 /* Update num rx queues based on module parameter */
774 edma_cinfo->num_rx_queues = num_rxq;
775 edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);
776
777 edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;
778
779 hw = &edma_cinfo->hw;
780
781 /* Fill HW defaults */
782 hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
783 hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
784
785 of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
786 of_property_read_u32(np, "qcom,rx_head_buf_size",
787 &hw->rx_head_buff_size);
788
789 if (overwrite_mode) {
790 dev_info(&pdev->dev, "page mode overwritten");
791 edma_cinfo->page_mode = page_mode;
792 }
793
794 if (jumbo_mru)
795 edma_cinfo->fraglist_mode = 1;
796
797 if (edma_cinfo->page_mode)
798 hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
799 else if (edma_cinfo->fraglist_mode)
800 hw->rx_head_buff_size = jumbo_mru;
801 else if (!hw->rx_head_buff_size)
802 hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
803
804 hw->misc_intr_mask = 0;
805 hw->wol_intr_mask = 0;
806
807 hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
808 hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;
809
810 /* configure RSS type to the different protocol that can be
811 * supported
812 */
813 hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
814 EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
815 EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
816
817 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
818
819 edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
820 if (IS_ERR(edma_cinfo->hw.hw_addr)) {
821 err = PTR_ERR(edma_cinfo->hw.hw_addr);
822 goto err_ioremap;
823 }
824
825 edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
826
827 /* Parse tx queue interrupt number from device tree */
828 for (i = 0; i < edma_cinfo->num_tx_queues; i++)
829 edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);
830
831 /* Parse rx queue interrupt number from device tree
832 * Here we are setting j to point to the point where we
833 * left tx interrupt parsing(i.e 16) and run run the loop
834 * from 0 to 7 to parse rx interrupt number.
835 */
836 for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
837 i < edma_cinfo->num_rx_queues; i++) {
838 edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
839 k += ((num_rxq == 4) ? 2 : 1);
840 j += ((num_rxq == 4) ? 2 : 1);
841 }
842
843 edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
844 edma_cinfo->rx_page_buffer_len = PAGE_SIZE;
845
846 err = edma_alloc_queues_tx(edma_cinfo);
847 if (err) {
848 dev_err(&pdev->dev, "Allocation of TX queue failed\n");
849 goto err_tx_qinit;
850 }
851
852 err = edma_alloc_queues_rx(edma_cinfo);
853 if (err) {
854 dev_err(&pdev->dev, "Allocation of RX queue failed\n");
855 goto err_rx_qinit;
856 }
857
858 err = edma_alloc_tx_rings(edma_cinfo);
859 if (err) {
860 dev_err(&pdev->dev, "Allocation of TX resources failed\n");
861 goto err_tx_rinit;
862 }
863
864 err = edma_alloc_rx_rings(edma_cinfo);
865 if (err) {
866 dev_err(&pdev->dev, "Allocation of RX resources failed\n");
867 goto err_rx_rinit;
868 }
869
870 /* Initialize netdev and netdev bitmap for transmit descriptor rings */
871 for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
872 struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];
873 int j;
874
875 etdr->netdev_bmp = 0;
876 for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
877 etdr->netdev[j] = NULL;
878 etdr->nq[j] = NULL;
879 }
880 }
881
882 if (of_property_read_bool(np, "qcom,mdio_supported")) {
883 mdio_node = of_find_compatible_node(NULL, NULL,
884 "qcom,ipq4019-mdio");
885 if (!mdio_node) {
886 dev_err(&pdev->dev, "cannot find mdio node by phandle");
887 err = -EIO;
888 goto err_mdiobus_init_fail;
889 }
890
891 miibus = of_mdio_find_bus(mdio_node);
892 if (!miibus)
893 return -EINVAL;
894 }
895
896 if (of_property_read_bool(np, "qcom,single-phy") &&
897 edma_cinfo->num_gmac == 1) {
898 err = ess_parse(edma_cinfo);
899 if (!err)
900 err = ess_reset(edma_cinfo);
901 if (err)
902 goto err_single_phy_init;
903 else
904 edma_cinfo->is_single_phy = true;
905 }
906
907 for_each_available_child_of_node(np, pnp) {
908 /* this check is needed if parent and daughter dts have
909 * different number of gmac nodes
910 */
911 if (idx_mac == edma_cinfo->num_gmac) {
912 of_node_put(np);
913 break;
914 }
915
916 of_get_mac_address(pnp, edma_netdev[idx_mac]->dev_addr);
917
918 idx_mac++;
919 }
920
921 /* Populate the adapter structure register the netdevice */
922 for (i = 0; i < edma_cinfo->num_gmac; i++) {
923 int k, m;
924
925 adapter[i] = netdev_priv(edma_netdev[i]);
926 adapter[i]->netdev = edma_netdev[i];
927 adapter[i]->pdev = pdev;
928 for (j = 0; j < CONFIG_NR_CPUS; j++) {
929 m = i % 2;
930 adapter[i]->tx_start_offset[j] =
931 ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
932 /* Share the queues with available net-devices.
933 * For instance , with 5 net-devices
934 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
935 * and eth1/eth3 will get the remaining.
936 */
937 for (k = adapter[i]->tx_start_offset[j]; k <
938 (adapter[i]->tx_start_offset[j] + 2); k++) {
939 if (edma_fill_netdev(edma_cinfo, k, i, j)) {
940 pr_err("Netdev overflow Error\n");
941 goto err_register;
942 }
943 }
944 }
945
946 adapter[i]->edma_cinfo = edma_cinfo;
947 edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
948 edma_netdev[i]->max_mtu = 9000;
949 edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
950 | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
951 NETIF_F_TSO | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX;
952 edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
953 NETIF_F_HW_VLAN_CTAG_RX
954 | NETIF_F_SG | NETIF_F_TSO | NETIF_F_GRO;
955 edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
956 NETIF_F_TSO | NETIF_F_GRO;
957 edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
958 NETIF_F_TSO | NETIF_F_GRO;
959
960 #ifdef CONFIG_RFS_ACCEL
961 edma_netdev[i]->features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
962 edma_netdev[i]->hw_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
963 edma_netdev[i]->vlan_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
964 edma_netdev[i]->wanted_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
965 #endif
966 edma_set_ethtool_ops(edma_netdev[i]);
967
968 /* This just fill in some default MAC address
969 */
970 if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
971 random_ether_addr(edma_netdev[i]->dev_addr);
972 pr_info("EDMA using MAC@ - using");
973 pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
974 *(edma_netdev[i]->dev_addr),
975 *(edma_netdev[i]->dev_addr + 1),
976 *(edma_netdev[i]->dev_addr + 2),
977 *(edma_netdev[i]->dev_addr + 3),
978 *(edma_netdev[i]->dev_addr + 4),
979 *(edma_netdev[i]->dev_addr + 5));
980 }
981
982 err = register_netdev(edma_netdev[i]);
983 if (err)
984 goto err_register;
985
986 /* carrier off reporting is important to
987 * ethtool even BEFORE open
988 */
989 netif_carrier_off(edma_netdev[i]);
990
991 /* Allocate reverse irq cpu mapping structure for
992 * receive queues
993 */
994 #ifdef CONFIG_RFS_ACCEL
995 edma_netdev[i]->rx_cpu_rmap =
996 alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
997 if (!edma_netdev[i]->rx_cpu_rmap) {
998 err = -ENOMEM;
999 goto err_rmap_alloc_fail;
1000 }
1001 #endif
1002 }
1003
1004 for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
1005 edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
1006
1007 for_each_available_child_of_node(np, pnp) {
1008 const uint32_t *vlan_tag = NULL;
1009 int len;
1010
1011 /* this check is needed if parent and daughter dts have
1012 * different number of gmac nodes
1013 */
1014 if (idx == edma_cinfo->num_gmac)
1015 break;
1016
1017 /* Populate port-id to netdev lookup table */
1018 vlan_tag = of_get_property(pnp, "vlan_tag", &len);
1019 if (!vlan_tag) {
1020 pr_err("Vlan tag parsing Failed.\n");
1021 goto err_rmap_alloc_fail;
1022 }
1023
1024 adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
1025 vlan_tag++;
1026 portid_bmp = of_read_number(vlan_tag, 1);
1027 adapter[idx]->dp_bitmap = portid_bmp;
1028
1029 portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
1030 while (portid_bmp) {
1031 int port_bit = ffs(portid_bmp);
1032
1033 if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
1034 goto err_rmap_alloc_fail;
1035 edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
1036 edma_netdev[idx];
1037 portid_bmp &= ~(1 << (port_bit - 1));
1038 }
1039
1040 if (!of_property_read_u32(pnp, "qcom,poll_required",
1041 &adapter[idx]->poll_required)) {
1042 if (adapter[idx]->poll_required) {
1043 of_property_read_u32(pnp, "qcom,phy_mdio_addr",
1044 &adapter[idx]->phy_mdio_addr);
1045 of_property_read_u32(pnp, "qcom,forced_speed",
1046 &adapter[idx]->forced_speed);
1047 of_property_read_u32(pnp, "qcom,forced_duplex",
1048 &adapter[idx]->forced_duplex);
1049
1050 /* create a phyid using MDIO bus id
1051 * and MDIO bus address
1052 */
1053 snprintf(adapter[idx]->phy_id,
1054 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
1055 miibus->id,
1056 adapter[idx]->phy_mdio_addr);
1057 }
1058 } else {
1059 adapter[idx]->poll_required = 0;
1060 adapter[idx]->forced_speed = SPEED_1000;
1061 adapter[idx]->forced_duplex = DUPLEX_FULL;
1062 }
1063
1064 idx++;
1065 }
1066
1067 edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
1068 "net/edma",
1069 edma_table);
1070 if (!edma_cinfo->edma_ctl_table_hdr) {
1071 dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
1072 goto err_unregister_sysctl_tbl;
1073 }
1074
1075 /* Disable all 16 Tx and 8 rx irqs */
1076 edma_irq_disable(edma_cinfo);
1077
1078 err = edma_reset(edma_cinfo);
1079 if (err) {
1080 err = -EIO;
1081 goto err_reset;
1082 }
1083
1084 /* populate per_core_info, do a napi_Add, request 16 TX irqs,
1085 * 8 RX irqs, do a napi enable
1086 */
1087 for (i = 0; i < CONFIG_NR_CPUS; i++) {
1088 u8 rx_start;
1089
1090 edma_cinfo->edma_percpu_info[i].napi.state = 0;
1091
1092 netif_napi_add(edma_netdev[0],
1093 &edma_cinfo->edma_percpu_info[i].napi,
1094 edma_poll, 64);
1095 napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
1096 edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
1097 edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
1098 << (i << EDMA_RX_PER_CPU_MASK_SHIFT);
1099 edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
1100 edma_cinfo->edma_percpu_info[i].rx_start =
1101 i << EDMA_RX_CPU_START_SHIFT;
1102 rx_start = i << EDMA_RX_CPU_START_SHIFT;
1103 edma_cinfo->edma_percpu_info[i].tx_status = 0;
1104 edma_cinfo->edma_percpu_info[i].rx_status = 0;
1105 edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
1106
1107 /* Request irq per core */
1108 for (j = edma_cinfo->edma_percpu_info[i].tx_start;
1109 j < tx_start[i] + 4; j++) {
1110 sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
1111 err = request_irq(edma_cinfo->tx_irq[j],
1112 edma_interrupt,
1113 0,
1114 &edma_tx_irq[j][0],
1115 &edma_cinfo->edma_percpu_info[i]);
1116 if (err)
1117 goto err_reset;
1118 }
1119
1120 for (j = edma_cinfo->edma_percpu_info[i].rx_start;
1121 j < (rx_start +
1122 ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
1123 j++) {
1124 sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
1125 err = request_irq(edma_cinfo->rx_irq[j],
1126 edma_interrupt,
1127 0,
1128 &edma_rx_irq[j][0],
1129 &edma_cinfo->edma_percpu_info[i]);
1130 if (err)
1131 goto err_reset;
1132 }
1133
1134 #ifdef CONFIG_RFS_ACCEL
1135 for (j = edma_cinfo->edma_percpu_info[i].rx_start;
1136 j < rx_start + 2; j += 2) {
1137 err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
1138 edma_cinfo->rx_irq[j]);
1139 if (err)
1140 goto err_rmap_add_fail;
1141 }
1142 #endif
1143 }
1144
1145 /* Used to clear interrupt status, allocate rx buffer,
1146 * configure edma descriptors registers
1147 */
1148 err = edma_configure(edma_cinfo);
1149 if (err) {
1150 err = -EIO;
1151 goto err_configure;
1152 }
1153
1154 /* Configure RSS indirection table.
1155 * 128 hash will be configured in the following
1156 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
1157 * and so on
1158 */
1159 for (i = 0; i < EDMA_NUM_IDT; i++)
1160 edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);
1161
1162 /* Configure load balance mapping table.
1163 * 4 table entry will be configured according to the
1164 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
1165 * respectively.
1166 */
1167 edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);
1168
1169 /* Configure Virtual queue for Tx rings
1170 * User can also change this value runtime through
1171 * a sysctl
1172 */
1173 edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
1174 edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);
1175
1176 /* Configure Max AXI Burst write size to 128 bytes*/
1177 edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
1178 EDMA_AXIW_MAXWRSIZE_VALUE);
1179
1180 /* Enable All 16 tx and 8 rx irq mask */
1181 edma_irq_enable(edma_cinfo);
1182 edma_enable_tx_ctrl(&edma_cinfo->hw);
1183 edma_enable_rx_ctrl(&edma_cinfo->hw);
1184
1185 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1186 if (adapter[i]->poll_required) {
1187 int phy_mode = of_get_phy_mode(np);
1188
1189 if (phy_mode < 0)
1190 phy_mode = PHY_INTERFACE_MODE_SGMII;
1191 adapter[i]->phydev =
1192 phy_connect(edma_netdev[i],
1193 (const char *)adapter[i]->phy_id,
1194 &edma_adjust_link,
1195 phy_mode);
1196 if (IS_ERR(adapter[i]->phydev)) {
1197 dev_dbg(&pdev->dev, "PHY attach FAIL");
1198 err = -EIO;
1199 goto edma_phy_attach_fail;
1200 } else {
1201 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1202 adapter[i]->phydev->advertising);
1203 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1204 adapter[i]->phydev->advertising);
1205 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
1206 adapter[i]->phydev->supported);
1207 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
1208 adapter[i]->phydev->supported);
1209 }
1210 } else {
1211 adapter[i]->phydev = NULL;
1212 }
1213 }
1214
1215 spin_lock_init(&edma_cinfo->stats_lock);
1216
1217 timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
1218 mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
1219
1220 return 0;
1221
1222 edma_phy_attach_fail:
1223 miibus = NULL;
1224 err_configure:
1225 #ifdef CONFIG_RFS_ACCEL
1226 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1227 free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
1228 adapter[i]->netdev->rx_cpu_rmap = NULL;
1229 }
1230 #endif
1231 err_rmap_add_fail:
1232 edma_free_irqs(adapter[0]);
1233 for (i = 0; i < CONFIG_NR_CPUS; i++)
1234 napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
1235 err_reset:
1236 err_unregister_sysctl_tbl:
1237 err_rmap_alloc_fail:
1238 for (i = 0; i < edma_cinfo->num_gmac; i++)
1239 unregister_netdev(edma_netdev[i]);
1240 err_register:
1241 err_single_phy_init:
1242 iounmap(edma_cinfo->ess_hw_addr);
1243 clk_disable_unprepare(edma_cinfo->ess_clk);
1244 err_mdiobus_init_fail:
1245 edma_free_rx_rings(edma_cinfo);
1246 err_rx_rinit:
1247 edma_free_tx_rings(edma_cinfo);
1248 err_tx_rinit:
1249 edma_free_queues(edma_cinfo);
1250 err_rx_qinit:
1251 err_tx_qinit:
1252 iounmap(edma_cinfo->hw.hw_addr);
1253 err_ioremap:
1254 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1255 if (edma_netdev[i])
1256 free_netdev(edma_netdev[i]);
1257 }
1258 err_cinfo:
1259 kfree(edma_cinfo);
1260 err_alloc:
1261 return err;
1262 }
1263
1264 /* edma_axi_remove()
1265 * Device Removal Routine
1266 *
1267 * edma_axi_remove is called by the platform subsystem to alert the driver
1268 * that it should release a platform device.
1269 */
1270 static int edma_axi_remove(struct platform_device *pdev)
1271 {
1272 struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
1273 struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
1274 struct edma_hw *hw = &edma_cinfo->hw;
1275 int i;
1276
1277 for (i = 0; i < edma_cinfo->num_gmac; i++)
1278 unregister_netdev(edma_netdev[i]);
1279
1280 edma_stop_rx_tx(hw);
1281 for (i = 0; i < CONFIG_NR_CPUS; i++)
1282 napi_disable(&edma_cinfo->edma_percpu_info[i].napi);
1283
1284 edma_irq_disable(edma_cinfo);
1285 edma_write_reg(EDMA_REG_RX_ISR, 0xff);
1286 edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
1287 #ifdef CONFIG_RFS_ACCEL
1288 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1289 free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
1290 edma_netdev[i]->rx_cpu_rmap = NULL;
1291 }
1292 #endif
1293
1294 for (i = 0; i < edma_cinfo->num_gmac; i++) {
1295 struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);
1296
1297 if (adapter->phydev)
1298 phy_disconnect(adapter->phydev);
1299 }
1300
1301 del_timer_sync(&edma_cinfo->edma_stats_timer);
1302 edma_free_irqs(adapter);
1303 unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
1304 iounmap(edma_cinfo->ess_hw_addr);
1305 clk_disable_unprepare(edma_cinfo->ess_clk);
1306 edma_free_tx_resources(edma_cinfo);
1307 edma_free_rx_resources(edma_cinfo);
1308 edma_free_tx_rings(edma_cinfo);
1309 edma_free_rx_rings(edma_cinfo);
1310 edma_free_queues(edma_cinfo);
1311 for (i = 0; i < edma_cinfo->num_gmac; i++)
1312 free_netdev(edma_netdev[i]);
1313
1314 kfree(edma_cinfo);
1315
1316 return 0;
1317 }
1318
/* Device-tree match table: binds this driver to "qcom,ess-edma" nodes
 * and exports the alias for module autoloading via MODULE_DEVICE_TABLE.
 */
static const struct of_device_id edma_of_mtable[] = {
	{.compatible = "qcom,ess-edma" },
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, edma_of_mtable);
1324
/* Platform driver glue: wires the probe/remove entry points and the
 * device-tree match table under the "ess_edma" driver name.
 */
static struct platform_driver edma_axi_driver = {
	.driver = {
		.name = edma_axi_driver_name,
		.of_match_table = edma_of_mtable,
	},
	.probe = edma_axi_probe,
	.remove = edma_axi_remove,
};
1333
/* Registers edma_axi_driver at module init and unregisters it on exit */
module_platform_driver(edma_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("GPL");