/*
 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/cpu_rmap.h>
#include <linux/of_net.h>
#include <linux/timer.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/clk.h>
#include <linux/string.h>
#include <linux/reset.h>
#include <linux/version.h>
/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16
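/* The WRR weight and virtual-queue sysctls below pack two fields into one
 * integer: bits [15:0] carry the hardware queue id and bits [31:16] carry
 * the weight or virtual queue id (see the proc handlers further down).
 */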
char edma_axi_driver_name[] = "ess_edma";
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_TIMER |
				NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static u32 edma_hw_addr;

char edma_tx_irq[16][64];
char edma_rx_irq[8][64];
struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];
static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
			  EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
			 EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
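/* Per-core TX resources: each of the four CPU cores owns a block of TX
 * queues starting at tx_start[core] and a matching TX IRQ mask in
 * tx_mask[core]; both are consumed in edma_axi_probe() when the per-CPU
 * info is populated.
 */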
static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
static u32 edma_rss_idt_idx;

static int edma_weight_assigned_to_q __read_mostly;
static int edma_queue_to_virtual_q __read_mostly;
static bool edma_enable_rstp __read_mostly;
static int edma_athr_hdr_eth_type __read_mostly;
static int page_mode;
module_param(page_mode, int, 0);
MODULE_PARM_DESC(page_mode, "enable page mode");

static int overwrite_mode;
module_param(overwrite_mode, int, 0);
MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");

static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
module_param(jumbo_mru, int, 0);
MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");

static int num_rxq = 4;
module_param(num_rxq, int, 0);
MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
void edma_write_reg(u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
}

void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
}
static void ess_write_reg(struct edma_common_info *edma, u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr)));
}

static void ess_read_reg(struct edma_common_info *edma, u16 reg_addr,
			 volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr));
}
static int ess_reset(struct edma_common_info *edma)
{
	struct device_node *switch_node = NULL;
	struct reset_control *ess_rst;
	u32 regval = 0;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
	if (!switch_node) {
		pr_err("switch-node not found\n");
		return -EINVAL;
	}

	ess_rst = of_reset_control_get(switch_node, "ess_rst");
	of_node_put(switch_node);

	if (IS_ERR(ess_rst)) {
		pr_err("failed to find ess_rst!\n");
		return PTR_ERR(ess_rst);
	}

	reset_control_assert(ess_rst);
	reset_control_deassert(ess_rst);
	reset_control_put(ess_rst);

	/* Enable only port 5 <--> port 0
	 * bits 0:6 bitmap of ports it can fwd to
	 */
#define SET_PORT_BMP(r, v) \
	ess_read_reg(edma, r, &regval); \
	ess_write_reg(edma, r, ((regval & ~0x3F) | v));
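	/* Port 0 (the CPU port) may forward only to port 5 (0x20 = bit 5) and
	 * port 5 only back to port 0 (0x01 = bit 0); every other port gets an
	 * empty forwarding bitmap.
	 */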
	SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL, 0x20);
	SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL, 0x01);
	ess_write_reg(edma, ESS_RGMII_CTRL, 0x400);
	ess_write_reg(edma, ESS_PORT0_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT5_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT0_HEADER_CTRL, 0);

	/* forward multicast and broadcast frames to CPU */
	ess_write_reg(edma, ESS_FWD_CTRL1,
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_UC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_MC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_BC_FLOOD_S));

	return 0;
}
void ess_set_port_status_speed(struct edma_common_info *edma,
			       struct phy_device *phydev, uint8_t port_id)
{
	uint16_t reg_off = ESS_PORT0_STATUS + (4 * port_id);
	uint32_t reg_val = 0;

	ess_read_reg(edma, reg_off, &reg_val);

	/* reset the speed bits [0:1] */
	reg_val &= ~ESS_PORT_STATUS_SPEED_INV;

	/* set the new speed */
	switch (phydev->speed) {
	case SPEED_1000: reg_val |= ESS_PORT_STATUS_SPEED_1000; break;
	case SPEED_100: reg_val |= ESS_PORT_STATUS_SPEED_100; break;
	case SPEED_10: reg_val |= ESS_PORT_STATUS_SPEED_10; break;
	default: reg_val |= ESS_PORT_STATUS_SPEED_INV; break;
	}

	/* check full/half duplex */
	if (phydev->duplex) {
		reg_val |= ESS_PORT_STATUS_DUPLEX_MODE;
	} else {
		reg_val &= ~ESS_PORT_STATUS_DUPLEX_MODE;
	}

	ess_write_reg(edma, reg_off, reg_val);
}
/* edma_change_tx_coalesce()
 *	change tx interrupt moderation timer
 */
void edma_change_tx_coalesce(int usecs)
{
	u32 reg_value;

	/* Here, we right shift the value from the user by 1, this is
	 * done because IMT resolution timer is 2usecs. 1 count
	 * of this register corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}
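/* Example: a requested coalesce value of 100 usecs becomes 100 >> 1 = 50
 * hardware timer counts (2 usecs per count) in the relevant half of
 * EDMA_REG_IRQ_MODRT_TIMER_INIT.
 */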
/* edma_change_rx_coalesce()
 *	change rx interrupt moderation timer
 */
void edma_change_rx_coalesce(int usecs)
{
	u32 reg_value;

	/* Here, we right shift the value from the user by 1, this is
	 * done because IMT resolution timer is 2usecs. 1 count
	 * of this register corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}
/* edma_get_tx_rx_coalesce()
 *	Get tx/rx interrupt moderation value
 */
void edma_get_tx_rx_coalesce(u32 *reg_val)
{
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
}
void edma_read_append_stats(struct edma_common_info *edma_cinfo)
{
	uint32_t *p;
	int i;
	u32 stat;

	spin_lock_bh(&edma_cinfo->stats_lock);
	p = (uint32_t *)&(edma_cinfo->edma_ethstats);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);
		*p += stat;
		p++;
	}

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);
		*p += stat;
		p++;
	}

	spin_unlock_bh(&edma_cinfo->stats_lock);
}
static void edma_statistics_timer(struct timer_list *t)
{
	struct edma_common_info *edma_cinfo =
		from_timer(edma_cinfo, t, edma_stats_timer);

	edma_read_append_stats(edma_cinfo);

	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
}
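/* The timer re-arms itself, so the per-queue hardware counters are folded
 * into the software stats once per second (presumably to keep them from
 * wrapping between reads).
 */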
static int edma_enable_stp_rstp(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_set_stp_rstp(edma_enable_rstp);

	return ret;
}

static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write)
		edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);

	return ret;
}
static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for default_lan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_ltag;

	return ret;
}

static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
{
	struct edma_adapter *adapter;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for default_wan does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_wtag;

	return ret;
}
static int edma_change_group1_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for Group 1 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group1_vtag;

	return ret;
}

static int edma_change_group2_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for Group 2 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[1]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group2_vtag;

	return ret;
}

static int edma_change_group3_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[2]) {
		pr_err("Netdevice for Group 3 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[2]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group3_vtag;

	return ret;
}

static int edma_change_group4_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[3]) {
		pr_err("Netdevice for Group 4 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[3]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group4_vtag;

	return ret;
}

static int edma_change_group5_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;
	int ret;

	if (!edma_netdev[4]) {
		pr_err("Netdevice for Group 5 does not exist\n");
		return -1;
	}

	adapter = netdev_priv(edma_netdev[4]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (write)
		adapter->default_vlan_tag = edma_default_group5_vtag;

	return ret;
}
static int edma_set_rss_idt_value(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (write && !ret)
		edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),
			       edma_rss_idt_val);

	return ret;
}

static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret;
	u32 old_value = edma_rss_idt_idx;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write || ret)
		return ret;

	if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
		pr_err("Invalid RSS indirection table index %d\n",
		       edma_rss_idt_idx);
		edma_rss_idt_idx = old_value;
		return -EINVAL;
	}

	return ret;
}
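/* Example: writing (3 << EDMA_WRR_VID_SCTL_SHIFT) | 5 (0x00030005) to the
 * weight_assigned_to_queues sysctl below assigns WRR weight 3 to hardware
 * queue 5.
 */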
static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
{
	int ret, queue_id, weight;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
	if (queue_id < 0 || queue_id > 15) {
		pr_err("queue_id not within desired range\n");
		return -EINVAL;
	}

	weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
	if (weight < 0 || weight > 0xF) {
		pr_err("weight not within desired range\n");
		return -EINVAL;
	}

	data = weight << EDMA_WRR_SHIFT(queue_id);

	reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
	edma_read_reg(reg_addr, &reg_data);
	reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
	edma_write_reg(reg_addr, data | reg_data);

	return ret;
}
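/* Example: writing (2 << EDMA_WRR_VID_SCTL_SHIFT) | 7 (0x00020007) to the
 * queue_to_virtual_queue_map sysctl below maps hardware queue 7 to virtual
 * queue 2.
 */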
static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
{
	int ret, queue_id, virtual_qid;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
	if (queue_id < 0 || queue_id > 15) {
		pr_err("queue_id not within desired range\n");
		return -EINVAL;
	}

	virtual_qid = edma_queue_to_virtual_q >>
		      EDMA_WRR_VID_SCTL_SHIFT;
	if (virtual_qid < 0 || virtual_qid > 8) {
		pr_err("virtual_qid not within desired range\n");
		return -EINVAL;
	}

	data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);

	reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
	edma_read_reg(reg_addr, &reg_data);
	reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
	edma_write_reg(reg_addr, data | reg_data);

	return ret;
}
static struct ctl_table edma_table[] = {
	{
		.procname	= "default_lan_tag",
		.data		= &edma_default_ltag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_change_default_lan_vlan
	},
	{
		.procname	= "default_wan_tag",
		.data		= &edma_default_wtag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_change_default_wan_vlan
	},
	{
		.procname	= "weight_assigned_to_queues",
		.data		= &edma_weight_assigned_to_q,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_weight_assigned_to_queues
	},
	{
		.procname	= "queue_to_virtual_queue_map",
		.data		= &edma_queue_to_virtual_q,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_queue_to_virtual_queue_map
	},
	{
		.procname	= "enable_stp_rstp",
		.data		= &edma_enable_rstp,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_enable_stp_rstp
	},
	{
		.procname	= "athr_hdr_eth_type",
		.data		= &edma_athr_hdr_eth_type,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_ath_hdr_eth_type
	},
	{
		.procname	= "default_group1_vlan_tag",
		.data		= &edma_default_group1_vtag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_change_group1_vtag
	},
	{
		.procname	= "default_group2_vlan_tag",
		.data		= &edma_default_group2_vtag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_change_group2_vtag
	},
	{
		.procname	= "default_group3_vlan_tag",
		.data		= &edma_default_group3_vtag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_change_group3_vtag
	},
	{
		.procname	= "default_group4_vlan_tag",
		.data		= &edma_default_group4_vtag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_change_group4_vtag
	},
	{
		.procname	= "default_group5_vlan_tag",
		.data		= &edma_default_group5_vtag,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_change_group5_vtag
	},
	{
		.procname	= "edma_rss_idt_value",
		.data		= &edma_rss_idt_val,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_set_rss_idt_value
	},
	{
		.procname	= "edma_rss_idt_idx",
		.data		= &edma_rss_idt_idx,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= edma_set_rss_idt_idx
	},
	{}
};
static int ess_parse(struct edma_common_info *edma)
{
	struct device_node *switch_node;
	int ret = 0;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
	if (!switch_node) {
		pr_err("cannot find ess-switch node\n");
		return -EINVAL;
	}

	edma->ess_hw_addr = of_io_request_and_map(switch_node,
						  0, "ess-switch");
	if (!edma->ess_hw_addr) {
		pr_err("%s ioremap fail.", __func__);
		return -EINVAL;
	}

	edma->ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
	ret = clk_prepare_enable(edma->ess_clk);

	of_node_put(switch_node);

	return ret;
}
/* edma_axi_netdev_ops
 *	Describe the operations supported by registered netdevices
 *
 * static const struct net_device_ops edma_axi_netdev_ops = {
 *	.ndo_open               = edma_open,
 *	.ndo_stop               = edma_close,
 *	.ndo_start_xmit         = edma_xmit_frame,
 *	.ndo_set_mac_address    = edma_set_mac_addr,
 * }
 */
static const struct net_device_ops edma_axi_netdev_ops = {
	.ndo_open		= edma_open,
	.ndo_stop		= edma_close,
	.ndo_start_xmit		= edma_xmit,
	.ndo_set_mac_address	= edma_set_mac_addr,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= edma_rx_flow_steer,
	.ndo_register_rfs_filter = edma_register_rfs_filter,
	.ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
#endif
	.ndo_get_stats		= edma_get_stats,
};
/* edma_axi_probe()
 *	Initialise an adapter identified by a platform_device structure.
 *
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur in the probe.
 */
static int edma_axi_probe(struct platform_device *pdev)
{
	struct edma_common_info *edma_cinfo;
	struct edma_hw *hw;
	struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pnp;
	struct device_node *mdio_node = NULL;
	struct mii_bus *miibus = NULL;
	int i, j, k, err = 0;
	int portid_bmp, rx_start, len;
	int idx = 0, idx_mac = 0;
	if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
		dev_err(&pdev->dev, "Invalid CPU Cores\n");
		return -EINVAL;
	}

	if ((num_rxq != 4) && (num_rxq != 8)) {
		dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");
		return -EINVAL;
	}

	edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
	if (!edma_cinfo)
		return -ENOMEM;

	edma_cinfo->pdev = pdev;

	of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
	if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
		pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
		kfree(edma_cinfo);
		return -EINVAL;
	}

	/* Initialize the netdev array before allocation
	 * to avoid double free
	 */
	for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
		edma_netdev[i] = NULL;

	for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
		edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
				EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);

		if (!edma_netdev[i]) {
			dev_err(&pdev->dev,
				"net device alloc fails for index=%d\n", i);
		}

		SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
		platform_set_drvdata(pdev, edma_netdev[i]);
		edma_cinfo->netdev[i] = edma_netdev[i];
	}
	/* Fill ring details */
	edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
	edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
	edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;

	/* Update num rx queues based on module parameter */
	edma_cinfo->num_rx_queues = num_rxq;
	edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);

	edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;

	hw = &edma_cinfo->hw;

	/* Fill HW defaults */
	hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
	hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;

	of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
	of_property_read_u32(np, "qcom,rx_head_buf_size",
			     &hw->rx_head_buff_size);

	if (overwrite_mode) {
		dev_info(&pdev->dev, "page mode overwritten");
		edma_cinfo->page_mode = page_mode;
	}

	if (jumbo_mru)
		edma_cinfo->fraglist_mode = 1;

	if (edma_cinfo->page_mode)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
	else if (edma_cinfo->fraglist_mode)
		hw->rx_head_buff_size = jumbo_mru;
	else if (!hw->rx_head_buff_size)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;
	hw->misc_intr_mask = 0;
	hw->wol_intr_mask = 0;

	hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
	hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

	/* configure RSS type to the different protocol that can be
	 * supported
	 */
	hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
		       EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
		       EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma_cinfo->hw.hw_addr)) {
		err = PTR_ERR(edma_cinfo->hw.hw_addr);
	}

	edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;

	/* Parse tx queue interrupt number from device tree */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);

	/* Parse rx queue interrupt number from device tree
	 * Here we are setting j to point to the point where we
	 * left tx interrupt parsing(i.e 16) and run the loop
	 * from 0 to 7 to parse rx interrupt number.
	 */
	for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
			i < edma_cinfo->num_rx_queues; i++) {
		edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
		k += ((num_rxq == 4) ? 2 : 1);
		j += ((num_rxq == 4) ? 2 : 1);
	}
->rx_head_buffer_len
= edma_cinfo
->hw
.rx_head_buff_size
;
845 edma_cinfo
->rx_page_buffer_len
= PAGE_SIZE
;
847 err
= edma_alloc_queues_tx(edma_cinfo
);
849 dev_err(&pdev
->dev
, "Allocation of TX queue failed\n");
853 err
= edma_alloc_queues_rx(edma_cinfo
);
855 dev_err(&pdev
->dev
, "Allocation of RX queue failed\n");
859 err
= edma_alloc_tx_rings(edma_cinfo
);
861 dev_err(&pdev
->dev
, "Allocation of TX resources failed\n");
865 err
= edma_alloc_rx_rings(edma_cinfo
);
867 dev_err(&pdev
->dev
, "Allocation of RX resources failed\n");
871 /* Initialize netdev and netdev bitmap for transmit descriptor rings */
872 for (i
= 0; i
< edma_cinfo
->num_tx_queues
; i
++) {
873 struct edma_tx_desc_ring
*etdr
= edma_cinfo
->tpd_ring
[i
];
876 etdr
->netdev_bmp
= 0;
877 for (j
= 0; j
< EDMA_MAX_NETDEV_PER_QUEUE
; j
++) {
878 etdr
->netdev
[j
] = NULL
;
883 if (of_property_read_bool(np
, "qcom,mdio_supported")) {
884 mdio_node
= of_find_compatible_node(NULL
, NULL
,
885 "qcom,ipq4019-mdio");
887 dev_err(&pdev
->dev
, "cannot find mdio node by phandle");
889 goto err_mdiobus_init_fail
;
892 miibus
= of_mdio_find_bus(mdio_node
);
	if (of_property_read_bool(np, "qcom,single-phy") &&
	    edma_cinfo->num_gmac == 1) {
		err = ess_parse(edma_cinfo);
		if (!err)
			err = ess_reset(edma_cinfo);
		if (err)
			goto err_single_phy_init;

		edma_cinfo->is_single_phy = true;
	}

	for_each_available_child_of_node(np, pnp) {
		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx_mac == edma_cinfo->num_gmac)
			break;

		of_get_mac_address(pnp, edma_netdev[idx_mac]->dev_addr);

		idx_mac++;
	}
	/* Populate the adapter structure and register the netdevice */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		int m = (i % 2);

		adapter[i] = netdev_priv(edma_netdev[i]);
		adapter[i]->netdev = edma_netdev[i];
		adapter[i]->pdev = pdev;
		for (j = 0; j < CONFIG_NR_CPUS; j++) {
			adapter[i]->tx_start_offset[j] =
				((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with available net-devices.
			 * For instance, with 5 net-devices
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining.
			 */
			for (k = adapter[i]->tx_start_offset[j]; k <
			     (adapter[i]->tx_start_offset[j] + 2); k++) {
				if (edma_fill_netdev(edma_cinfo, k, i, j)) {
					pr_err("Netdev overflow Error\n");
				}
			}
		}

		adapter[i]->edma_cinfo = edma_cinfo;
		edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
		edma_netdev[i]->max_mtu = 9000;
		edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
				| NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX;
		edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_RX
				| NETIF_F_SG | NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->hw_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->vlan_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->wanted_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
#endif

		edma_set_ethtool_ops(edma_netdev[i]);

		/* This just fills in a default MAC address
		 */
		if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
			random_ether_addr(edma_netdev[i]->dev_addr);
			pr_info("EDMA using MAC@ - using");
			pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
				*(edma_netdev[i]->dev_addr),
				*(edma_netdev[i]->dev_addr + 1),
				*(edma_netdev[i]->dev_addr + 2),
				*(edma_netdev[i]->dev_addr + 3),
				*(edma_netdev[i]->dev_addr + 4),
				*(edma_netdev[i]->dev_addr + 5));
		}

		err = register_netdev(edma_netdev[i]);

		/* carrier off reporting is important to
		 * ethtool even BEFORE open
		 */
		netif_carrier_off(edma_netdev[i]);
		/* Allocate reverse irq cpu mapping structure for
		 * receive queues
		 */
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->rx_cpu_rmap =
			alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
		if (!edma_netdev[i]->rx_cpu_rmap) {
			err = -ENOMEM;
			goto err_rmap_alloc_fail;
		}
#endif
	}

	for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
		edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;

	for_each_available_child_of_node(np, pnp) {
		const uint32_t *vlan_tag = NULL;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx == edma_cinfo->num_gmac)
			break;

		/* Populate port-id to netdev lookup table */
		vlan_tag = of_get_property(pnp, "vlan_tag", &len);
		if (!vlan_tag) {
			pr_err("Vlan tag parsing Failed.\n");
			goto err_rmap_alloc_fail;
		}

		adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
		vlan_tag++;
		portid_bmp = of_read_number(vlan_tag, 1);
		adapter[idx]->dp_bitmap = portid_bmp;

		portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
		while (portid_bmp) {
			int port_bit = ffs(portid_bmp);

			if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
				goto err_rmap_alloc_fail;
			edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
				edma_netdev[idx];
			portid_bmp &= ~(1 << (port_bit - 1));
		}
		if (!of_property_read_u32(pnp, "qcom,poll_required",
					  &adapter[idx]->poll_required)) {
			if (adapter[idx]->poll_required) {
				of_property_read_u32(pnp, "qcom,phy_mdio_addr",
						     &adapter[idx]->phy_mdio_addr);
				of_property_read_u32(pnp, "qcom,forced_speed",
						     &adapter[idx]->forced_speed);
				of_property_read_u32(pnp, "qcom,forced_duplex",
						     &adapter[idx]->forced_duplex);

				/* create a phyid using MDIO bus id
				 * and MDIO bus address
				 */
				snprintf(adapter[idx]->phy_id,
					 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
					 miibus->id,
					 adapter[idx]->phy_mdio_addr);
			}
		} else {
			adapter[idx]->poll_required = 0;
			adapter[idx]->forced_speed = SPEED_1000;
			adapter[idx]->forced_duplex = DUPLEX_FULL;
		}

		idx++;
	}
	edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
							     "net/edma",
							     edma_table);
	if (!edma_cinfo->edma_ctl_table_hdr) {
		dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
		goto err_unregister_sysctl_tbl;
	}

	/* Disable all 16 Tx and 8 rx irqs */
	edma_irq_disable(edma_cinfo);

	err = edma_reset(edma_cinfo);

	/* populate per_core_info, do a napi_Add, request 16 TX irqs,
	 * 8 RX irqs, do a napi enable
	 */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		edma_cinfo->edma_percpu_info[i].napi.state = 0;

		netif_napi_add(edma_netdev[0],
			       &edma_cinfo->edma_percpu_info[i].napi,
			       edma_poll, 64);
		napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
		edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
		edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
			<< (i << EDMA_RX_PER_CPU_MASK_SHIFT);
		edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
		edma_cinfo->edma_percpu_info[i].rx_start =
			i << EDMA_RX_CPU_START_SHIFT;
		rx_start = i << EDMA_RX_CPU_START_SHIFT;
		edma_cinfo->edma_percpu_info[i].tx_status = 0;
		edma_cinfo->edma_percpu_info[i].rx_status = 0;
		edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
		/* Request irq per core */
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < tx_start[i] + 4; j++) {
			sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
			err = request_irq(edma_cinfo->tx_irq[j],
					  edma_interrupt,
					  0,
					  &edma_tx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
		}

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < (rx_start +
			  ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
		     j++) {
			sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
			err = request_irq(edma_cinfo->rx_irq[j],
					  edma_interrupt,
					  0,
					  &edma_rx_irq[j][0],
					  &edma_cinfo->edma_percpu_info[i]);
		}
#ifdef CONFIG_RFS_ACCEL
		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < rx_start + 2; j += 2) {
			err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
					       edma_cinfo->rx_irq[j]);
			if (err)
				goto err_rmap_add_fail;
		}
#endif
	}

	/* Used to clear interrupt status, allocate rx buffer,
	 * configure edma descriptors registers
	 */
	err = edma_configure(edma_cinfo);

	/* Configure RSS indirection table.
	 * 128 hash will be configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);

	/* Configure load balance mapping table.
	 * 4 table entry will be configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 */
	edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings
	 * User can also change this value runtime through
	 * the sysctl interface
	 */
	edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	/* Configure Max AXI Burst write size to 128 bytes*/
	edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
		       EDMA_AXIW_MAXWRSIZE_VALUE);

	/* Enable All 16 tx and 8 rx irq mask */
	edma_irq_enable(edma_cinfo);
	edma_enable_tx_ctrl(&edma_cinfo->hw);
	edma_enable_rx_ctrl(&edma_cinfo->hw);
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (adapter[i]->poll_required) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0)
			phy_interface_t phy_mode;

			err = of_get_phy_mode(np, &phy_mode);
			if (err)
				phy_mode = PHY_INTERFACE_MODE_SGMII;
#else
			int phy_mode = of_get_phy_mode(np);

			if (phy_mode < 0)
				phy_mode = PHY_INTERFACE_MODE_SGMII;
#endif
			adapter[i]->phydev =
				phy_connect(edma_netdev[i],
					    (const char *)adapter[i]->phy_id,
					    &edma_adjust_link,
					    phy_mode);
			if (IS_ERR(adapter[i]->phydev)) {
				dev_dbg(&pdev->dev, "PHY attach FAIL");
				err = -EIO;
				goto edma_phy_attach_fail;
			}
			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 adapter[i]->phydev->advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
					 adapter[i]->phydev->advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 adapter[i]->phydev->supported);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
					 adapter[i]->phydev->supported);
		} else {
			adapter[i]->phydev = NULL;
		}
	}

	spin_lock_init(&edma_cinfo->stats_lock);

	timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);

	return 0;
edma_phy_attach_fail:
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
		adapter[i]->netdev->rx_cpu_rmap = NULL;
	}
#endif
err_rmap_add_fail:
	edma_free_irqs(adapter[0]);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

err_unregister_sysctl_tbl:
err_rmap_alloc_fail:
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

err_single_phy_init:
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
err_mdiobus_init_fail:
	edma_free_rx_rings(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	iounmap(edma_cinfo->hw.hw_addr);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_netdev(edma_netdev[i]);
	}

	return err;
}
/* edma_axi_remove()
 *	Device Removal Routine
 *
 * edma_axi_remove is called by the platform subsystem to alert the driver
 * that it should release a platform device.
 */
static int edma_axi_remove(struct platform_device *pdev)
{
	struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_hw *hw = &edma_cinfo->hw;
	int i;

	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

	edma_stop_rx_tx(hw);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

	edma_irq_disable(edma_cinfo);
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
		edma_netdev[i]->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);

		if (adapter->phydev)
			phy_disconnect(adapter->phydev);
	}

	del_timer_sync(&edma_cinfo->edma_stats_timer);
	edma_free_irqs(adapter);
	unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
	edma_free_tx_resources(edma_cinfo);
	edma_free_rx_resources(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_rx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		free_netdev(edma_netdev[i]);

	return 0;
}
static const struct of_device_id edma_of_mtable[] = {
	{.compatible = "qcom,ess-edma" },
	{}
};
MODULE_DEVICE_TABLE(of, edma_of_mtable);

static struct platform_driver edma_axi_driver = {
	.driver = {
		.name		= edma_axi_driver_name,
		.of_match_table	= edma_of_mtable,
	},
	.probe		= edma_axi_probe,
	.remove		= edma_axi_remove,
};

module_platform_driver(edma_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("GPL");