2 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 #include <linux/cpu_rmap.h>
18 #include <linux/of_net.h>
19 #include <linux/timer.h>
20 #include <linux/of_platform.h>
21 #include <linux/of_address.h>
22 #include <linux/of_mdio.h>
23 #include <linux/clk.h>
24 #include <linux/string.h>
25 #include <linux/reset.h>
/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16

/* Platform-driver name; also exported for use in edma_axi_driver below. */
char edma_axi_driver_name[] = "ess_edma";

/* Default netif message level mask for this driver. */
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
	NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

/* Base address of the EDMA register block; set in probe, used by
 * edma_read_reg()/edma_write_reg(). */
static u32 edma_hw_addr;

/* IRQ name buffers: 16 TX and 8 RX interrupt lines. */
char edma_tx_irq[16][64];
char edma_rx_irq[8][64];

/* One net_device per supported GMAC port; filled in probe. */
struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];

/* Per-core first TX queue and TX interrupt masks. */
static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
	EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
	EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};

/* Default VLAN tags, exposed via the sysctl table below. */
static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;

/* RSS indirection table value and index, written via sysctl handlers. */
static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
static u32 edma_rss_idt_idx;

/* Staging variables for the WRR-weight and queue-to-virtual-queue
 * sysctl handlers (queue id and value packed into one int). */
static int edma_weight_assigned_to_q __read_mostly;
static int edma_queue_to_virtual_q __read_mostly;
static bool edma_enable_rstp __read_mostly;
static int edma_athr_hdr_eth_type __read_mostly;

/* NOTE(review): 'static int page_mode;' (original line 64) was dropped by
 * the extraction — restore it before building. */
module_param(page_mode, int, 0);
MODULE_PARM_DESC(page_mode, "enable page mode");

/* Forces page_mode regardless of the DT "qcom,page-mode" property. */
static int overwrite_mode;
module_param(overwrite_mode, int, 0);
MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");

/* Jumbo MRU; non-default value enables fraglist mode in probe. */
static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
module_param(jumbo_mru, int, 0);
MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");

/* Number of RX queues; probe accepts only 4 or 8. */
static int num_rxq = 4;
module_param(num_rxq, int, 0);
MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
80 void edma_write_reg(u16 reg_addr
, u32 reg_value
)
82 writel(reg_value
, ((void __iomem
*)(edma_hw_addr
+ reg_addr
)));
85 void edma_read_reg(u16 reg_addr
, volatile u32
*reg_value
)
87 *reg_value
= readl((void __iomem
*)(edma_hw_addr
+ reg_addr
));
90 static void ess_write_reg(struct edma_common_info
*edma
, u16 reg_addr
, u32 reg_value
)
92 writel(reg_value
, ((void __iomem
*)
93 ((unsigned long)edma
->ess_hw_addr
+ reg_addr
)));
96 static void ess_read_reg(struct edma_common_info
*edma
, u16 reg_addr
,
97 volatile u32
*reg_value
)
99 *reg_value
= readl((void __iomem
*)
100 ((unsigned long)edma
->ess_hw_addr
+ reg_addr
));
103 static int ess_reset(struct edma_common_info
*edma
)
105 struct device_node
*switch_node
= NULL
;
106 struct reset_control
*ess_rst
;
109 switch_node
= of_find_node_by_name(NULL
, "ess-switch");
111 pr_err("switch-node not found\n");
115 ess_rst
= of_reset_control_get(switch_node
, "ess_rst");
116 of_node_put(switch_node
);
118 if (IS_ERR(ess_rst
)) {
119 pr_err("failed to find ess_rst!\n");
123 reset_control_assert(ess_rst
);
125 reset_control_deassert(ess_rst
);
127 reset_control_put(ess_rst
);
129 /* Enable only port 5 <--> port 0
130 * bits 0:6 bitmap of ports it can fwd to */
131 #define SET_PORT_BMP(r,v) \
132 ess_read_reg(edma, r, ®val); \
133 ess_write_reg(edma, r, ((regval & ~0x3F) | v));
135 SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL
,0x20);
136 SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL
,0x00);
137 SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL
,0x00);
138 SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL
,0x00);
139 SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL
,0x00);
140 SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL
,0x01);
141 ess_write_reg(edma
, ESS_RGMII_CTRL
, 0x400);
142 ess_write_reg(edma
, ESS_PORT0_STATUS
, ESS_PORT_1G_FDX
);
143 ess_write_reg(edma
, ESS_PORT5_STATUS
, ESS_PORT_1G_FDX
);
144 ess_write_reg(edma
, ESS_PORT0_HEADER_CTRL
, 0);
147 /* forward multicast and broadcast frames to CPU */
148 ess_write_reg(edma
, ESS_FWD_CTRL1
,
149 (ESS_PORTS_ALL
<< ESS_FWD_CTRL1_UC_FLOOD_S
) |
150 (ESS_PORTS_ALL
<< ESS_FWD_CTRL1_MC_FLOOD_S
) |
151 (ESS_PORTS_ALL
<< ESS_FWD_CTRL1_BC_FLOOD_S
));
156 void ess_set_port_status_speed(struct edma_common_info
*edma
,
157 struct phy_device
*phydev
, uint8_t port_id
)
159 uint16_t reg_off
= ESS_PORT0_STATUS
+ (4 * port_id
);
160 uint32_t reg_val
= 0;
162 ess_read_reg(edma
, reg_off
, ®_val
);
164 /* reset the speed bits [0:1] */
165 reg_val
&= ~ESS_PORT_STATUS_SPEED_INV
;
167 /* set the new speed */
168 switch(phydev
->speed
) {
169 case SPEED_1000
: reg_val
|= ESS_PORT_STATUS_SPEED_1000
; break;
170 case SPEED_100
: reg_val
|= ESS_PORT_STATUS_SPEED_100
; break;
171 case SPEED_10
: reg_val
|= ESS_PORT_STATUS_SPEED_10
; break;
172 default: reg_val
|= ESS_PORT_STATUS_SPEED_INV
; break;
175 /* check full/half duplex */
176 if (phydev
->duplex
) {
177 reg_val
|= ESS_PORT_STATUS_DUPLEX_MODE
;
179 reg_val
&= ~ESS_PORT_STATUS_DUPLEX_MODE
;
182 ess_write_reg(edma
, reg_off
, reg_val
);
185 /* edma_change_tx_coalesce()
186 * change tx interrupt moderation timer
188 void edma_change_tx_coalesce(int usecs
)
192 /* Here, we right shift the value from the user by 1, this is
193 * done because IMT resolution timer is 2usecs. 1 count
194 * of this register corresponds to 2 usecs.
196 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT
, ®_value
);
197 reg_value
= ((reg_value
& 0xffff) | ((usecs
>> 1) << 16));
198 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT
, reg_value
);
201 /* edma_change_rx_coalesce()
202 * change rx interrupt moderation timer
204 void edma_change_rx_coalesce(int usecs
)
208 /* Here, we right shift the value from the user by 1, this is
209 * done because IMT resolution timer is 2usecs. 1 count
210 * of this register corresponds to 2 usecs.
212 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT
, ®_value
);
213 reg_value
= ((reg_value
& 0xffff0000) | (usecs
>> 1));
214 edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT
, reg_value
);
217 /* edma_get_tx_rx_coalesce()
218 * Get tx/rx interrupt moderation value
220 void edma_get_tx_rx_coalesce(u32
*reg_val
)
222 edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT
, reg_val
);
225 void edma_read_append_stats(struct edma_common_info
*edma_cinfo
)
231 spin_lock_bh(&edma_cinfo
->stats_lock
);
232 p
= (uint32_t *)&(edma_cinfo
->edma_ethstats
);
234 for (i
= 0; i
< EDMA_MAX_TRANSMIT_QUEUE
; i
++) {
235 edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i
), &stat
);
240 for (i
= 0; i
< EDMA_MAX_TRANSMIT_QUEUE
; i
++) {
241 edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i
), &stat
);
246 for (i
= 0; i
< EDMA_MAX_RECEIVE_QUEUE
; i
++) {
247 edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i
), &stat
);
252 for (i
= 0; i
< EDMA_MAX_RECEIVE_QUEUE
; i
++) {
253 edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i
), &stat
);
258 spin_unlock_bh(&edma_cinfo
->stats_lock
);
261 static void edma_statistics_timer(struct timer_list
*t
)
263 struct edma_common_info
*edma_cinfo
=
264 from_timer(edma_cinfo
, t
, edma_stats_timer
);
266 edma_read_append_stats(edma_cinfo
);
268 mod_timer(&edma_cinfo
->edma_stats_timer
, jiffies
+ 1*HZ
);
271 static int edma_enable_stp_rstp(struct ctl_table
*table
, int write
,
272 void __user
*buffer
, size_t *lenp
,
277 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
279 edma_set_stp_rstp(edma_enable_rstp
);
284 static int edma_ath_hdr_eth_type(struct ctl_table
*table
, int write
,
285 void __user
*buffer
, size_t *lenp
,
290 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
292 edma_assign_ath_hdr_type(edma_athr_hdr_eth_type
);
297 static int edma_change_default_lan_vlan(struct ctl_table
*table
, int write
,
298 void __user
*buffer
, size_t *lenp
,
301 struct edma_adapter
*adapter
;
304 if (!edma_netdev
[1]) {
305 pr_err("Netdevice for default_lan does not exist\n");
309 adapter
= netdev_priv(edma_netdev
[1]);
311 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
314 adapter
->default_vlan_tag
= edma_default_ltag
;
319 static int edma_change_default_wan_vlan(struct ctl_table
*table
, int write
,
320 void __user
*buffer
, size_t *lenp
,
323 struct edma_adapter
*adapter
;
326 if (!edma_netdev
[0]) {
327 pr_err("Netdevice for default_wan does not exist\n");
331 adapter
= netdev_priv(edma_netdev
[0]);
333 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
336 adapter
->default_vlan_tag
= edma_default_wtag
;
341 static int edma_change_group1_vtag(struct ctl_table
*table
, int write
,
342 void __user
*buffer
, size_t *lenp
,
345 struct edma_adapter
*adapter
;
346 struct edma_common_info
*edma_cinfo
;
349 if (!edma_netdev
[0]) {
350 pr_err("Netdevice for Group 1 does not exist\n");
354 adapter
= netdev_priv(edma_netdev
[0]);
355 edma_cinfo
= adapter
->edma_cinfo
;
357 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
360 adapter
->default_vlan_tag
= edma_default_group1_vtag
;
365 static int edma_change_group2_vtag(struct ctl_table
*table
, int write
,
366 void __user
*buffer
, size_t *lenp
,
369 struct edma_adapter
*adapter
;
370 struct edma_common_info
*edma_cinfo
;
373 if (!edma_netdev
[1]) {
374 pr_err("Netdevice for Group 2 does not exist\n");
378 adapter
= netdev_priv(edma_netdev
[1]);
379 edma_cinfo
= adapter
->edma_cinfo
;
381 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
384 adapter
->default_vlan_tag
= edma_default_group2_vtag
;
389 static int edma_change_group3_vtag(struct ctl_table
*table
, int write
,
390 void __user
*buffer
, size_t *lenp
,
393 struct edma_adapter
*adapter
;
394 struct edma_common_info
*edma_cinfo
;
397 if (!edma_netdev
[2]) {
398 pr_err("Netdevice for Group 3 does not exist\n");
402 adapter
= netdev_priv(edma_netdev
[2]);
403 edma_cinfo
= adapter
->edma_cinfo
;
405 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
408 adapter
->default_vlan_tag
= edma_default_group3_vtag
;
413 static int edma_change_group4_vtag(struct ctl_table
*table
, int write
,
414 void __user
*buffer
, size_t *lenp
,
417 struct edma_adapter
*adapter
;
418 struct edma_common_info
*edma_cinfo
;
421 if (!edma_netdev
[3]) {
422 pr_err("Netdevice for Group 4 does not exist\n");
426 adapter
= netdev_priv(edma_netdev
[3]);
427 edma_cinfo
= adapter
->edma_cinfo
;
429 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
432 adapter
->default_vlan_tag
= edma_default_group4_vtag
;
437 static int edma_change_group5_vtag(struct ctl_table
*table
, int write
,
438 void __user
*buffer
, size_t *lenp
,
441 struct edma_adapter
*adapter
;
442 struct edma_common_info
*edma_cinfo
;
445 if (!edma_netdev
[4]) {
446 pr_err("Netdevice for Group 5 does not exist\n");
450 adapter
= netdev_priv(edma_netdev
[4]);
451 edma_cinfo
= adapter
->edma_cinfo
;
453 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
456 adapter
->default_vlan_tag
= edma_default_group5_vtag
;
461 static int edma_set_rss_idt_value(struct ctl_table
*table
, int write
,
462 void __user
*buffer
, size_t *lenp
,
467 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
469 edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx
),
474 static int edma_set_rss_idt_idx(struct ctl_table
*table
, int write
,
475 void __user
*buffer
, size_t *lenp
,
479 u32 old_value
= edma_rss_idt_idx
;
481 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
485 if (edma_rss_idt_idx
>= EDMA_NUM_IDT
) {
486 pr_err("Invalid RSS indirection table index %d\n",
488 edma_rss_idt_idx
= old_value
;
494 static int edma_weight_assigned_to_queues(struct ctl_table
*table
, int write
,
495 void __user
*buffer
, size_t *lenp
,
498 int ret
, queue_id
, weight
;
499 u32 reg_data
, data
, reg_addr
;
501 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
503 queue_id
= edma_weight_assigned_to_q
& EDMA_WRR_VID_SCTL_MASK
;
504 if (queue_id
< 0 || queue_id
> 15) {
505 pr_err("queue_id not within desired range\n");
509 weight
= edma_weight_assigned_to_q
>> EDMA_WRR_VID_SCTL_SHIFT
;
510 if (weight
< 0 || weight
> 0xF) {
511 pr_err("queue_id not within desired range\n");
515 data
= weight
<< EDMA_WRR_SHIFT(queue_id
);
517 reg_addr
= EDMA_REG_WRR_CTRL_Q0_Q3
+ (queue_id
& ~0x3);
518 edma_read_reg(reg_addr
, ®_data
);
519 reg_data
&= ~(1 << EDMA_WRR_SHIFT(queue_id
));
520 edma_write_reg(reg_addr
, data
| reg_data
);
526 static int edma_queue_to_virtual_queue_map(struct ctl_table
*table
, int write
,
527 void __user
*buffer
, size_t *lenp
,
530 int ret
, queue_id
, virtual_qid
;
531 u32 reg_data
, data
, reg_addr
;
533 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
535 queue_id
= edma_queue_to_virtual_q
& EDMA_WRR_VID_SCTL_MASK
;
536 if (queue_id
< 0 || queue_id
> 15) {
537 pr_err("queue_id not within desired range\n");
541 virtual_qid
= edma_queue_to_virtual_q
>>
542 EDMA_WRR_VID_SCTL_SHIFT
;
543 if (virtual_qid
< 0 || virtual_qid
> 8) {
544 pr_err("queue_id not within desired range\n");
548 data
= virtual_qid
<< EDMA_VQ_ID_SHIFT(queue_id
);
550 reg_addr
= EDMA_REG_VQ_CTRL0
+ (queue_id
& ~0x3);
551 edma_read_reg(reg_addr
, ®_data
);
552 reg_data
&= ~(1 << EDMA_VQ_ID_SHIFT(queue_id
));
553 edma_write_reg(reg_addr
, data
| reg_data
);
/* edma_table
 *	sysctl entries wiring the module's tunables to their handlers.
 *
 * NOTE(review): the per-entry opening/closing braces, the .mode fields
 * and the terminating empty entry were dropped by the extraction; code
 * tokens below are kept exactly as found — restore the dropped lines
 * from the upstream essedma driver before building.
 */
static struct ctl_table edma_table[] = {
	/* entry: default LAN VLAN tag */
	.procname = "default_lan_tag",
	.data = &edma_default_ltag,
	.maxlen = sizeof(int),
	.proc_handler = edma_change_default_lan_vlan
	/* entry: default WAN VLAN tag */
	.procname = "default_wan_tag",
	.data = &edma_default_wtag,
	.maxlen = sizeof(int),
	.proc_handler = edma_change_default_wan_vlan
	/* entry: WRR weight per TX queue */
	.procname = "weight_assigned_to_queues",
	.data = &edma_weight_assigned_to_q,
	.maxlen = sizeof(int),
	.proc_handler = edma_weight_assigned_to_queues
	/* entry: TX queue to virtual queue mapping */
	.procname = "queue_to_virtual_queue_map",
	.data = &edma_queue_to_virtual_q,
	.maxlen = sizeof(int),
	.proc_handler = edma_queue_to_virtual_queue_map
	/* entry: STP/RSTP enable flag */
	.procname = "enable_stp_rstp",
	.data = &edma_enable_rstp,
	.maxlen = sizeof(int),
	.proc_handler = edma_enable_stp_rstp
	/* entry: Atheros header ethertype */
	.procname = "athr_hdr_eth_type",
	.data = &edma_athr_hdr_eth_type,
	.maxlen = sizeof(int),
	.proc_handler = edma_ath_hdr_eth_type
	/* entries: per-group default VLAN tags */
	.procname = "default_group1_vlan_tag",
	.data = &edma_default_group1_vtag,
	.maxlen = sizeof(int),
	.proc_handler = edma_change_group1_vtag
	.procname = "default_group2_vlan_tag",
	.data = &edma_default_group2_vtag,
	.maxlen = sizeof(int),
	.proc_handler = edma_change_group2_vtag
	.procname = "default_group3_vlan_tag",
	.data = &edma_default_group3_vtag,
	.maxlen = sizeof(int),
	.proc_handler = edma_change_group3_vtag
	.procname = "default_group4_vlan_tag",
	.data = &edma_default_group4_vtag,
	.maxlen = sizeof(int),
	.proc_handler = edma_change_group4_vtag
	.procname = "default_group5_vlan_tag",
	.data = &edma_default_group5_vtag,
	.maxlen = sizeof(int),
	.proc_handler = edma_change_group5_vtag
	/* entries: RSS indirection table value and index */
	.procname = "edma_rss_idt_value",
	.data = &edma_rss_idt_val,
	.maxlen = sizeof(int),
	.proc_handler = edma_set_rss_idt_value
	.procname = "edma_rss_idt_idx",
	.data = &edma_rss_idt_idx,
	.maxlen = sizeof(int),
	.proc_handler = edma_set_rss_idt_idx
654 static int ess_parse(struct edma_common_info
*edma
)
656 struct device_node
*switch_node
;
659 switch_node
= of_find_node_by_name(NULL
, "ess-switch");
661 pr_err("cannot find ess-switch node\n");
665 edma
->ess_hw_addr
= of_io_request_and_map(switch_node
,
667 if (!edma
->ess_hw_addr
) {
668 pr_err("%s ioremap fail.", __func__
);
672 edma
->ess_clk
= of_clk_get_by_name(switch_node
, "ess_clk");
673 ret
= clk_prepare_enable(edma
->ess_clk
);
675 of_node_put(switch_node
);
679 /* edma_axi_netdev_ops
680 * Describe the operations supported by registered netdevices
682 * static const struct net_device_ops edma_axi_netdev_ops = {
683 * .ndo_open = edma_open,
684 * .ndo_stop = edma_close,
685 * .ndo_start_xmit = edma_xmit_frame,
686 * .ndo_set_mac_address = edma_set_mac_addr,
689 static const struct net_device_ops edma_axi_netdev_ops
= {
690 .ndo_open
= edma_open
,
691 .ndo_stop
= edma_close
,
692 .ndo_start_xmit
= edma_xmit
,
693 .ndo_set_mac_address
= edma_set_mac_addr
,
694 #ifdef CONFIG_RFS_ACCEL
695 .ndo_rx_flow_steer
= edma_rx_flow_steer
,
696 .ndo_register_rfs_filter
= edma_register_rfs_filter
,
697 .ndo_get_default_vlan_tag
= edma_get_default_vlan_tag
,
699 .ndo_get_stats
= edma_get_stats
,
/* edma_axi_probe()
 *	Initialise an adapter identified by a platform_device structure.
 *
 *	The OS initialization, configuring of the adapter private structure,
 *	and a hardware reset occur in the probe.
 *
 * NOTE(review): the extraction dropped many interleaved lines of this
 * function (opening/closing braces, early returns, error checks, a few
 * declarations such as 'hw', 'len', 'portid_bmp', 'm', 'rx_start', one
 * '#endif', and trailing call arguments). Code tokens below are kept
 * exactly as found, reflowed for readability, with NOTE markers at the
 * visible gaps — restore the dropped lines from the upstream essedma
 * driver before building.
 */
static int edma_axi_probe(struct platform_device *pdev)
	struct edma_common_info *edma_cinfo;
	struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pnp;
	struct device_node *mdio_node = NULL;
	struct mii_bus *miibus = NULL;
	int i, j, k, err = 0;
	int idx = 0, idx_mac = 0;

	/* the per-core queue tables above assume exactly
	 * EDMA_CPU_CORES_SUPPORTED cores */
	if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
		dev_err(&pdev->dev, "Invalid CPU Cores\n");

	/* only 4 or 8 RX queues are supported (num_rxq module param) */
	if ((num_rxq != 4) && (num_rxq != 8)) {
		dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");

	edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);
	edma_cinfo->pdev = pdev;

	of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
	if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
		pr_err("Invalid DTSI Entry for qcom,num_gmac\n");

	/* Initialize the netdev array before allocation
	 * to avoid double free
	 */
	for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
		edma_netdev[i] = NULL;

	for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
		edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
				EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);

		if (!edma_netdev[i]) {
			/* NOTE(review): dev_err() opener dropped here */
			"net device alloc fails for index=%d\n", i);

		SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
		platform_set_drvdata(pdev, edma_netdev[i]);
		edma_cinfo->netdev[i] = edma_netdev[i];

	/* Fill ring details */
	edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
	edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
	edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;

	/* Update num rx queues based on module parameter */
	edma_cinfo->num_rx_queues = num_rxq;
	edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);

	edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;

	hw = &edma_cinfo->hw;

	/* Fill HW defaults */
	hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
	hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;

	of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
	of_property_read_u32(np, "qcom,rx_head_buf_size",
			     &hw->rx_head_buff_size);

	if (overwrite_mode) {
		dev_info(&pdev->dev, "page mode overwritten");
		edma_cinfo->page_mode = page_mode;

	/* NOTE(review): the guard (likely 'if (jumbo_mru)') for the next
	 * assignment was dropped here */
		edma_cinfo->fraglist_mode = 1;

	if (edma_cinfo->page_mode)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
	else if (edma_cinfo->fraglist_mode)
		hw->rx_head_buff_size = jumbo_mru;
	else if (!hw->rx_head_buff_size)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;

	hw->misc_intr_mask = 0;
	hw->wol_intr_mask = 0;

	hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
	hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

	/* configure RSS type to the different protocol that can be
	 * supported */
	hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
		EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
		EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma_cinfo->hw.hw_addr)) {
		err = PTR_ERR(edma_cinfo->hw.hw_addr);

	edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;

	/* Parse tx queue interrupt number from device tree */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);

	/* Parse rx queue interrupt number from device tree
	 * Here we are setting j to point to the point where we
	 * left tx interrupt parsing(i.e 16) and run run the loop
	 * from 0 to 7 to parse rx interrupt number.
	 */
	for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
	     i < edma_cinfo->num_rx_queues; i++) {
		edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
		k += ((num_rxq == 4) ? 2 : 1);
		j += ((num_rxq == 4) ? 2 : 1);

	edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
	edma_cinfo->rx_page_buffer_len = PAGE_SIZE;

	/* NOTE(review): the 'if (err) { ... goto ... }' bodies after the
	 * next four allocations were partially dropped */
	err = edma_alloc_queues_tx(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of TX queue failed\n");

	err = edma_alloc_queues_rx(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of RX queue failed\n");

	err = edma_alloc_tx_rings(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of TX resources failed\n");

	err = edma_alloc_rx_rings(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of RX resources failed\n");

	/* Initialize netdev and netdev bitmap for transmit descriptor rings */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];

		etdr->netdev_bmp = 0;
		for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
			etdr->netdev[j] = NULL;

	if (of_property_read_bool(np, "qcom,mdio_supported")) {
		mdio_node = of_find_compatible_node(NULL, NULL,
						    "qcom,ipq4019-mdio");
			dev_err(&pdev->dev, "cannot find mdio node by phandle");
			goto err_mdiobus_init_fail;

		miibus = of_mdio_find_bus(mdio_node);

	if (of_property_read_bool(np, "qcom,single-phy") &&
	    edma_cinfo->num_gmac == 1) {
		err = ess_parse(edma_cinfo);
			err = ess_reset(edma_cinfo);
				goto err_single_phy_init;
		edma_cinfo->is_single_phy = true;

	for_each_available_child_of_node(np, pnp) {
		const char *mac_addr;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx_mac == edma_cinfo->num_gmac) {

		mac_addr = of_get_mac_address(pnp);
		if (!IS_ERR(mac_addr))
			memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);

	/* Populate the adapter structure register the netdevice */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		adapter[i] = netdev_priv(edma_netdev[i]);
		adapter[i]->netdev = edma_netdev[i];
		adapter[i]->pdev = pdev;
		for (j = 0; j < CONFIG_NR_CPUS; j++) {
			/* NOTE(review): the assignment of 'm' was dropped */
			adapter[i]->tx_start_offset[j] =
				((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with available net-devices.
			 * For instance , with 5 net-devices
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining.
			 */
			for (k = adapter[i]->tx_start_offset[j]; k <
			     (adapter[i]->tx_start_offset[j] + 2); k++) {
				if (edma_fill_netdev(edma_cinfo, k, i, j)) {
					pr_err("Netdev overflow Error\n");

		adapter[i]->edma_cinfo = edma_cinfo;
		edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
		edma_netdev[i]->max_mtu = 9000;
		edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM
				| NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO | NETIF_F_HW_VLAN_CTAG_TX;
		edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_RX
				| NETIF_F_SG | NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;

#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->hw_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->vlan_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		edma_netdev[i]->wanted_features |= NETIF_F_NTUPLE | NETIF_F_RXHASH;
		/* NOTE(review): matching #endif dropped by extraction */

		edma_set_ethtool_ops(edma_netdev[i]);

		/* This just fill in some default MAC address
		 */
		if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
			random_ether_addr(edma_netdev[i]->dev_addr);
			pr_info("EDMA using MAC@ - using");
			pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
				*(edma_netdev[i]->dev_addr),
				*(edma_netdev[i]->dev_addr + 1),
				*(edma_netdev[i]->dev_addr + 2),
				*(edma_netdev[i]->dev_addr + 3),
				*(edma_netdev[i]->dev_addr + 4),
				*(edma_netdev[i]->dev_addr + 5));

		err = register_netdev(edma_netdev[i]);

		/* carrier off reporting is important to
		 * ethtool even BEFORE open
		 */
		netif_carrier_off(edma_netdev[i]);

		/* Allocate reverse irq cpu mapping structure for
		 * receive queues
		 */
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->rx_cpu_rmap =
			alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
		if (!edma_netdev[i]->rx_cpu_rmap) {
			goto err_rmap_alloc_fail;

	for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
		edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;

	for_each_available_child_of_node(np, pnp) {
		const uint32_t *vlan_tag = NULL;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx == edma_cinfo->num_gmac)

		/* Populate port-id to netdev lookup table */
		vlan_tag = of_get_property(pnp, "vlan_tag", &len);
			pr_err("Vlan tag parsing Failed.\n");
			goto err_rmap_alloc_fail;

		adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);
		/* NOTE(review): a 'vlan_tag++;' step was likely dropped
		 * before re-reading for the port bitmap */
		portid_bmp = of_read_number(vlan_tag, 1);
		adapter[idx]->dp_bitmap = portid_bmp;

		portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
		while (portid_bmp) {
			int port_bit = ffs(portid_bmp);

			if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
				goto err_rmap_alloc_fail;
			/* NOTE(review): right-hand side of this lookup-table
			 * assignment was dropped */
			edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
			portid_bmp &= ~(1 << (port_bit - 1));

		if (!of_property_read_u32(pnp, "qcom,poll_required",
					  &adapter[idx]->poll_required)) {
			if (adapter[idx]->poll_required) {
				of_property_read_u32(pnp, "qcom,phy_mdio_addr",
						     &adapter[idx]->phy_mdio_addr);
				of_property_read_u32(pnp, "qcom,forced_speed",
						     &adapter[idx]->forced_speed);
				of_property_read_u32(pnp, "qcom,forced_duplex",
						     &adapter[idx]->forced_duplex);

				/* create a phyid using MDIO bus id
				 * and MDIO bus address
				 */
				snprintf(adapter[idx]->phy_id,
					 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
					 adapter[idx]->phy_mdio_addr);

			/* no polling: force 1G full duplex */
			adapter[idx]->poll_required = 0;
			adapter[idx]->forced_speed = SPEED_1000;
			adapter[idx]->forced_duplex = DUPLEX_FULL;

	edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,
	if (!edma_cinfo->edma_ctl_table_hdr) {
		dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
		goto err_unregister_sysctl_tbl;

	/* Disable all 16 Tx and 8 rx irqs */
	edma_irq_disable(edma_cinfo);

	err = edma_reset(edma_cinfo);

	/* populate per_core_info, do a napi_Add, request 16 TX irqs,
	 * 8 RX irqs, do a napi enable
	 */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		edma_cinfo->edma_percpu_info[i].napi.state = 0;

		netif_napi_add(edma_netdev[0],
			       &edma_cinfo->edma_percpu_info[i].napi,
		napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
		edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
		edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
				<< (i << EDMA_RX_PER_CPU_MASK_SHIFT);
		edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
		edma_cinfo->edma_percpu_info[i].rx_start =
			i << EDMA_RX_CPU_START_SHIFT;
		rx_start = i << EDMA_RX_CPU_START_SHIFT;
		edma_cinfo->edma_percpu_info[i].tx_status = 0;
		edma_cinfo->edma_percpu_info[i].rx_status = 0;
		edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;

		/* Request irq per core */
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < tx_start[i] + 4; j++) {
			sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
			err = request_irq(edma_cinfo->tx_irq[j],
					  &edma_cinfo->edma_percpu_info[i]);

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
			sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
			err = request_irq(edma_cinfo->rx_irq[j],
					  &edma_cinfo->edma_percpu_info[i]);

#ifdef CONFIG_RFS_ACCEL
		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < rx_start + 2; j += 2) {
			err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
					       edma_cinfo->rx_irq[j]);
				goto err_rmap_add_fail;

	/* Used to clear interrupt status, allocate rx buffer,
	 * configure edma descriptors registers
	 */
	err = edma_configure(edma_cinfo);

	/* Configure RSS indirection table.
	 * 128 hash will be configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);

	/* Configure load balance mapping table.
	 * 4 table entry will be configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 */
	edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings
	 * User can also change this value runtime through
	 * sysctl
	 */
	edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	/* Configure Max AXI Burst write size to 128 bytes*/
	edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
		       EDMA_AXIW_MAXWRSIZE_VALUE);

	/* Enable All 16 tx and 8 rx irq mask */
	edma_irq_enable(edma_cinfo);
	edma_enable_tx_ctrl(&edma_cinfo->hw);
	edma_enable_rx_ctrl(&edma_cinfo->hw);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (adapter[i]->poll_required) {
			int phy_mode = of_get_phy_mode(np);

				phy_mode = PHY_INTERFACE_MODE_SGMII;
			adapter[i]->phydev =
				phy_connect(edma_netdev[i],
					    (const char *)adapter[i]->phy_id,
			if (IS_ERR(adapter[i]->phydev)) {
				dev_dbg(&pdev->dev, "PHY attach FAIL");
				goto edma_phy_attach_fail;
			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 adapter[i]->phydev->advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
					 adapter[i]->phydev->advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 adapter[i]->phydev->supported);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
					 adapter[i]->phydev->supported);
			adapter[i]->phydev = NULL;

	spin_lock_init(&edma_cinfo->stats_lock);

	timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);

	/* ---- error unwind ---- */
edma_phy_attach_fail:
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
		adapter[i]->netdev->rx_cpu_rmap = NULL;

	edma_free_irqs(adapter[0]);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

err_unregister_sysctl_tbl:
err_rmap_alloc_fail:
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

err_single_phy_init:
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);

err_mdiobus_init_fail:
	edma_free_rx_rings(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	iounmap(edma_cinfo->hw.hw_addr);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_netdev(edma_netdev[i]);
/* edma_axi_remove()
 *	Device Removal Routine
 *
 * edma_axi_remove is called by the platform subsystem to alert the driver
 * that it should release a platform device.
 *
 * NOTE(review): the extraction dropped interleaved lines of this function
 * (opening/closing braces, the 'int i;' declaration, the final return and
 * one '#endif'). Code tokens below are kept exactly as found, reflowed,
 * with NOTE markers at the visible gaps — restore the dropped lines from
 * the upstream essedma driver before building.
 */
static int edma_axi_remove(struct platform_device *pdev)
	struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_hw *hw = &edma_cinfo->hw;

	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

	edma_stop_rx_tx(hw);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

	edma_irq_disable(edma_cinfo);
	/* acknowledge any pending RX/TX interrupt status */
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
		edma_netdev[i]->rx_cpu_rmap = NULL;
	/* NOTE(review): matching #endif dropped by extraction */

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);

		if (adapter->phydev)
			phy_disconnect(adapter->phydev);

	del_timer_sync(&edma_cinfo->edma_stats_timer);
	edma_free_irqs(adapter);
	unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
	edma_free_tx_resources(edma_cinfo);
	edma_free_rx_resources(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_rx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		free_netdev(edma_netdev[i]);
/* Device-tree compatible strings handled by this driver.
 * NOTE(review): the terminating empty sentinel entry and the closing
 * '};' of the array were dropped by the extraction — restore them.
 */
static const struct of_device_id edma_of_mtable[] = {
	{.compatible = "qcom,ess-edma" },
MODULE_DEVICE_TABLE(of, edma_of_mtable);
/* Platform driver glue: binds edma_axi_probe()/edma_axi_remove() to
 * devices matching edma_of_mtable.
 * NOTE(review): the '.driver = {' wrapper around .name/.of_match_table
 * and the closing braces were dropped by the extraction — restore them.
 */
static struct platform_driver edma_axi_driver = {
	.name = edma_axi_driver_name,
	.of_match_table = edma_of_mtable,
	.probe = edma_axi_probe,
	.remove = edma_axi_remove,

module_platform_driver(edma_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("GPL");