/*
 * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/cpu_rmap.h>
#include <linux/of_net.h>
#include <linux/timer.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
#include <linux/clk.h>
#include <linux/string.h>
#include <linux/reset.h>
/* Weight round robin and virtual QID mask */
#define EDMA_WRR_VID_SCTL_MASK 0xffff

/* Weight round robin and virtual QID shift */
#define EDMA_WRR_VID_SCTL_SHIFT 16
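
/*
 * Illustrative note (derived from the sysctl handlers further below): the
 * weight_assigned_to_queues and queue_to_virtual_queue_map sysctls each take
 * one packed integer, where the low 16 bits (EDMA_WRR_VID_SCTL_MASK) select
 * the queue ID and the value shifted down by EDMA_WRR_VID_SCTL_SHIFT carries
 * the weight or virtual queue ID. For example, writing 0x00030005 assigns
 * weight 3 to queue 5.
 */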
char edma_axi_driver_name[] = "ess_edma";
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			       NETIF_MSG_LINK | NETIF_MSG_TIMER |
			       NETIF_MSG_IFDOWN | NETIF_MSG_IFUP;

static u32 edma_hw_addr;

char edma_tx_irq[16][64];
char edma_rx_irq[8][64];

struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED];

static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1,
			  EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3};
static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1,
			 EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3};
static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN;
static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN;
static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN;
static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN;
static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN;
static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN;
static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN;
static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE;
static u32 edma_rss_idt_idx;

static int edma_weight_assigned_to_q __read_mostly;
static int edma_queue_to_virtual_q __read_mostly;
static bool edma_enable_rstp __read_mostly;
static int edma_athr_hdr_eth_type __read_mostly;
module_param(page_mode, int, 0);
MODULE_PARM_DESC(page_mode, "enable page mode");

static int overwrite_mode;
module_param(overwrite_mode, int, 0);
MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting");

static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE;
module_param(jumbo_mru, int, 0);
MODULE_PARM_DESC(jumbo_mru, "enable fraglist support");

static int num_rxq = 4;
module_param(num_rxq, int, 0);
MODULE_PARM_DESC(num_rxq, "change the number of rx queues");
void edma_write_reg(u16 reg_addr, u32 reg_value)
{
	writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr)));
}

void edma_read_reg(u16 reg_addr, volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr));
}

static void ess_write_reg(struct edma_common_info *edma, u16 reg_addr,
			  u32 reg_value)
{
	writel(reg_value, ((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr)));
}

static void ess_read_reg(struct edma_common_info *edma, u16 reg_addr,
			 volatile u32 *reg_value)
{
	*reg_value = readl((void __iomem *)
		((unsigned long)edma->ess_hw_addr + reg_addr));
}
static int ess_reset(struct edma_common_info *edma)
	struct device_node *switch_node = NULL;
	struct reset_control *ess_rst;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
		pr_err("switch-node not found\n");

	ess_rst = of_reset_control_get(switch_node, "ess_rst");
	of_node_put(switch_node);

	if (IS_ERR(ess_rst)) {
		pr_err("failed to find ess_rst!\n");

	reset_control_assert(ess_rst);
	reset_control_deassert(ess_rst);
	reset_control_put(ess_rst);
	/* Enable only port 5 <--> port 0
	 * bits 0:6 bitmap of ports it can fwd to */
#define SET_PORT_BMP(r,v) \
	ess_read_reg(edma, r, &regval); \
	ess_write_reg(edma, r, ((regval & ~0x3F) | v));
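
	/*
	 * Descriptive note: SET_PORT_BMP() does a read-modify-write of the
	 * low six bits of a port's LOOKUP_CTRL register, i.e. the bitmap of
	 * ports that port may forward to. 0x20 (bit 5) below therefore lets
	 * port 0 forward only to port 5, and 0x01 lets port 5 forward only
	 * to port 0, matching the "port 5 <--> port 0" comment above.
	 */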
	SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL, 0x20);
	SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL, 0x00);
	SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL, 0x01);
	ess_write_reg(edma, ESS_RGMII_CTRL, 0x400);
	ess_write_reg(edma, ESS_PORT0_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT5_STATUS, ESS_PORT_1G_FDX);
	ess_write_reg(edma, ESS_PORT0_HEADER_CTRL, 0);

	/* forward multicast and broadcast frames to CPU */
	ess_write_reg(edma, ESS_FWD_CTRL1,
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_UC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_MC_FLOOD_S) |
		      (ESS_PORTS_ALL << ESS_FWD_CTRL1_BC_FLOOD_S));
void ess_set_port_status_speed(struct edma_common_info *edma,
			       struct phy_device *phydev, uint8_t port_id)
{
	uint16_t reg_off = ESS_PORT0_STATUS + (4 * port_id);
	uint32_t reg_val = 0;

	ess_read_reg(edma, reg_off, &reg_val);

	/* reset the speed bits [0:1] */
	reg_val &= ~ESS_PORT_STATUS_SPEED_INV;

	/* set the new speed */
	switch (phydev->speed) {
	case SPEED_1000: reg_val |= ESS_PORT_STATUS_SPEED_1000; break;
	case SPEED_100: reg_val |= ESS_PORT_STATUS_SPEED_100; break;
	case SPEED_10: reg_val |= ESS_PORT_STATUS_SPEED_10; break;
	default: reg_val |= ESS_PORT_STATUS_SPEED_INV; break;
	}

	/* check full/half duplex */
	if (phydev->duplex) {
		reg_val |= ESS_PORT_STATUS_DUPLEX_MODE;
	} else {
		reg_val &= ~ESS_PORT_STATUS_DUPLEX_MODE;
	}

	ess_write_reg(edma, reg_off, reg_val);
}
/* edma_change_tx_coalesce()
 *	change tx interrupt moderation timer
 */
void edma_change_tx_coalesce(int usecs)
{
	u32 reg_value;

	/* Here, we right shift the value from the user by 1, this is
	 * done because IMT resolution timer is 2usecs. 1 count
	 * of this register corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}
/* edma_change_rx_coalesce()
 *	change rx interrupt moderation timer
 */
void edma_change_rx_coalesce(int usecs)
{
	u32 reg_value;

	/* Here, we right shift the value from the user by 1, this is
	 * done because IMT resolution timer is 2usecs. 1 count
	 * of this register corresponds to 2 usecs.
	 */
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, &reg_value);
	reg_value = ((reg_value & 0xffff0000) | (usecs >> 1));
	edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value);
}

/* edma_get_tx_rx_coalesce()
 *	Get tx/rx interrupt moderation value
 */
void edma_get_tx_rx_coalesce(u32 *reg_val)
{
	edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val);
}
void edma_read_append_stats(struct edma_common_info *edma_cinfo)
	spin_lock_bh(&edma_cinfo->stats_lock);
	p = (uint32_t *)&(edma_cinfo->edma_ethstats);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat);

	for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) {
		edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat);

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat);

	for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) {
		edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat);

	spin_unlock_bh(&edma_cinfo->stats_lock);
static void edma_statistics_timer(struct timer_list *t)
	struct edma_common_info *edma_cinfo =
		from_timer(edma_cinfo, t, edma_stats_timer);

	edma_read_append_stats(edma_cinfo);

	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
static int edma_enable_stp_rstp(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
		edma_set_stp_rstp(edma_enable_rstp);

static int edma_ath_hdr_eth_type(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *ppos)
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
		edma_assign_ath_hdr_type(edma_athr_hdr_eth_type);
static int edma_change_default_lan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
	struct edma_adapter *adapter;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for default_lan does not exist\n");

	adapter = netdev_priv(edma_netdev[1]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

		adapter->default_vlan_tag = edma_default_ltag;

static int edma_change_default_wan_vlan(struct ctl_table *table, int write,
					void __user *buffer, size_t *lenp,
					loff_t *ppos)
	struct edma_adapter *adapter;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for default_wan does not exist\n");

	adapter = netdev_priv(edma_netdev[0]);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

		adapter->default_vlan_tag = edma_default_wtag;
static int edma_change_group1_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;

	if (!edma_netdev[0]) {
		pr_err("Netdevice for Group 1 does not exist\n");

	adapter = netdev_priv(edma_netdev[0]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

		adapter->default_vlan_tag = edma_default_group1_vtag;

static int edma_change_group2_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;

	if (!edma_netdev[1]) {
		pr_err("Netdevice for Group 2 does not exist\n");

	adapter = netdev_priv(edma_netdev[1]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

		adapter->default_vlan_tag = edma_default_group2_vtag;

static int edma_change_group3_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;

	if (!edma_netdev[2]) {
		pr_err("Netdevice for Group 3 does not exist\n");

	adapter = netdev_priv(edma_netdev[2]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

		adapter->default_vlan_tag = edma_default_group3_vtag;

static int edma_change_group4_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;

	if (!edma_netdev[3]) {
		pr_err("Netdevice for Group 4 does not exist\n");

	adapter = netdev_priv(edma_netdev[3]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

		adapter->default_vlan_tag = edma_default_group4_vtag;

static int edma_change_group5_vtag(struct ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
	struct edma_adapter *adapter;
	struct edma_common_info *edma_cinfo;

	if (!edma_netdev[4]) {
		pr_err("Netdevice for Group 5 does not exist\n");

	adapter = netdev_priv(edma_netdev[4]);
	edma_cinfo = adapter->edma_cinfo;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

		adapter->default_vlan_tag = edma_default_group5_vtag;
static int edma_set_rss_idt_value(struct ctl_table *table, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos)
	ret = proc_dointvec(table, write, buffer, lenp, ppos);
		edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx),

static int edma_set_rss_idt_idx(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
	u32 old_value = edma_rss_idt_idx;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (edma_rss_idt_idx >= EDMA_NUM_IDT) {
		pr_err("Invalid RSS indirection table index %d\n",
		edma_rss_idt_idx = old_value;
static int edma_weight_assigned_to_queues(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos)
	int ret, queue_id, weight;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK;
	if (queue_id < 0 || queue_id > 15) {
		pr_err("queue_id not within desired range\n");

	weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT;
	if (weight < 0 || weight > 0xF) {
		pr_err("weight not within desired range\n");

	data = weight << EDMA_WRR_SHIFT(queue_id);

	reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3);
	edma_read_reg(reg_addr, &reg_data);
	reg_data &= ~(1 << EDMA_WRR_SHIFT(queue_id));
	edma_write_reg(reg_addr, data | reg_data);
static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write,
					   void __user *buffer, size_t *lenp,
					   loff_t *ppos)
	int ret, queue_id, virtual_qid;
	u32 reg_data, data, reg_addr;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK;
	if (queue_id < 0 || queue_id > 15) {
		pr_err("queue_id not within desired range\n");

	virtual_qid = edma_queue_to_virtual_q >> EDMA_WRR_VID_SCTL_SHIFT;
	if (virtual_qid < 0 || virtual_qid > 8) {
		pr_err("virtual_qid not within desired range\n");

	data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id);

	reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3);
	edma_read_reg(reg_addr, &reg_data);
	reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id));
	edma_write_reg(reg_addr, data | reg_data);
static struct ctl_table edma_table[] = {
		.procname = "default_lan_tag",
		.data = &edma_default_ltag,
		.maxlen = sizeof(int),
		.proc_handler = edma_change_default_lan_vlan

		.procname = "default_wan_tag",
		.data = &edma_default_wtag,
		.maxlen = sizeof(int),
		.proc_handler = edma_change_default_wan_vlan

		.procname = "weight_assigned_to_queues",
		.data = &edma_weight_assigned_to_q,
		.maxlen = sizeof(int),
		.proc_handler = edma_weight_assigned_to_queues

		.procname = "queue_to_virtual_queue_map",
		.data = &edma_queue_to_virtual_q,
		.maxlen = sizeof(int),
		.proc_handler = edma_queue_to_virtual_queue_map

		.procname = "enable_stp_rstp",
		.data = &edma_enable_rstp,
		.maxlen = sizeof(int),
		.proc_handler = edma_enable_stp_rstp

		.procname = "athr_hdr_eth_type",
		.data = &edma_athr_hdr_eth_type,
		.maxlen = sizeof(int),
		.proc_handler = edma_ath_hdr_eth_type

		.procname = "default_group1_vlan_tag",
		.data = &edma_default_group1_vtag,
		.maxlen = sizeof(int),
		.proc_handler = edma_change_group1_vtag

		.procname = "default_group2_vlan_tag",
		.data = &edma_default_group2_vtag,
		.maxlen = sizeof(int),
		.proc_handler = edma_change_group2_vtag

		.procname = "default_group3_vlan_tag",
		.data = &edma_default_group3_vtag,
		.maxlen = sizeof(int),
		.proc_handler = edma_change_group3_vtag

		.procname = "default_group4_vlan_tag",
		.data = &edma_default_group4_vtag,
		.maxlen = sizeof(int),
		.proc_handler = edma_change_group4_vtag

		.procname = "default_group5_vlan_tag",
		.data = &edma_default_group5_vtag,
		.maxlen = sizeof(int),
		.proc_handler = edma_change_group5_vtag

		.procname = "edma_rss_idt_value",
		.data = &edma_rss_idt_val,
		.maxlen = sizeof(int),
		.proc_handler = edma_set_rss_idt_value

		.procname = "edma_rss_idt_idx",
		.data = &edma_rss_idt_idx,
		.maxlen = sizeof(int),
		.proc_handler = edma_set_rss_idt_idx
static int ess_parse(struct edma_common_info *edma)
	struct device_node *switch_node;

	switch_node = of_find_node_by_name(NULL, "ess-switch");
		pr_err("cannot find ess-switch node\n");

	edma->ess_hw_addr = of_io_request_and_map(switch_node,
	if (!edma->ess_hw_addr) {
		pr_err("%s ioremap fail.", __func__);

	edma->ess_clk = of_clk_get_by_name(switch_node, "ess_clk");
	ret = clk_prepare_enable(edma->ess_clk);

	of_node_put(switch_node);
/* edma_axi_netdev_ops
 *	Describe the operations supported by registered netdevices
 *
 * static const struct net_device_ops edma_axi_netdev_ops = {
 *	.ndo_open = edma_open,
 *	.ndo_stop = edma_close,
 *	.ndo_start_xmit = edma_xmit_frame,
 *	.ndo_set_mac_address = edma_set_mac_addr,
 */
static const struct net_device_ops edma_axi_netdev_ops = {
	.ndo_open = edma_open,
	.ndo_stop = edma_close,
	.ndo_start_xmit = edma_xmit,
	.ndo_set_mac_address = edma_set_mac_addr,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer = edma_rx_flow_steer,
	.ndo_register_rfs_filter = edma_register_rfs_filter,
	.ndo_get_default_vlan_tag = edma_get_default_vlan_tag,
	.ndo_get_stats = edma_get_stats,
 * Initialise an adapter identified by a platform_device structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur in the probe.
static int edma_axi_probe(struct platform_device *pdev)
	struct edma_common_info *edma_cinfo;
	struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED];
	struct resource *res;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *pnp;
	struct device_node *mdio_node = NULL;
	struct platform_device *mdio_plat = NULL;
	struct mii_bus *miibus = NULL;
	struct edma_mdio_data *mdio_data = NULL;
	int i, j, k, err = 0;
	int idx = 0, idx_mac = 0;

	if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) {
		dev_err(&pdev->dev, "Invalid CPU Cores\n");
	if ((num_rxq != 4) && (num_rxq != 8)) {
		dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n");

	edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL);

	edma_cinfo->pdev = pdev;

	of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac);
	if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) {
		pr_err("Invalid DTSI Entry for qcom,num_gmac\n");
	/* Initialize the netdev array before allocation
	 * to avoid double free
	 */
	for (i = 0 ; i < edma_cinfo->num_gmac ; i++)
		edma_netdev[i] = NULL;

	for (i = 0 ; i < edma_cinfo->num_gmac ; i++) {
		edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter),
				EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE);

		if (!edma_netdev[i]) {
			"net device alloc fails for index=%d\n", i);

		SET_NETDEV_DEV(edma_netdev[i], &pdev->dev);
		platform_set_drvdata(pdev, edma_netdev[i]);
		edma_cinfo->netdev[i] = edma_netdev[i];
	/* Fill ring details */
	edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE;
	edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4);
	edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE;

	/* Update num rx queues based on module parameter */
	edma_cinfo->num_rx_queues = num_rxq;
	edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 1 : 2);

	edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE;

	hw = &edma_cinfo->hw;

	/* Fill HW defaults */
	hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK;
	hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK;
	of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode);
	of_property_read_u32(np, "qcom,rx_head_buf_size",
			     &hw->rx_head_buff_size);

	if (overwrite_mode) {
		dev_info(&pdev->dev, "page mode overwritten");
		edma_cinfo->page_mode = page_mode;

		edma_cinfo->fraglist_mode = 1;

	if (edma_cinfo->page_mode)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO;
	else if (edma_cinfo->fraglist_mode)
		hw->rx_head_buff_size = jumbo_mru;
	else if (!hw->rx_head_buff_size)
		hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE;

	hw->misc_intr_mask = 0;
	hw->wol_intr_mask = 0;

	hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE;
	hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE;

	/* configure RSS type to the different protocol that can be
	hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP |
		       EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP |
		       EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(edma_cinfo->hw.hw_addr)) {
		err = PTR_ERR(edma_cinfo->hw.hw_addr);

	edma_hw_addr = (u32)edma_cinfo->hw.hw_addr;
	/* Parse tx queue interrupt number from device tree */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++)
		edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i);

	/* Parse rx queue interrupt number from device tree.
	 * Here we are setting j to point to where we left off after
	 * tx interrupt parsing (i.e. 16) and run the loop
	 * from 0 to 7 to parse the rx interrupt numbers.
	 */
	for (i = 0, j = edma_cinfo->num_tx_queues, k = 0;
	     i < edma_cinfo->num_rx_queues; i++) {
		edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j);
		k += ((num_rxq == 4) ? 2 : 1);
		j += ((num_rxq == 4) ? 2 : 1);
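	/*
	 * Descriptive note: the loop above suggests that when num_rxq is 4,
	 * only every other hardware rx queue is used, so both the rx_irq[]
	 * index (k) and the device-tree interrupt index (j) advance by two
	 * per iteration; with 8 rx queues they advance one at a time.
	 */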
	edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size;
	edma_cinfo->rx_page_buffer_len = PAGE_SIZE;

	err = edma_alloc_queues_tx(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of TX queue failed\n");

	err = edma_alloc_queues_rx(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of RX queue failed\n");

	err = edma_alloc_tx_rings(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of TX resources failed\n");

	err = edma_alloc_rx_rings(edma_cinfo);
		dev_err(&pdev->dev, "Allocation of RX resources failed\n");
	/* Initialize netdev and netdev bitmap for transmit descriptor rings */
	for (i = 0; i < edma_cinfo->num_tx_queues; i++) {
		struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i];

		etdr->netdev_bmp = 0;
		for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) {
			etdr->netdev[j] = NULL;
	if (of_property_read_bool(np, "qcom,mdio_supported")) {
		mdio_node = of_find_compatible_node(NULL, NULL,
						    "qcom,ipq4019-mdio");
			dev_err(&pdev->dev, "cannot find mdio node by phandle");
			goto err_mdiobus_init_fail;

		mdio_plat = of_find_device_by_node(mdio_node);
			"cannot find platform device from mdio node");
			of_node_put(mdio_node);
			goto err_mdiobus_init_fail;

		mdio_data = dev_get_drvdata(&mdio_plat->dev);
			"cannot get mii bus reference from device data");
			of_node_put(mdio_node);
			goto err_mdiobus_init_fail;

		miibus = mdio_data->mii_bus;
	if (of_property_read_bool(np, "qcom,single-phy") &&
	    edma_cinfo->num_gmac == 1) {
		err = ess_parse(edma_cinfo);
			err = ess_reset(edma_cinfo);
				goto err_single_phy_init;
			edma_cinfo->is_single_phy = true;
	for_each_available_child_of_node(np, pnp) {
		const char *mac_addr;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx_mac == edma_cinfo->num_gmac) {

		mac_addr = of_get_mac_address(pnp);
		if (!IS_ERR(mac_addr))
			memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN);
	/* Populate the adapter structure and register the netdevice */
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		adapter[i] = netdev_priv(edma_netdev[i]);
		adapter[i]->netdev = edma_netdev[i];
		adapter[i]->pdev = pdev;
		for (j = 0; j < CONFIG_NR_CPUS; j++) {
			adapter[i]->tx_start_offset[j] =
				((j << EDMA_TX_CPU_START_SHIFT) + (m << 1));
			/* Share the queues with available net-devices.
			 * For instance, with 5 net-devices
			 * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13
			 * and eth1/eth3 will get the remaining.
			 */
			for (k = adapter[i]->tx_start_offset[j]; k <
			     (adapter[i]->tx_start_offset[j] + 2); k++) {
				if (edma_fill_netdev(edma_cinfo, k, i, j)) {
					pr_err("Netdev overflow Error\n");

		adapter[i]->edma_cinfo = edma_cinfo;
		edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops;
		edma_netdev[i]->max_mtu = 9000;
		edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_TX |
				NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
				NETIF_F_HW_VLAN_CTAG_RX |
				NETIF_F_SG | NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;
		edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG |
				NETIF_F_TSO | NETIF_F_GRO;

#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->hw_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->vlan_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_netdev[i]->wanted_features |= NETIF_F_RXHASH | NETIF_F_NTUPLE;
		edma_set_ethtool_ops(edma_netdev[i]);

		/* This just fills in a default MAC address */
		if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) {
			random_ether_addr(edma_netdev[i]->dev_addr);
			pr_info("EDMA using MAC@ - using");
			pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n",
				*(edma_netdev[i]->dev_addr),
				*(edma_netdev[i]->dev_addr + 1),
				*(edma_netdev[i]->dev_addr + 2),
				*(edma_netdev[i]->dev_addr + 3),
				*(edma_netdev[i]->dev_addr + 4),
				*(edma_netdev[i]->dev_addr + 5));

		err = register_netdev(edma_netdev[i]);

		/* carrier off reporting is important to
		 * ethtool even BEFORE open
		 */
		netif_carrier_off(edma_netdev[i]);
		/* Allocate reverse irq cpu mapping structure for
#ifdef CONFIG_RFS_ACCEL
		edma_netdev[i]->rx_cpu_rmap =
			alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE);
		if (!edma_netdev[i]->rx_cpu_rmap) {
			goto err_rmap_alloc_fail;

	for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++)
		edma_cinfo->portid_netdev_lookup_tbl[i] = NULL;
	for_each_available_child_of_node(np, pnp) {
		const uint32_t *vlan_tag = NULL;

		/* this check is needed if parent and daughter dts have
		 * different number of gmac nodes
		 */
		if (idx == edma_cinfo->num_gmac)

		/* Populate port-id to netdev lookup table */
		vlan_tag = of_get_property(pnp, "vlan_tag", &len);
			pr_err("Vlan tag parsing Failed.\n");
			goto err_rmap_alloc_fail;

		adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1);

		portid_bmp = of_read_number(vlan_tag, 1);
		adapter[idx]->dp_bitmap = portid_bmp;

		portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */
		while (portid_bmp) {
			int port_bit = ffs(portid_bmp);

			if (port_bit > EDMA_MAX_PORTID_SUPPORTED)
				goto err_rmap_alloc_fail;
			edma_cinfo->portid_netdev_lookup_tbl[port_bit] =
			portid_bmp &= ~(1 << (port_bit - 1));
		if (!of_property_read_u32(pnp, "qcom,poll_required",
					  &adapter[idx]->poll_required)) {
			if (adapter[idx]->poll_required) {
				of_property_read_u32(pnp, "qcom,phy_mdio_addr",
						     &adapter[idx]->phy_mdio_addr);
				of_property_read_u32(pnp, "qcom,forced_speed",
						     &adapter[idx]->forced_speed);
				of_property_read_u32(pnp, "qcom,forced_duplex",
						     &adapter[idx]->forced_duplex);

				/* create a phyid using MDIO bus id
				 * and MDIO bus address
				 */
				snprintf(adapter[idx]->phy_id,
					 MII_BUS_ID_SIZE + 3, PHY_ID_FMT,
					 adapter[idx]->phy_mdio_addr);

			adapter[idx]->poll_required = 0;
			adapter[idx]->forced_speed = SPEED_1000;
			adapter[idx]->forced_duplex = DUPLEX_FULL;
	edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net,

	if (!edma_cinfo->edma_ctl_table_hdr) {
		dev_err(&pdev->dev, "edma sysctl table hdr not registered\n");
		goto err_unregister_sysctl_tbl;

	/* Disable all 16 Tx and 8 rx irqs */
	edma_irq_disable(edma_cinfo);

	err = edma_reset(edma_cinfo);
	/* populate per_core_info, do a napi_Add, request 16 TX irqs,
	 * 8 RX irqs, do a napi enable
	 */
	for (i = 0; i < CONFIG_NR_CPUS; i++) {
		edma_cinfo->edma_percpu_info[i].napi.state = 0;

		netif_napi_add(edma_netdev[0],
			       &edma_cinfo->edma_percpu_info[i].napi,
		napi_enable(&edma_cinfo->edma_percpu_info[i].napi);
		edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i];
		edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK
				<< (i << EDMA_RX_PER_CPU_MASK_SHIFT);
		edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i];
		edma_cinfo->edma_percpu_info[i].rx_start =
			i << EDMA_RX_CPU_START_SHIFT;
		rx_start = i << EDMA_RX_CPU_START_SHIFT;
		edma_cinfo->edma_percpu_info[i].tx_status = 0;
		edma_cinfo->edma_percpu_info[i].rx_status = 0;
		edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo;
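
		/*
		 * Descriptive note: each CPU core owns a fixed slice of the
		 * hardware queues as set up above. tx_start[]/tx_mask[] give
		 * that core its four tx queues and their IRQ mask, while the
		 * rx queue range and mask are derived by shifting
		 * EDMA_RX_PER_CPU_MASK and EDMA_RX_CPU_START_SHIFT by the
		 * core index.
		 */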
		/* Request irq per core */
		for (j = edma_cinfo->edma_percpu_info[i].tx_start;
		     j < tx_start[i] + 4; j++) {
			sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j);
			err = request_irq(edma_cinfo->tx_irq[j],
					  &edma_cinfo->edma_percpu_info[i]);

		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     ((edma_cinfo->num_rx_queues == 4) ? 1 : 2));
			sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j);
			err = request_irq(edma_cinfo->rx_irq[j],
					  &edma_cinfo->edma_percpu_info[i]);
#ifdef CONFIG_RFS_ACCEL
		for (j = edma_cinfo->edma_percpu_info[i].rx_start;
		     j < rx_start + 2; j += 2) {
			err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap,
					       edma_cinfo->rx_irq[j]);
				goto err_rmap_add_fail;
	/* Used to clear interrupt status, allocate rx buffer,
	 * configure edma descriptors registers
	 */
	err = edma_configure(edma_cinfo);

	/* Configure RSS indirection table.
	 * 128 hash will be configured in the following
	 * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively
	 */
	for (i = 0; i < EDMA_NUM_IDT; i++)
		edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE);

	/* Configure load balance mapping table.
	 * 4 table entry will be configured according to the
	 * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4}
	 */
	edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE);

	/* Configure Virtual queue for Tx rings
	 * User can also change this value runtime through
	edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE);
	edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE);

	/* Configure Max AXI Burst write size to 128 bytes*/
	edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE,
		       EDMA_AXIW_MAXWRSIZE_VALUE);

	/* Enable All 16 tx and 8 rx irq mask */
	edma_irq_enable(edma_cinfo);
	edma_enable_tx_ctrl(&edma_cinfo->hw);
	edma_enable_rx_ctrl(&edma_cinfo->hw);
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		if (adapter[i]->poll_required) {
			int phy_mode = of_get_phy_mode(np);
				phy_mode = PHY_INTERFACE_MODE_SGMII;
			adapter[i]->phydev =
				phy_connect(edma_netdev[i],
					    (const char *)adapter[i]->phy_id,
			if (IS_ERR(adapter[i]->phydev)) {
				dev_dbg(&pdev->dev, "PHY attach FAIL");
				goto edma_phy_attach_fail;

			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 adapter[i]->phydev->advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
					 adapter[i]->phydev->advertising);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT,
					 adapter[i]->phydev->supported);
			linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
					 adapter[i]->phydev->supported);

			adapter[i]->phydev = NULL;
	spin_lock_init(&edma_cinfo->stats_lock);

	timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0);
	mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ);
edma_phy_attach_fail:

#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap);
		adapter[i]->netdev->rx_cpu_rmap = NULL;

	edma_free_irqs(adapter[0]);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

err_unregister_sysctl_tbl:
err_rmap_alloc_fail:
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

err_single_phy_init:
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);

err_mdiobus_init_fail:
	edma_free_rx_rings(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);

	iounmap(edma_cinfo->hw.hw_addr);

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_netdev(edma_netdev[i]);
/* edma_axi_remove()
 *	Device Removal Routine
 *
 * edma_axi_remove is called by the platform subsystem to alert the driver
 * that it should release a platform device.
 */
static int edma_axi_remove(struct platform_device *pdev)
	struct edma_adapter *adapter = netdev_priv(edma_netdev[0]);
	struct edma_common_info *edma_cinfo = adapter->edma_cinfo;
	struct edma_hw *hw = &edma_cinfo->hw;
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		unregister_netdev(edma_netdev[i]);

	edma_stop_rx_tx(hw);
	for (i = 0; i < CONFIG_NR_CPUS; i++)
		napi_disable(&edma_cinfo->edma_percpu_info[i].napi);

	edma_irq_disable(edma_cinfo);
	edma_write_reg(EDMA_REG_RX_ISR, 0xff);
	edma_write_reg(EDMA_REG_TX_ISR, 0xffff);
#ifdef CONFIG_RFS_ACCEL
	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap);
		edma_netdev[i]->rx_cpu_rmap = NULL;

	for (i = 0; i < edma_cinfo->num_gmac; i++) {
		struct edma_adapter *adapter = netdev_priv(edma_netdev[i]);

		if (adapter->phydev)
			phy_disconnect(adapter->phydev);

	del_timer_sync(&edma_cinfo->edma_stats_timer);
	edma_free_irqs(adapter);
	unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr);
	iounmap(edma_cinfo->ess_hw_addr);
	clk_disable_unprepare(edma_cinfo->ess_clk);
	edma_free_tx_resources(edma_cinfo);
	edma_free_rx_resources(edma_cinfo);
	edma_free_tx_rings(edma_cinfo);
	edma_free_rx_rings(edma_cinfo);
	edma_free_queues(edma_cinfo);
	for (i = 0; i < edma_cinfo->num_gmac; i++)
		free_netdev(edma_netdev[i]);
static const struct of_device_id edma_of_mtable[] = {
	{.compatible = "qcom,ess-edma" },
MODULE_DEVICE_TABLE(of, edma_of_mtable);

static struct platform_driver edma_axi_driver = {
	.name = edma_axi_driver_name,
	.of_match_table = edma_of_mtable,
	.probe = edma_axi_probe,
	.remove = edma_axi_remove,

module_platform_driver(edma_axi_driver);

MODULE_AUTHOR("Qualcomm Atheros Inc");
MODULE_DESCRIPTION("QCA ESS EDMA driver");
MODULE_LICENSE("GPL");