1 /******************************************************************************
3 ** FILE NAME : ifxmips_ptm_vdsl.c
9 ** DESCRIPTION : PTM driver common source file (core functions for VR9)
10 ** COPYRIGHT : Copyright (c) 2006
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
20 ** $Date $Author $Comment
21 ** 07 JUL 2009 Xu Liang Init Version
22 *******************************************************************************/
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/ctype.h>
29 #include <linux/errno.h>
30 #include <linux/proc_fs.h>
31 #include <linux/init.h>
32 #include <linux/ioctl.h>
33 #include <linux/etherdevice.h>
34 #include <linux/interrupt.h>
35 #include <linux/netdevice.h>
36 #include <linux/platform_device.h>
37 #include <linux/of_device.h>
39 #include "ifxmips_ptm_vdsl.h"
40 #include <lantiq_soc.h>
42 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
43 #define MODULE_PARM(a, b) module_param(a, int, 0)
45 static int wanqos_en
= 0;
46 static int queue_gamma_map
[4] = {0xFE, 0x01, 0x00, 0x00};
48 MODULE_PARM(wanqos_en
, "i");
49 MODULE_PARM_DESC(wanqos_en
, "WAN QoS support, 1 - enabled, 0 - disabled.");
51 MODULE_PARM_ARRAY(queue_gamma_map
, "4-4i");
52 MODULE_PARM_DESC(queue_gamma_map
, "TX QoS queues mapping to 4 TX Gamma interfaces.");
54 extern int (*ifx_mei_atm_showtime_enter
)(struct port_cell_info
*, void *);
55 extern int (*ifx_mei_atm_showtime_exit
)(void);
56 extern int ifx_mei_atm_showtime_check(int *is_showtime
, struct port_cell_info
*port_cell
, void **xdata_addr
);
58 static int g_showtime
= 0;
59 static void *g_xdata_addr
= NULL
;
62 #define ENABLE_TMP_DBG 0
/*
 * Return the PP32 (PPE packet processor) clock rate in Hz,
 * obtained from the Lantiq clock framework.
 */
unsigned long cgu_get_pp32_clock(void)
{
	struct clk *c = clk_get_ppe();
	unsigned long rate = clk_get_rate(c);
	clk_put(c);
	return rate;
}
72 static void ptm_setup(struct net_device
*, int);
73 static struct net_device_stats
*ptm_get_stats(struct net_device
*);
74 static int ptm_open(struct net_device
*);
75 static int ptm_stop(struct net_device
*);
76 static unsigned int ptm_poll(int, unsigned int);
77 static int ptm_napi_poll(struct napi_struct
*, int);
78 static int ptm_hard_start_xmit(struct sk_buff
*, struct net_device
*);
79 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
80 static int ptm_change_mtu(struct net_device
*, int);
82 static int ptm_ioctl(struct net_device
*, struct ifreq
*, int);
83 static void ptm_tx_timeout(struct net_device
*);
85 static inline struct sk_buff
* alloc_skb_rx(void);
86 static inline struct sk_buff
* alloc_skb_tx(unsigned int);
87 static inline struct sk_buff
*get_skb_pointer(unsigned int);
88 static inline int get_tx_desc(unsigned int, unsigned int *);
91 * Mailbox handler and signal function
93 static irqreturn_t
mailbox_irq_handler(int, void *);
96 * Tasklet to Handle Swap Descriptors
98 static void do_swap_desc_tasklet(unsigned long);
102 * Init & clean-up functions
104 static inline int init_priv_data(void);
105 static inline void clear_priv_data(void);
106 static inline int init_tables(void);
107 static inline void clear_tables(void);
109 static int g_wanqos_en
= 0;
111 static int g_queue_gamma_map
[4];
113 static struct ptm_priv_data g_ptm_priv_data
;
115 static struct net_device_ops g_ptm_netdev_ops
= {
116 .ndo_get_stats
= ptm_get_stats
,
117 .ndo_open
= ptm_open
,
118 .ndo_stop
= ptm_stop
,
119 .ndo_start_xmit
= ptm_hard_start_xmit
,
120 .ndo_validate_addr
= eth_validate_addr
,
121 .ndo_set_mac_address
= eth_mac_addr
,
122 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
123 .ndo_change_mtu
= ptm_change_mtu
,
125 .ndo_do_ioctl
= ptm_ioctl
,
126 .ndo_tx_timeout
= ptm_tx_timeout
,
129 static struct net_device
*g_net_dev
[1] = {0};
130 static char *g_net_dev_name
[1] = {"dsl0"};
132 static int g_ptm_prio_queue_map
[8];
134 static DECLARE_TASKLET(g_swap_desc_tasklet
, do_swap_desc_tasklet
, 0);
137 unsigned int ifx_ptm_dbg_enable
= DBG_ENABLE_MASK_ERR
;
140 * ####################################
142 * ####################################
145 static void ptm_setup(struct net_device
*dev
, int ndev
)
147 netif_carrier_off(dev
);
149 dev
->netdev_ops
= &g_ptm_netdev_ops
;
150 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
151 /* Allow up to 1508 bytes, for RFC4638 */
152 dev
->max_mtu
= ETH_DATA_LEN
+ 8;
154 netif_napi_add(dev
, &g_ptm_priv_data
.itf
[ndev
].napi
, ptm_napi_poll
, 16);
155 dev
->watchdog_timeo
= ETH_WATCHDOG_TIMEOUT
;
157 dev
->dev_addr
[0] = 0x00;
158 dev
->dev_addr
[1] = 0x20;
159 dev
->dev_addr
[2] = 0xda;
160 dev
->dev_addr
[3] = 0x86;
161 dev
->dev_addr
[4] = 0x23;
162 dev
->dev_addr
[5] = 0x75 + ndev
;
165 static struct net_device_stats
*ptm_get_stats(struct net_device
*dev
)
167 struct net_device_stats
*s
;
169 if ( dev
!= g_net_dev
[0] )
171 s
= &g_ptm_priv_data
.itf
[0].stats
;
176 static int ptm_open(struct net_device
*dev
)
178 ASSERT(dev
== g_net_dev
[0], "incorrect device");
180 napi_enable(&g_ptm_priv_data
.itf
[0].napi
);
182 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER
);
184 netif_start_queue(dev
);
189 static int ptm_stop(struct net_device
*dev
)
191 ASSERT(dev
== g_net_dev
[0], "incorrect device");
193 IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER
);
195 napi_disable(&g_ptm_priv_data
.itf
[0].napi
);
197 netif_stop_queue(dev
);
202 static unsigned int ptm_poll(int ndev
, unsigned int work_to_do
)
204 unsigned int work_done
= 0;
205 volatile struct rx_descriptor
*desc
;
206 struct rx_descriptor reg_desc
;
207 struct sk_buff
*skb
, *new_skb
;
209 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
211 while ( work_done
< work_to_do
) {
212 desc
= &WAN_RX_DESC_BASE
[g_ptm_priv_data
.itf
[0].rx_desc_pos
];
213 if ( desc
->own
/* || !desc->c */ ) // if PP32 hold descriptor or descriptor not completed
215 if ( ++g_ptm_priv_data
.itf
[0].rx_desc_pos
== WAN_RX_DESC_NUM
)
216 g_ptm_priv_data
.itf
[0].rx_desc_pos
= 0;
219 skb
= get_skb_pointer(reg_desc
.dataptr
);
220 ASSERT(skb
!= NULL
, "invalid pointer skb == NULL");
222 new_skb
= alloc_skb_rx();
223 if ( new_skb
!= NULL
) {
224 skb_reserve(skb
, reg_desc
.byteoff
);
225 skb_put(skb
, reg_desc
.datalen
);
227 // parse protocol header
228 skb
->dev
= g_net_dev
[0];
229 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
231 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
232 g_net_dev
[0]->last_rx
= jiffies
;
235 netif_receive_skb(skb
);
237 g_ptm_priv_data
.itf
[0].stats
.rx_packets
++;
238 g_ptm_priv_data
.itf
[0].stats
.rx_bytes
+= reg_desc
.datalen
;
240 reg_desc
.dataptr
= (unsigned int)new_skb
->data
& 0x0FFFFFFF;
241 reg_desc
.byteoff
= RX_HEAD_MAC_ADDR_ALIGNMENT
;
244 reg_desc
.datalen
= RX_MAX_BUFFER_SIZE
- RX_HEAD_MAC_ADDR_ALIGNMENT
;
248 /* write discriptor to memory */
249 *((volatile unsigned int *)desc
+ 1) = *((unsigned int *)®_desc
+ 1);
251 *(volatile unsigned int *)desc
= *(unsigned int *)®_desc
;
259 static int ptm_napi_poll(struct napi_struct
*napi
, int budget
)
262 unsigned int work_done
;
264 work_done
= ptm_poll(ndev
, budget
);
267 if ( !netif_running(napi
->dev
) ) {
273 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC
);
275 if (work_done
< budget
) {
277 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER
);
285 static int ptm_hard_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
289 volatile struct tx_descriptor
*desc
;
290 struct tx_descriptor reg_desc
= {0};
291 struct sk_buff
*skb_to_free
;
292 unsigned int byteoff
;
294 ASSERT(dev
== g_net_dev
[0], "incorrect device");
297 err("not in showtime");
298 goto PTM_HARD_START_XMIT_FAIL
;
301 /* allocate descriptor */
302 desc_base
= get_tx_desc(0, &f_full
);
304 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
305 netif_trans_update(dev
);
307 dev
->trans_start
= jiffies
;
309 netif_stop_queue(dev
);
311 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC
);
312 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER
);
315 goto PTM_HARD_START_XMIT_FAIL
;
316 desc
= &CPU_TO_WAN_TX_DESC_BASE
[desc_base
];
318 byteoff
= (unsigned int)skb
->data
& (DATA_BUFFER_ALIGNMENT
- 1);
319 if ( skb_headroom(skb
) < sizeof(struct sk_buff
*) + byteoff
|| skb_cloned(skb
) ) {
320 struct sk_buff
*new_skb
;
322 ASSERT(skb_headroom(skb
) >= sizeof(struct sk_buff
*) + byteoff
, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
323 ASSERT(!skb_cloned(skb
), "skb is cloned");
325 new_skb
= alloc_skb_tx(skb
->len
);
326 if ( new_skb
== NULL
) {
328 goto ALLOC_SKB_TX_FAIL
;
330 skb_put(new_skb
, skb
->len
);
331 memcpy(new_skb
->data
, skb
->data
, skb
->len
);
332 dev_kfree_skb_any(skb
);
334 byteoff
= (unsigned int)skb
->data
& (DATA_BUFFER_ALIGNMENT
- 1);
335 /* write back to physical memory */
336 dma_cache_wback((unsigned long)skb
->data
, skb
->len
);
339 *(struct sk_buff
**)((unsigned int)skb
->data
- byteoff
- sizeof(struct sk_buff
*)) = skb
;
340 /* write back to physical memory */
341 dma_cache_wback((unsigned long)skb
->data
- byteoff
- sizeof(struct sk_buff
*), skb
->len
+ byteoff
+ sizeof(struct sk_buff
*));
343 /* free previous skb */
344 skb_to_free
= get_skb_pointer(desc
->dataptr
);
345 if ( skb_to_free
!= NULL
)
346 dev_kfree_skb_any(skb_to_free
);
348 /* update descriptor */
350 reg_desc
.dataptr
= (unsigned int)skb
->data
& (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT
- 1));
351 reg_desc
.datalen
= skb
->len
< ETH_ZLEN
? ETH_ZLEN
: skb
->len
;
352 reg_desc
.qid
= g_ptm_prio_queue_map
[skb
->priority
> 7 ? 7 : skb
->priority
];
353 reg_desc
.byteoff
= byteoff
;
356 reg_desc
.sop
= reg_desc
.eop
= 1;
359 g_ptm_priv_data
.itf
[0].stats
.tx_packets
++;
360 g_ptm_priv_data
.itf
[0].stats
.tx_bytes
+= reg_desc
.datalen
;
362 /* write discriptor to memory */
363 *((volatile unsigned int *)desc
+ 1) = *((unsigned int *)®_desc
+ 1);
365 *(volatile unsigned int *)desc
= *(unsigned int *)®_desc
;
367 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
368 netif_trans_update(dev
);
370 dev
->trans_start
= jiffies
;
376 PTM_HARD_START_XMIT_FAIL
:
377 dev_kfree_skb_any(skb
);
378 g_ptm_priv_data
.itf
[0].stats
.tx_dropped
++;
382 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
383 static int ptm_change_mtu(struct net_device
*dev
, int mtu
)
385 /* Allow up to 1508 bytes, for RFC4638 */
386 if (mtu
< 68 || mtu
> ETH_DATA_LEN
+ 8)
393 static int ptm_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
395 ASSERT(dev
== g_net_dev
[0], "incorrect device");
399 case IFX_PTM_MIB_CW_GET
:
400 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifRxNoIdleCodewords
= IFX_REG_R32(DREG_AR_CELL0
) + IFX_REG_R32(DREG_AR_CELL1
);
401 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifRxIdleCodewords
= IFX_REG_R32(DREG_AR_IDLE_CNT0
) + IFX_REG_R32(DREG_AR_IDLE_CNT1
);
402 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifRxCodingViolation
= IFX_REG_R32(DREG_AR_CVN_CNT0
) + IFX_REG_R32(DREG_AR_CVN_CNT1
) + IFX_REG_R32(DREG_AR_CVNP_CNT0
) + IFX_REG_R32(DREG_AR_CVNP_CNT1
);
403 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifTxNoIdleCodewords
= IFX_REG_R32(DREG_AT_CELL0
) + IFX_REG_R32(DREG_AT_CELL1
);
404 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifTxIdleCodewords
= IFX_REG_R32(DREG_AT_IDLE_CNT0
) + IFX_REG_R32(DREG_AT_IDLE_CNT1
);
406 case IFX_PTM_MIB_FRAME_GET
:
408 PTM_FRAME_MIB_T data
= {0};
411 data
.RxCorrect
= IFX_REG_R32(DREG_AR_HEC_CNT0
) + IFX_REG_R32(DREG_AR_HEC_CNT1
) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0
) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1
);
412 for ( i
= 0; i
< 4; i
++ )
413 data
.RxDropped
+= WAN_RX_MIB_TABLE(i
)->wrx_dropdes_pdu
;
414 for ( i
= 0; i
< 8; i
++ )
415 data
.TxSend
+= WAN_TX_MIB_TABLE(i
)->wtx_total_pdu
;
417 *((PTM_FRAME_MIB_T
*)ifr
->ifr_data
) = data
;
420 case IFX_PTM_CFG_GET
:
421 // use bear channel 0 preemption gamma interface settings
422 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxEthCrcPresent
= 1;
423 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxEthCrcCheck
= RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis
== 0 ? 1 : 0;
424 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcCheck
= RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis
== 0 ? 1 : 0;;
425 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcLen
= RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size
== 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size
* 16);
426 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxEthCrcGen
= TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis
== 0 ? 1 : 0;
427 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcGen
= TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size
== 0 ? 0 : 1;
428 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcLen
= TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size
== 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size
* 16);
430 case IFX_PTM_CFG_SET
:
434 for ( i
= 0; i
< 4; i
++ ) {
435 RX_GAMMA_ITF_CFG(i
)->rx_eth_fcs_ver_dis
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxEthCrcCheck
? 0 : 1;
437 RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcCheck
? 0 : 1;
439 switch ( ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcLen
) {
440 case 16: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size
= 1; break;
441 case 32: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size
= 2; break;
442 default: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size
= 0;
445 TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxEthCrcGen
? 0 : 1;
447 if ( ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcGen
) {
448 switch ( ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcLen
) {
449 case 16: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size
= 1; break;
450 case 32: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size
= 2; break;
451 default: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size
= 0;
455 TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size
= 0;
459 case IFX_PTM_MAP_PKT_PRIO_TO_Q
:
461 struct ppe_prio_q_map cmd
;
463 if ( copy_from_user(&cmd
, ifr
->ifr_data
, sizeof(cmd
)) )
466 if ( cmd
.pkt_prio
< 0 || cmd
.pkt_prio
>= ARRAY_SIZE(g_ptm_prio_queue_map
) )
469 if ( cmd
.qid
< 0 || cmd
.qid
>= g_wanqos_en
)
472 g_ptm_prio_queue_map
[cmd
.pkt_prio
] = cmd
.qid
;
482 static void ptm_tx_timeout(struct net_device
*dev
)
484 ASSERT(dev
== g_net_dev
[0], "incorrect device");
486 /* disable TX irq, release skb when sending new packet */
487 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER
);
489 /* wake up TX queue */
490 netif_wake_queue(dev
);
495 static inline struct sk_buff
* alloc_skb_rx(void)
499 /* allocate memroy including trailer and padding */
500 skb
= dev_alloc_skb(RX_MAX_BUFFER_SIZE
+ DATA_BUFFER_ALIGNMENT
);
502 /* must be burst length alignment and reserve two more bytes for MAC address alignment */
503 if ( ((unsigned int)skb
->data
& (DATA_BUFFER_ALIGNMENT
- 1)) != 0 )
504 skb_reserve(skb
, ~((unsigned int)skb
->data
+ (DATA_BUFFER_ALIGNMENT
- 1)) & (DATA_BUFFER_ALIGNMENT
- 1));
505 /* pub skb in reserved area "skb->data - 4" */
506 *((struct sk_buff
**)skb
->data
- 1) = skb
;
508 /* write back and invalidate cache */
509 dma_cache_wback_inv((unsigned long)skb
->data
- sizeof(skb
), sizeof(skb
));
510 /* invalidate cache */
511 dma_cache_inv((unsigned long)skb
->data
, (unsigned int)skb
->end
- (unsigned int)skb
->data
);
517 static inline struct sk_buff
* alloc_skb_tx(unsigned int size
)
521 /* allocate memory including padding */
522 size
= RX_MAX_BUFFER_SIZE
;
523 size
= (size
+ DATA_BUFFER_ALIGNMENT
- 1) & ~(DATA_BUFFER_ALIGNMENT
- 1);
524 skb
= dev_alloc_skb(size
+ DATA_BUFFER_ALIGNMENT
);
525 /* must be burst length alignment */
527 skb_reserve(skb
, ~((unsigned int)skb
->data
+ (DATA_BUFFER_ALIGNMENT
- 1)) & (DATA_BUFFER_ALIGNMENT
- 1));
531 static inline struct sk_buff
*get_skb_pointer(unsigned int dataptr
)
533 unsigned int skb_dataptr
;
536 // usually, CPE memory is less than 256M bytes
537 // so NULL means invalid pointer
538 if ( dataptr
== 0 ) {
539 dbg("dataptr is 0, it's supposed to be invalid pointer");
543 skb_dataptr
= (dataptr
- 4) | KSEG1
;
544 skb
= *(struct sk_buff
**)skb_dataptr
;
546 ASSERT((unsigned int)skb
>= KSEG0
, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb
, dataptr
);
547 ASSERT((((unsigned int)skb
->data
& (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT
- 1))) | KSEG1
) == (dataptr
| KSEG1
), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb
, (unsigned int)skb
->data
, dataptr
);
552 static inline int get_tx_desc(unsigned int itf
, unsigned int *f_full
)
555 struct ptm_itf
*p_itf
= &g_ptm_priv_data
.itf
[0];
557 // assume TX is serial operation
558 // no protection provided
562 if ( CPU_TO_WAN_TX_DESC_BASE
[p_itf
->tx_desc_pos
].own
== 0 ) {
563 desc_base
= p_itf
->tx_desc_pos
;
564 if ( ++(p_itf
->tx_desc_pos
) == CPU_TO_WAN_TX_DESC_NUM
)
565 p_itf
->tx_desc_pos
= 0;
566 if ( CPU_TO_WAN_TX_DESC_BASE
[p_itf
->tx_desc_pos
].own
== 0 )
573 static irqreturn_t
mailbox_irq_handler(int irq
, void *dev_id
)
578 isr
= IFX_REG_R32(MBOX_IGU1_ISR
);
579 IFX_REG_W32(isr
, MBOX_IGU1_ISRC
);
580 isr
&= IFX_REG_R32(MBOX_IGU1_IER
);
583 IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER
);
584 napi_schedule(&g_ptm_priv_data
.itf
[0].napi
);
585 #if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
587 volatile struct rx_descriptor
*desc
= &WAN_RX_DESC_BASE
[g_ptm_priv_data
.itf
[0].rx_desc_pos
];
589 if ( desc
->own
) { // PP32 hold
590 err("invalid interrupt");
596 IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER
);
597 tasklet_hi_schedule(&g_swap_desc_tasklet
);
600 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER
);
601 netif_wake_queue(g_net_dev
[0]);
607 static void do_swap_desc_tasklet(unsigned long arg
)
610 volatile struct tx_descriptor
*desc
;
612 unsigned int byteoff
;
614 while ( budget
-- > 0 ) {
615 if ( WAN_SWAP_DESC_BASE
[g_ptm_priv_data
.itf
[0].tx_swap_desc_pos
].own
) // if PP32 hold descriptor
618 desc
= &WAN_SWAP_DESC_BASE
[g_ptm_priv_data
.itf
[0].tx_swap_desc_pos
];
619 if ( ++g_ptm_priv_data
.itf
[0].tx_swap_desc_pos
== WAN_SWAP_DESC_NUM
)
620 g_ptm_priv_data
.itf
[0].tx_swap_desc_pos
= 0;
622 skb
= get_skb_pointer(desc
->dataptr
);
624 dev_kfree_skb_any(skb
);
626 skb
= alloc_skb_tx(RX_MAX_BUFFER_SIZE
);
628 panic("can't allocate swap buffer for PPE firmware use\n");
629 byteoff
= (unsigned int)skb
->data
& (DATA_BUFFER_ALIGNMENT
- 1);
630 *(struct sk_buff
**)((unsigned int)skb
->data
- byteoff
- sizeof(struct sk_buff
*)) = skb
;
632 desc
->dataptr
= (unsigned int)skb
->data
& 0x0FFFFFFF;
637 IFX_REG_W32_MASK(0, 16, MBOX_IGU1_ISRC
);
638 // no more skb to be replaced
639 if ( WAN_SWAP_DESC_BASE
[g_ptm_priv_data
.itf
[0].tx_swap_desc_pos
].own
) { // if PP32 hold descriptor
640 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER
);
644 tasklet_hi_schedule(&g_swap_desc_tasklet
);
649 static inline int ifx_ptm_version(char *buf
)
652 unsigned int major
, minor
;
654 ifx_ptm_get_fw_ver(&major
, &minor
);
656 len
+= sprintf(buf
+ len
, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR
, IFX_PTM_VER_MID
, IFX_PTM_VER_MINOR
);
657 len
+= sprintf(buf
+ len
, " PTM (E1) firmware version %d.%d\n", major
, minor
);
662 static inline int init_priv_data(void)
666 g_wanqos_en
= wanqos_en
? wanqos_en
: 8;
667 if ( g_wanqos_en
> 8 )
670 for ( i
= 0; i
< ARRAY_SIZE(g_queue_gamma_map
); i
++ )
672 g_queue_gamma_map
[i
] = queue_gamma_map
[i
] & ((1 << g_wanqos_en
) - 1);
673 for ( j
= 0; j
< i
; j
++ )
674 g_queue_gamma_map
[i
] &= ~g_queue_gamma_map
[j
];
677 memset(&g_ptm_priv_data
, 0, sizeof(g_ptm_priv_data
));
680 int max_packet_priority
= ARRAY_SIZE(g_ptm_prio_queue_map
);
682 int q_step
, q_accum
, p_step
;
684 tx_num_q
= __ETH_WAN_TX_QUEUE_NUM
;
685 q_step
= tx_num_q
- 1;
686 p_step
= max_packet_priority
- 1;
687 for ( j
= 0, q_accum
= 0; j
< max_packet_priority
; j
++, q_accum
+= q_step
)
688 g_ptm_prio_queue_map
[j
] = q_step
- (q_accum
+ (p_step
>> 1)) / p_step
;
/* Counterpart of init_priv_data(); nothing to release at present. */
static inline void clear_priv_data(void)
{
}
698 static inline int init_tables(void)
700 struct sk_buff
*skb_pool
[WAN_RX_DESC_NUM
] = {0};
701 struct cfg_std_data_len cfg_std_data_len
= {0};
702 struct tx_qos_cfg tx_qos_cfg
= {0};
703 struct psave_cfg psave_cfg
= {0};
704 struct eg_bwctrl_cfg eg_bwctrl_cfg
= {0};
705 struct test_mode test_mode
= {0};
706 struct rx_bc_cfg rx_bc_cfg
= {0};
707 struct tx_bc_cfg tx_bc_cfg
= {0};
708 struct gpio_mode gpio_mode
= {0};
709 struct gpio_wm_cfg gpio_wm_cfg
= {0};
710 struct rx_gamma_itf_cfg rx_gamma_itf_cfg
= {0};
711 struct tx_gamma_itf_cfg tx_gamma_itf_cfg
= {0};
712 struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg
= {0};
713 struct rx_descriptor rx_desc
= {0};
714 struct tx_descriptor tx_desc
= {0};
717 for ( i
= 0; i
< WAN_RX_DESC_NUM
; i
++ ) {
718 skb_pool
[i
] = alloc_skb_rx();
719 if ( skb_pool
[i
] == NULL
)
720 goto ALLOC_SKB_RX_FAIL
;
723 cfg_std_data_len
.byte_off
= RX_HEAD_MAC_ADDR_ALIGNMENT
; // this field replaces byte_off in rx descriptor of VDSL ingress
724 cfg_std_data_len
.data_len
= 1600;
725 *CFG_STD_DATA_LEN
= cfg_std_data_len
;
727 tx_qos_cfg
.time_tick
= cgu_get_pp32_clock() / 62500; // 16 * (cgu_get_pp32_clock() / 1000000)
728 tx_qos_cfg
.overhd_bytes
= 0;
729 tx_qos_cfg
.eth1_eg_qnum
= __ETH_WAN_TX_QUEUE_NUM
;
730 tx_qos_cfg
.eth1_burst_chk
= 1;
731 tx_qos_cfg
.eth1_qss
= 0;
732 tx_qos_cfg
.shape_en
= 0; // disable
733 tx_qos_cfg
.wfq_en
= 0; // strict priority
734 *TX_QOS_CFG
= tx_qos_cfg
;
736 psave_cfg
.start_state
= 0;
737 psave_cfg
.sleep_en
= 1; // enable sleep mode
738 *PSAVE_CFG
= psave_cfg
;
740 eg_bwctrl_cfg
.fdesc_wm
= 16;
741 eg_bwctrl_cfg
.class_len
= 128;
742 *EG_BWCTRL_CFG
= eg_bwctrl_cfg
;
744 //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
745 *GPIO_ADDR
= (unsigned int)0x00000000; // disabled by default
747 gpio_mode
.gpio_bit_bc1
= 2;
748 gpio_mode
.gpio_bit_bc0
= 1;
749 gpio_mode
.gpio_bc1_en
= 0;
750 gpio_mode
.gpio_bc0_en
= 0;
751 *GPIO_MODE
= gpio_mode
;
753 gpio_wm_cfg
.stop_wm_bc1
= 2;
754 gpio_wm_cfg
.start_wm_bc1
= 4;
755 gpio_wm_cfg
.stop_wm_bc0
= 2;
756 gpio_wm_cfg
.start_wm_bc0
= 4;
757 *GPIO_WM_CFG
= gpio_wm_cfg
;
759 test_mode
.mib_clear_mode
= 0;
760 test_mode
.test_mode
= 0;
761 *TEST_MODE
= test_mode
;
763 rx_bc_cfg
.local_state
= 0;
764 rx_bc_cfg
.remote_state
= 0;
765 rx_bc_cfg
.to_false_th
= 7;
766 rx_bc_cfg
.to_looking_th
= 3;
767 *RX_BC_CFG(0) = rx_bc_cfg
;
768 *RX_BC_CFG(1) = rx_bc_cfg
;
770 tx_bc_cfg
.fill_wm
= 2;
771 tx_bc_cfg
.uflw_wm
= 2;
772 *TX_BC_CFG(0) = tx_bc_cfg
;
773 *TX_BC_CFG(1) = tx_bc_cfg
;
775 rx_gamma_itf_cfg
.receive_state
= 0;
776 rx_gamma_itf_cfg
.rx_min_len
= 60;
777 rx_gamma_itf_cfg
.rx_pad_en
= 1;
778 rx_gamma_itf_cfg
.rx_eth_fcs_ver_dis
= 0;
779 rx_gamma_itf_cfg
.rx_rm_eth_fcs
= 1;
780 rx_gamma_itf_cfg
.rx_tc_crc_ver_dis
= 0;
781 rx_gamma_itf_cfg
.rx_tc_crc_size
= 1;
782 rx_gamma_itf_cfg
.rx_eth_fcs_result
= 0xC704DD7B;
783 rx_gamma_itf_cfg
.rx_tc_crc_result
= 0x1D0F1D0F;
784 rx_gamma_itf_cfg
.rx_crc_cfg
= 0x2500;
785 rx_gamma_itf_cfg
.rx_eth_fcs_init_value
= 0xFFFFFFFF;
786 rx_gamma_itf_cfg
.rx_tc_crc_init_value
= 0x0000FFFF;
787 rx_gamma_itf_cfg
.rx_max_len_sel
= 0;
788 rx_gamma_itf_cfg
.rx_edit_num2
= 0;
789 rx_gamma_itf_cfg
.rx_edit_pos2
= 0;
790 rx_gamma_itf_cfg
.rx_edit_type2
= 0;
791 rx_gamma_itf_cfg
.rx_edit_en2
= 0;
792 rx_gamma_itf_cfg
.rx_edit_num1
= 0;
793 rx_gamma_itf_cfg
.rx_edit_pos1
= 0;
794 rx_gamma_itf_cfg
.rx_edit_type1
= 0;
795 rx_gamma_itf_cfg
.rx_edit_en1
= 0;
796 rx_gamma_itf_cfg
.rx_inserted_bytes_1l
= 0;
797 rx_gamma_itf_cfg
.rx_inserted_bytes_1h
= 0;
798 rx_gamma_itf_cfg
.rx_inserted_bytes_2l
= 0;
799 rx_gamma_itf_cfg
.rx_inserted_bytes_2h
= 0;
800 rx_gamma_itf_cfg
.rx_len_adj
= -6;
801 for ( i
= 0; i
< 4; i
++ )
802 *RX_GAMMA_ITF_CFG(i
) = rx_gamma_itf_cfg
;
804 tx_gamma_itf_cfg
.tx_len_adj
= 6;
805 tx_gamma_itf_cfg
.tx_crc_off_adj
= 6;
806 tx_gamma_itf_cfg
.tx_min_len
= 0;
807 tx_gamma_itf_cfg
.tx_eth_fcs_gen_dis
= 0;
808 tx_gamma_itf_cfg
.tx_tc_crc_size
= 1;
809 tx_gamma_itf_cfg
.tx_crc_cfg
= 0x2F00;
810 tx_gamma_itf_cfg
.tx_eth_fcs_init_value
= 0xFFFFFFFF;
811 tx_gamma_itf_cfg
.tx_tc_crc_init_value
= 0x0000FFFF;
812 for ( i
= 0; i
< ARRAY_SIZE(g_queue_gamma_map
); i
++ ) {
813 tx_gamma_itf_cfg
.queue_mapping
= g_queue_gamma_map
[i
];
814 *TX_GAMMA_ITF_CFG(i
) = tx_gamma_itf_cfg
;
817 for ( i
= 0; i
< __ETH_WAN_TX_QUEUE_NUM
; i
++ ) {
818 wtx_qos_q_desc_cfg
.length
= WAN_TX_DESC_NUM
;
819 wtx_qos_q_desc_cfg
.addr
= __ETH_WAN_TX_DESC_BASE(i
);
820 *WTX_QOS_Q_DESC_CFG(i
) = wtx_qos_q_desc_cfg
;
823 // default TX queue QoS config is all ZERO
826 IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
827 IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
828 IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
829 IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
830 IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
831 IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
832 IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
833 IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
834 IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
835 IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
836 IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
837 IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
838 IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
839 IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
840 IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
841 IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));
843 // init RX descriptor
848 rx_desc
.byteoff
= RX_HEAD_MAC_ADDR_ALIGNMENT
;
849 rx_desc
.datalen
= RX_MAX_BUFFER_SIZE
- RX_HEAD_MAC_ADDR_ALIGNMENT
;
850 for ( i
= 0; i
< WAN_RX_DESC_NUM
; i
++ ) {
851 rx_desc
.dataptr
= (unsigned int)skb_pool
[i
]->data
& 0x0FFFFFFF;
852 WAN_RX_DESC_BASE
[i
] = rx_desc
;
855 // init TX descriptor
865 for ( i
= 0; i
< CPU_TO_WAN_TX_DESC_NUM
; i
++ )
866 CPU_TO_WAN_TX_DESC_BASE
[i
] = tx_desc
;
867 for ( i
= 0; i
< WAN_TX_DESC_NUM_TOTAL
; i
++ )
868 WAN_TX_DESC_BASE(0)[i
] = tx_desc
;
870 // init Swap descriptor
871 for ( i
= 0; i
< WAN_SWAP_DESC_NUM
; i
++ )
872 WAN_SWAP_DESC_BASE
[i
] = tx_desc
;
874 // init fastpath TX descriptor
876 for ( i
= 0; i
< FASTPATH_TO_WAN_TX_DESC_NUM
; i
++ )
877 FASTPATH_TO_WAN_TX_DESC_BASE
[i
] = tx_desc
;
883 dev_kfree_skb_any(skb_pool
[i
]);
887 static inline void clear_tables(void)
892 for ( i
= 0; i
< WAN_RX_DESC_NUM
; i
++ ) {
893 skb
= get_skb_pointer(WAN_RX_DESC_BASE
[i
].dataptr
);
895 dev_kfree_skb_any(skb
);
898 for ( i
= 0; i
< CPU_TO_WAN_TX_DESC_NUM
; i
++ ) {
899 skb
= get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE
[i
].dataptr
);
901 dev_kfree_skb_any(skb
);
904 for ( j
= 0; j
< 8; j
++ )
905 for ( i
= 0; i
< WAN_TX_DESC_NUM
; i
++ ) {
906 skb
= get_skb_pointer(WAN_TX_DESC_BASE(j
)[i
].dataptr
);
908 dev_kfree_skb_any(skb
);
911 for ( i
= 0; i
< WAN_SWAP_DESC_NUM
; i
++ ) {
912 skb
= get_skb_pointer(WAN_SWAP_DESC_BASE
[i
].dataptr
);
914 dev_kfree_skb_any(skb
);
917 for ( i
= 0; i
< FASTPATH_TO_WAN_TX_DESC_NUM
; i
++ ) {
918 skb
= get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE
[i
].dataptr
);
920 dev_kfree_skb_any(skb
);
924 static int ptm_showtime_enter(struct port_cell_info
*port_cell
, void *xdata_addr
)
928 ASSERT(port_cell
!= NULL
, "port_cell is NULL");
929 ASSERT(xdata_addr
!= NULL
, "xdata_addr is NULL");
931 // TODO: ReTX set xdata_addr
932 g_xdata_addr
= xdata_addr
;
936 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ )
937 netif_carrier_on(g_net_dev
[i
]);
939 IFX_REG_W32(0x0F, UTP_CFG
);
942 // IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);
945 printk("enter showtime\n");
950 static int ptm_showtime_exit(void)
958 // IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);
961 IFX_REG_W32(0x00, UTP_CFG
);
963 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ )
964 netif_carrier_off(g_net_dev
[i
]);
968 // TODO: ReTX clean state
971 printk("leave showtime\n");
976 static const struct of_device_id ltq_ptm_match
[] = {
978 { .compatible
= "lantiq,ppe-danube", .data
= NULL
},
979 #elif defined CONFIG_AMAZON_SE
980 { .compatible
= "lantiq,ppe-ase", .data
= NULL
},
981 #elif defined CONFIG_AR9
982 { .compatible
= "lantiq,ppe-arx100", .data
= NULL
},
983 #elif defined CONFIG_VR9
984 { .compatible
= "lantiq,ppe-xrx200", .data
= NULL
},
988 MODULE_DEVICE_TABLE(of
, ltq_ptm_match
);
990 static int ltq_ptm_probe(struct platform_device
*pdev
)
995 struct port_cell_info port_cell
= {0};
997 ret
= init_priv_data();
999 err("INIT_PRIV_DATA_FAIL");
1000 goto INIT_PRIV_DATA_FAIL
;
1003 ifx_ptm_init_chip(pdev
);
1004 ret
= init_tables();
1006 err("INIT_TABLES_FAIL");
1007 goto INIT_TABLES_FAIL
;
1010 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ ) {
1011 g_net_dev
[i
] = alloc_netdev(0, g_net_dev_name
[i
], NET_NAME_UNKNOWN
, ether_setup
);
1012 if ( g_net_dev
[i
] == NULL
)
1013 goto ALLOC_NETDEV_FAIL
;
1014 ptm_setup(g_net_dev
[i
], i
);
1017 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ ) {
1018 ret
= register_netdev(g_net_dev
[i
]);
1020 goto REGISTER_NETDEV_FAIL
;
1023 /* register interrupt handler */
1024 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
1025 ret
= request_irq(PPE_MAILBOX_IGU1_INT
, mailbox_irq_handler
, 0, "ptm_mailbox_isr", &g_ptm_priv_data
);
1027 ret
= request_irq(PPE_MAILBOX_IGU1_INT
, mailbox_irq_handler
, IRQF_DISABLED
, "ptm_mailbox_isr", &g_ptm_priv_data
);
1030 if ( ret
== -EBUSY
) {
1031 err("IRQ may be occupied by other driver, please reconfig to disable it.");
1034 err("request_irq fail");
1036 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL
;
1038 disable_irq(PPE_MAILBOX_IGU1_INT
);
1040 ret
= ifx_pp32_start(0);
1042 err("ifx_pp32_start fail!");
1043 goto PP32_START_FAIL
;
1045 IFX_REG_W32(1 << 16, MBOX_IGU1_IER
); // enable SWAP interrupt
1046 IFX_REG_W32(~0, MBOX_IGU1_ISRC
);
1048 enable_irq(PPE_MAILBOX_IGU1_INT
);
1050 ifx_mei_atm_showtime_check(&g_showtime
, &port_cell
, &g_xdata_addr
);
1052 ptm_showtime_enter(&port_cell
, &g_xdata_addr
);
1055 ifx_mei_atm_showtime_enter
= ptm_showtime_enter
;
1056 ifx_mei_atm_showtime_exit
= ptm_showtime_exit
;
1058 ifx_ptm_version(ver_str
);
1059 printk(KERN_INFO
"%s", ver_str
);
1061 printk("ifxmips_ptm: PTM init succeed\n");
1066 free_irq(PPE_MAILBOX_IGU1_INT
, &g_ptm_priv_data
);
1067 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL
:
1068 i
= ARRAY_SIZE(g_net_dev
);
1069 REGISTER_NETDEV_FAIL
:
1071 unregister_netdev(g_net_dev
[i
]);
1072 i
= ARRAY_SIZE(g_net_dev
);
1075 free_netdev(g_net_dev
[i
]);
1076 g_net_dev
[i
] = NULL
;
1079 INIT_PRIV_DATA_FAIL
:
1081 printk("ifxmips_ptm: PTM init failed\n");
1085 static int ltq_ptm_remove(struct platform_device
*pdev
)
1088 ifx_mei_atm_showtime_enter
= NULL
;
1089 ifx_mei_atm_showtime_exit
= NULL
;
1094 free_irq(PPE_MAILBOX_IGU1_INT
, &g_ptm_priv_data
);
1096 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ )
1097 unregister_netdev(g_net_dev
[i
]);
1099 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ ) {
1100 free_netdev(g_net_dev
[i
]);
1101 g_net_dev
[i
] = NULL
;
1106 ifx_ptm_uninit_chip();
1114 static int __init
wanqos_en_setup(char *line
)
1116 wanqos_en
= simple_strtoul(line
, NULL
, 0);
1118 if ( wanqos_en
< 1 || wanqos_en
> 8 )
1124 static int __init
queue_gamma_map_setup(char *line
)
1129 for ( i
= 0, p
= line
; i
< ARRAY_SIZE(queue_gamma_map
) && isxdigit(*p
); i
++ )
1131 queue_gamma_map
[i
] = simple_strtoul(p
, &p
, 0);
1132 if ( *p
== ',' || *p
== ';' || *p
== ':' )
1139 static struct platform_driver ltq_ptm_driver
= {
1140 .probe
= ltq_ptm_probe
,
1141 .remove
= ltq_ptm_remove
,
1144 .owner
= THIS_MODULE
,
1145 .of_match_table
= ltq_ptm_match
,
1149 module_platform_driver(ltq_ptm_driver
);
1151 __setup("wanqos_en=", wanqos_en_setup
);
1152 __setup("queue_gamma_map=", queue_gamma_map_setup
);
1155 MODULE_LICENSE("GPL");