1 /******************************************************************************
3 ** FILE NAME : ifxmips_ptm_adsl.c
9 ** DESCRIPTION : PTM driver common source file (core functions for Danube/
11 ** COPYRIGHT : Copyright (c) 2006
12 ** Infineon Technologies AG
13 ** Am Campeon 1-12, 85579 Neubiberg, Germany
15 ** This program is free software; you can redistribute it and/or modify
16 ** it under the terms of the GNU General Public License as published by
17 ** the Free Software Foundation; either version 2 of the License, or
18 ** (at your option) any later version.
21 ** $Date $Author $Comment
22 ** 07 JUL 2009 Xu Liang Init Version
23 *******************************************************************************/
28 * ####################################
30 * ####################################
36 #include <linux/version.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/errno.h>
41 #include <linux/proc_fs.h>
42 #include <linux/init.h>
43 #include <linux/ioctl.h>
44 #include <linux/etherdevice.h>
45 #include <linux/interrupt.h>
46 #include <linux/netdevice.h>
47 #include <linux/platform_device.h>
48 #include <linux/of_device.h>
52 * Chip Specific Head File
54 #include "ifxmips_ptm_adsl.h"
57 #include <lantiq_soc.h>
60 * ####################################
61 * Kernel Version Adaption
62 * ####################################
64 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
65 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
66 #define MODULE_PARM(a, b) module_param(a, int, 0)
68 #define MODULE_PARM_ARRAY(a, b) MODULE_PARM(a, b)
74 * ####################################
75 * Parameters to Configure PPE
76 * ####################################
79 static int write_desc_delay
= 0x20; /* Write descriptor delay */
81 static int rx_max_packet_size
= ETH_MAX_FRAME_LENGTH
;
82 /* Max packet size for RX */
84 static int dma_rx_descriptor_length
= 24; /* Number of descriptors per DMA RX channel */
85 static int dma_tx_descriptor_length
= 24; /* Number of descriptors per DMA TX channel */
87 static int eth_efmtc_crc_cfg
= 0x03100710; /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 */
88 /* rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */
90 MODULE_PARM(write_desc_delay
, "i");
91 MODULE_PARM_DESC(write_desc_delay
, "PPE core clock cycles between descriptor write and effectiveness in external RAM");
93 MODULE_PARM(rx_max_packet_size
, "i");
94 MODULE_PARM_DESC(rx_max_packet_size
, "Max packet size in byte for downstream ethernet frames");
96 MODULE_PARM(dma_rx_descriptor_length
, "i");
97 MODULE_PARM_DESC(dma_rx_descriptor_length
, "Number of descriptor assigned to DMA RX channel (>16)");
98 MODULE_PARM(dma_tx_descriptor_length
, "i");
99 MODULE_PARM_DESC(dma_tx_descriptor_length
, "Number of descriptor assigned to DMA TX channel (>16)");
101 MODULE_PARM(eth_efmtc_crc_cfg
, "i");
102 MODULE_PARM_DESC(eth_efmtc_crc_cfg
, "Configuration for PTM TX/RX ethernet/efm-tc CRC");
107 * ####################################
109 * ####################################
113 #define DUMP_SKB_LEN ~0
118 * ####################################
120 * ####################################
126 static void ptm_setup(struct net_device
*, int);
127 static struct net_device_stats
*ptm_get_stats(struct net_device
*);
128 static int ptm_open(struct net_device
*);
129 static int ptm_stop(struct net_device
*);
130 static unsigned int ptm_poll(int, unsigned int);
131 static int ptm_napi_poll(struct napi_struct
*, int);
132 static int ptm_hard_start_xmit(struct sk_buff
*, struct net_device
*);
133 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
134 static int ptm_change_mtu(struct net_device
*, int);
136 static int ptm_ioctl(struct net_device
*, struct ifreq
*, int);
137 static void ptm_tx_timeout(struct net_device
*);
142 static INLINE
void adsl_led_flash(void);
145 * buffer manage functions
147 static INLINE
struct sk_buff
* alloc_skb_rx(void);
148 //static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
149 static INLINE
struct sk_buff
*get_skb_rx_pointer(unsigned int);
150 static INLINE
int get_tx_desc(unsigned int, unsigned int *);
153 * Mailbox handler and signal function
155 static INLINE
int mailbox_rx_irq_handler(unsigned int);
156 static irqreturn_t
mailbox_irq_handler(int, void *);
157 static INLINE
void mailbox_signal(unsigned int, int);
158 #ifdef CONFIG_IFX_PTM_RX_TASKLET
159 static void do_ptm_tasklet(unsigned long);
165 #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
166 static void dump_skb(struct sk_buff
*, u32
, char *, int, int, int);
168 #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
170 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
171 static void skb_swap(struct sk_buff
*);
173 #define skb_swap(skb) do {} while (0)
177 * Proc File Functions
179 static INLINE
void proc_file_create(void);
180 static INLINE
void proc_file_delete(void);
181 static int proc_read_version(char *, char **, off_t
, int, int *, void *);
182 static int proc_read_wanmib(char *, char **, off_t
, int, int *, void *);
183 static int proc_write_wanmib(struct file
*, const char *, unsigned long, void *);
184 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
185 static int proc_read_genconf(char *, char **, off_t
, int, int *, void *);
187 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
188 static int proc_read_dbg(char *, char **, off_t
, int, int *, void *);
189 static int proc_write_dbg(struct file
*, const char *, unsigned long, void *);
193 * Proc Help Functions
195 static INLINE
int stricmp(const char *, const char *);
196 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
197 static INLINE
int strincmp(const char *, const char *, int);
199 static INLINE
int ifx_ptm_version(char *);
202 * Init & clean-up functions
204 static INLINE
void check_parameters(void);
205 static INLINE
int init_priv_data(void);
206 static INLINE
void clear_priv_data(void);
207 static INLINE
void init_tables(void);
212 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
213 extern int ifx_mei_atm_showtime_check(int *is_showtime
, struct port_cell_info
*port_cell
, void **xdata_addr
);
215 static inline int ifx_mei_atm_showtime_check(int *is_showtime
, struct port_cell_info
*port_cell
, void **xdata_addr
)
217 if ( is_showtime
!= NULL
)
226 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
227 extern int (*ifx_mei_atm_showtime_enter
)(struct port_cell_info
*, void *);
228 extern int (*ifx_mei_atm_showtime_exit
)(void);
230 int (*ifx_mei_atm_showtime_enter
)(struct port_cell_info
*, void *) = NULL
;
231 EXPORT_SYMBOL(ifx_mei_atm_showtime_enter
);
232 int (*ifx_mei_atm_showtime_exit
)(void) = NULL
;
233 EXPORT_SYMBOL(ifx_mei_atm_showtime_exit
);
239 * ####################################
241 * ####################################
244 static struct ptm_priv_data g_ptm_priv_data
;
246 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
247 static struct net_device_ops g_ptm_netdev_ops
= {
248 .ndo_get_stats
= ptm_get_stats
,
249 .ndo_open
= ptm_open
,
250 .ndo_stop
= ptm_stop
,
251 .ndo_start_xmit
= ptm_hard_start_xmit
,
252 .ndo_validate_addr
= eth_validate_addr
,
253 .ndo_set_mac_address
= eth_mac_addr
,
254 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
255 .ndo_change_mtu
= ptm_change_mtu
,
257 .ndo_do_ioctl
= ptm_ioctl
,
258 .ndo_tx_timeout
= ptm_tx_timeout
,
262 static struct net_device
*g_net_dev
[2] = {0};
263 static char *g_net_dev_name
[2] = {"dsl0", "dslfast0"};
265 #ifdef CONFIG_IFX_PTM_RX_TASKLET
266 static struct tasklet_struct g_ptm_tasklet
[] = {
267 {NULL
, 0, ATOMIC_INIT(0), do_ptm_tasklet
, 0},
268 {NULL
, 0, ATOMIC_INIT(0), do_ptm_tasklet
, 1},
272 unsigned int ifx_ptm_dbg_enable
= DBG_ENABLE_MASK_ERR
;
274 static struct proc_dir_entry
* g_ptm_dir
= NULL
;
276 static int g_showtime
= 0;
281 * ####################################
283 * ####################################
286 static void ptm_setup(struct net_device
*dev
, int ndev
)
288 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
289 netif_carrier_off(dev
);
292 /* hook network operations */
293 dev
->netdev_ops
= &g_ptm_netdev_ops
;
294 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
295 /* Allow up to 1508 bytes, for RFC4638 */
296 dev
->max_mtu
= ETH_DATA_LEN
+ 8;
298 netif_napi_add(dev
, &g_ptm_priv_data
.itf
[ndev
].napi
, ptm_napi_poll
, 25);
299 dev
->watchdog_timeo
= ETH_WATCHDOG_TIMEOUT
;
301 dev
->dev_addr
[0] = 0x00;
302 dev
->dev_addr
[1] = 0x20;
303 dev
->dev_addr
[2] = 0xda;
304 dev
->dev_addr
[3] = 0x86;
305 dev
->dev_addr
[4] = 0x23;
306 dev
->dev_addr
[5] = 0x75 + ndev
;
309 static struct net_device_stats
*ptm_get_stats(struct net_device
*dev
)
313 for ( ndev
= 0; ndev
< ARRAY_SIZE(g_net_dev
) && g_net_dev
[ndev
] != dev
; ndev
++ );
314 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
316 g_ptm_priv_data
.itf
[ndev
].stats
.rx_errors
= WAN_MIB_TABLE
[ndev
].wrx_tccrc_err_pdu
+ WAN_MIB_TABLE
[ndev
].wrx_ethcrc_err_pdu
;
317 g_ptm_priv_data
.itf
[ndev
].stats
.rx_dropped
= WAN_MIB_TABLE
[ndev
].wrx_nodesc_drop_pdu
+ WAN_MIB_TABLE
[ndev
].wrx_len_violation_drop_pdu
+ (WAN_MIB_TABLE
[ndev
].wrx_correct_pdu
- g_ptm_priv_data
.itf
[ndev
].stats
.rx_packets
);
319 return &g_ptm_priv_data
.itf
[ndev
].stats
;
322 static int ptm_open(struct net_device
*dev
)
326 for ( ndev
= 0; ndev
< ARRAY_SIZE(g_net_dev
) && g_net_dev
[ndev
] != dev
; ndev
++ );
327 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
329 napi_enable(&g_ptm_priv_data
.itf
[ndev
].napi
);
331 IFX_REG_W32_MASK(0, 1 << ndev
, MBOX_IGU1_IER
);
333 netif_start_queue(dev
);
338 static int ptm_stop(struct net_device
*dev
)
342 for ( ndev
= 0; ndev
< ARRAY_SIZE(g_net_dev
) && g_net_dev
[ndev
] != dev
; ndev
++ );
343 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
345 IFX_REG_W32_MASK((1 << ndev
) | (1 << (ndev
+ 16)), 0, MBOX_IGU1_IER
);
347 napi_disable(&g_ptm_priv_data
.itf
[ndev
].napi
);
349 netif_stop_queue(dev
);
354 static unsigned int ptm_poll(int ndev
, unsigned int work_to_do
)
356 unsigned int work_done
= 0;
358 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
360 while ( work_done
< work_to_do
&& WRX_DMA_CHANNEL_CONFIG(ndev
)->vlddes
> 0 ) {
361 if ( mailbox_rx_irq_handler(ndev
) < 0 )
369 static int ptm_napi_poll(struct napi_struct
*napi
, int budget
)
372 unsigned int work_done
;
374 for ( ndev
= 0; ndev
< ARRAY_SIZE(g_net_dev
) && g_net_dev
[ndev
] != napi
->dev
; ndev
++ );
376 work_done
= ptm_poll(ndev
, budget
);
379 if ( !netif_running(napi
->dev
) ) {
385 if ( WRX_DMA_CHANNEL_CONFIG(ndev
)->vlddes
== 0 ) {
387 IFX_REG_W32_MASK(0, 1 << ndev
, MBOX_IGU1_ISRC
);
389 if ( WRX_DMA_CHANNEL_CONFIG(ndev
)->vlddes
== 0 ) {
391 IFX_REG_W32_MASK(0, 1 << ndev
, MBOX_IGU1_IER
);
400 static int ptm_hard_start_xmit(struct sk_buff
*skb
, struct net_device
*dev
)
405 register struct tx_descriptor reg_desc
= {0};
407 for ( ndev
= 0; ndev
< ARRAY_SIZE(g_net_dev
) && g_net_dev
[ndev
] != dev
; ndev
++ );
408 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
411 err("not in showtime");
412 goto PTM_HARD_START_XMIT_FAIL
;
415 /* allocate descriptor */
416 desc_base
= get_tx_desc(ndev
, &f_full
);
418 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
419 netif_trans_update(dev
);
421 dev
->trans_start
= jiffies
;
423 netif_stop_queue(dev
);
425 IFX_REG_W32_MASK(0, 1 << (ndev
+ 16), MBOX_IGU1_ISRC
);
426 IFX_REG_W32_MASK(0, 1 << (ndev
+ 16), MBOX_IGU1_IER
);
429 goto PTM_HARD_START_XMIT_FAIL
;
431 if ( g_ptm_priv_data
.itf
[ndev
].tx_skb
[desc_base
] != NULL
)
432 dev_kfree_skb_any(g_ptm_priv_data
.itf
[ndev
].tx_skb
[desc_base
]);
433 g_ptm_priv_data
.itf
[ndev
].tx_skb
[desc_base
] = skb
;
435 reg_desc
.dataptr
= (unsigned int)skb
->data
>> 2;
436 reg_desc
.datalen
= skb
->len
< ETH_ZLEN
? ETH_ZLEN
: skb
->len
;
437 reg_desc
.byteoff
= (unsigned int)skb
->data
& (DATA_BUFFER_ALIGNMENT
- 1);
440 reg_desc
.sop
= reg_desc
.eop
= 1;
442 /* write discriptor to memory and write back cache */
443 g_ptm_priv_data
.itf
[ndev
].tx_desc
[desc_base
] = reg_desc
;
444 dma_cache_wback((unsigned long)skb
->data
, skb
->len
);
447 dump_skb(skb
, DUMP_SKB_LEN
, (char *)__func__
, ndev
, ndev
, 1);
449 if ( (ifx_ptm_dbg_enable
& DBG_ENABLE_MASK_MAC_SWAP
) ) {
453 g_ptm_priv_data
.itf
[ndev
].stats
.tx_packets
++;
454 g_ptm_priv_data
.itf
[ndev
].stats
.tx_bytes
+= reg_desc
.datalen
;
456 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
457 netif_trans_update(dev
);
459 dev
->trans_start
= jiffies
;
461 mailbox_signal(ndev
, 1);
467 PTM_HARD_START_XMIT_FAIL
:
468 dev_kfree_skb_any(skb
);
469 g_ptm_priv_data
.itf
[ndev
].stats
.tx_dropped
++;
472 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
473 static int ptm_change_mtu(struct net_device
*dev
, int mtu
)
475 /* Allow up to 1508 bytes, for RFC4638 */
476 if (mtu
< 68 || mtu
> ETH_DATA_LEN
+ 8)
483 static int ptm_ioctl(struct net_device
*dev
, struct ifreq
*ifr
, int cmd
)
487 for ( ndev
= 0; ndev
< ARRAY_SIZE(g_net_dev
) && g_net_dev
[ndev
] != dev
; ndev
++ );
488 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
492 case IFX_PTM_MIB_CW_GET
:
493 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifRxNoIdleCodewords
= WAN_MIB_TABLE
[ndev
].wrx_nonidle_cw
;
494 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifRxIdleCodewords
= WAN_MIB_TABLE
[ndev
].wrx_idle_cw
;
495 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifRxCodingViolation
= WAN_MIB_TABLE
[ndev
].wrx_err_cw
;
496 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifTxNoIdleCodewords
= 0;
497 ((PTM_CW_IF_ENTRY_T
*)ifr
->ifr_data
)->ifTxIdleCodewords
= 0;
499 case IFX_PTM_MIB_FRAME_GET
:
500 ((PTM_FRAME_MIB_T
*)ifr
->ifr_data
)->RxCorrect
= WAN_MIB_TABLE
[ndev
].wrx_correct_pdu
;
501 ((PTM_FRAME_MIB_T
*)ifr
->ifr_data
)->TC_CrcError
= WAN_MIB_TABLE
[ndev
].wrx_tccrc_err_pdu
;
502 ((PTM_FRAME_MIB_T
*)ifr
->ifr_data
)->RxDropped
= WAN_MIB_TABLE
[ndev
].wrx_nodesc_drop_pdu
+ WAN_MIB_TABLE
[ndev
].wrx_len_violation_drop_pdu
;
503 ((PTM_FRAME_MIB_T
*)ifr
->ifr_data
)->TxSend
= WAN_MIB_TABLE
[ndev
].wtx_total_pdu
;
505 case IFX_PTM_CFG_GET
:
506 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxEthCrcPresent
= CFG_ETH_EFMTC_CRC
->rx_eth_crc_present
;
507 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxEthCrcCheck
= CFG_ETH_EFMTC_CRC
->rx_eth_crc_check
;
508 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcCheck
= CFG_ETH_EFMTC_CRC
->rx_tc_crc_check
;
509 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcLen
= CFG_ETH_EFMTC_CRC
->rx_tc_crc_len
;
510 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxEthCrcGen
= CFG_ETH_EFMTC_CRC
->tx_eth_crc_gen
;
511 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcGen
= CFG_ETH_EFMTC_CRC
->tx_tc_crc_gen
;
512 ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcLen
= CFG_ETH_EFMTC_CRC
->tx_tc_crc_len
;
514 case IFX_PTM_CFG_SET
:
515 CFG_ETH_EFMTC_CRC
->rx_eth_crc_present
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxEthCrcPresent
? 1 : 0;
516 CFG_ETH_EFMTC_CRC
->rx_eth_crc_check
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxEthCrcCheck
? 1 : 0;
517 if ( ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcCheck
&& (((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcLen
== 16 || ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcLen
== 32) )
519 CFG_ETH_EFMTC_CRC
->rx_tc_crc_check
= 1;
520 CFG_ETH_EFMTC_CRC
->rx_tc_crc_len
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->RxTcCrcLen
;
524 CFG_ETH_EFMTC_CRC
->rx_tc_crc_check
= 0;
525 CFG_ETH_EFMTC_CRC
->rx_tc_crc_len
= 0;
527 CFG_ETH_EFMTC_CRC
->tx_eth_crc_gen
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxEthCrcGen
? 1 : 0;
528 if ( ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcGen
&& (((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcLen
== 16 || ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcLen
== 32) )
530 CFG_ETH_EFMTC_CRC
->tx_tc_crc_gen
= 1;
531 CFG_ETH_EFMTC_CRC
->tx_tc_crc_len
= ((IFX_PTM_CFG_T
*)ifr
->ifr_data
)->TxTcCrcLen
;
535 CFG_ETH_EFMTC_CRC
->tx_tc_crc_gen
= 0;
536 CFG_ETH_EFMTC_CRC
->tx_tc_crc_len
= 0;
546 static void ptm_tx_timeout(struct net_device
*dev
)
550 for ( ndev
= 0; ndev
< ARRAY_SIZE(g_net_dev
) && g_net_dev
[ndev
] != dev
; ndev
++ );
551 ASSERT(ndev
>= 0 && ndev
< ARRAY_SIZE(g_net_dev
), "ndev = %d (wrong value)", ndev
);
553 /* disable TX irq, release skb when sending new packet */
554 IFX_REG_W32_MASK(1 << (ndev
+ 16), 0, MBOX_IGU1_IER
);
556 /* wake up TX queue */
557 netif_wake_queue(dev
);
562 static INLINE
void adsl_led_flash(void)
566 static INLINE
struct sk_buff
* alloc_skb_rx(void)
570 /* allocate memroy including trailer and padding */
571 skb
= dev_alloc_skb(rx_max_packet_size
+ RX_HEAD_MAC_ADDR_ALIGNMENT
+ DATA_BUFFER_ALIGNMENT
);
573 /* must be burst length alignment and reserve two more bytes for MAC address alignment */
574 if ( ((unsigned int)skb
->data
& (DATA_BUFFER_ALIGNMENT
- 1)) != 0 )
575 skb_reserve(skb
, ~((unsigned int)skb
->data
+ (DATA_BUFFER_ALIGNMENT
- 1)) & (DATA_BUFFER_ALIGNMENT
- 1));
576 /* pub skb in reserved area "skb->data - 4" */
577 *((struct sk_buff
**)skb
->data
- 1) = skb
;
579 /* write back and invalidate cache */
580 dma_cache_wback_inv((unsigned long)skb
->data
- sizeof(skb
), sizeof(skb
));
581 /* invalidate cache */
582 dma_cache_inv((unsigned long)skb
->data
, (unsigned int)skb
->end
- (unsigned int)skb
->data
);
589 static INLINE
struct sk_buff
* alloc_skb_tx(unsigned int size
)
593 /* allocate memory including padding */
594 size
= (size
+ DATA_BUFFER_ALIGNMENT
- 1) & ~(DATA_BUFFER_ALIGNMENT
- 1);
595 skb
= dev_alloc_skb(size
+ DATA_BUFFER_ALIGNMENT
);
596 /* must be burst length alignment */
598 skb_reserve(skb
, ~((unsigned int)skb
->data
+ (DATA_BUFFER_ALIGNMENT
- 1)) & (DATA_BUFFER_ALIGNMENT
- 1));
603 static INLINE
struct sk_buff
*get_skb_rx_pointer(unsigned int dataptr
)
605 unsigned int skb_dataptr
;
608 skb_dataptr
= ((dataptr
- 1) << 2) | KSEG1
;
609 skb
= *(struct sk_buff
**)skb_dataptr
;
611 ASSERT((unsigned int)skb
>= KSEG0
, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb
, dataptr
);
612 ASSERT(((unsigned int)skb
->data
| KSEG1
) == ((dataptr
<< 2) | KSEG1
), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb
, (unsigned int)skb
->data
, dataptr
);
617 static INLINE
int get_tx_desc(unsigned int itf
, unsigned int *f_full
)
620 struct ptm_itf
*p_itf
= &g_ptm_priv_data
.itf
[itf
];
622 // assume TX is serial operation
623 // no protection provided
627 if ( p_itf
->tx_desc
[p_itf
->tx_desc_pos
].own
== 0 ) {
628 desc_base
= p_itf
->tx_desc_pos
;
629 if ( ++(p_itf
->tx_desc_pos
) == dma_tx_descriptor_length
)
630 p_itf
->tx_desc_pos
= 0;
631 if ( p_itf
->tx_desc
[p_itf
->tx_desc_pos
].own
== 0 )
638 static INLINE
int mailbox_rx_irq_handler(unsigned int ch
) // return: < 0 - descriptor not available, 0 - received one packet
640 unsigned int ndev
= ch
;
642 struct sk_buff
*new_skb
;
643 volatile struct rx_descriptor
*desc
;
644 struct rx_descriptor reg_desc
;
647 desc
= &g_ptm_priv_data
.itf
[ndev
].rx_desc
[g_ptm_priv_data
.itf
[ndev
].rx_desc_pos
];
648 if ( desc
->own
|| !desc
->c
) // if PP32 hold descriptor or descriptor not completed
650 if ( ++g_ptm_priv_data
.itf
[ndev
].rx_desc_pos
== dma_rx_descriptor_length
)
651 g_ptm_priv_data
.itf
[ndev
].rx_desc_pos
= 0;
654 skb
= get_skb_rx_pointer(reg_desc
.dataptr
);
656 if ( !reg_desc
.err
) {
657 new_skb
= alloc_skb_rx();
658 if ( new_skb
!= NULL
) {
659 skb_reserve(skb
, reg_desc
.byteoff
);
660 skb_put(skb
, reg_desc
.datalen
);
662 dump_skb(skb
, DUMP_SKB_LEN
, (char *)__func__
, ndev
, ndev
, 0);
664 // parse protocol header
665 skb
->dev
= g_net_dev
[ndev
];
666 skb
->protocol
= eth_type_trans(skb
, skb
->dev
);
668 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
669 g_net_dev
[ndev
]->last_rx
= jiffies
;
672 netif_rx_ret
= netif_receive_skb(skb
);
674 if ( netif_rx_ret
!= NET_RX_DROP
) {
675 g_ptm_priv_data
.itf
[ndev
].stats
.rx_packets
++;
676 g_ptm_priv_data
.itf
[ndev
].stats
.rx_bytes
+= reg_desc
.datalen
;
679 reg_desc
.dataptr
= ((unsigned int)new_skb
->data
>> 2) & 0x0FFFFFFF;
680 reg_desc
.byteoff
= RX_HEAD_MAC_ADDR_ALIGNMENT
;
686 reg_desc
.datalen
= rx_max_packet_size
;
694 mailbox_signal(ndev
, 0);
701 static irqreturn_t
mailbox_irq_handler(int irq
, void *dev_id
)
706 isr
= IFX_REG_R32(MBOX_IGU1_ISR
);
707 IFX_REG_W32(isr
, MBOX_IGU1_ISRC
);
708 isr
&= IFX_REG_R32(MBOX_IGU1_IER
);
710 while ( (i
= __fls(isr
)) >= 0 ) {
715 IFX_REG_W32_MASK(1 << i
, 0, MBOX_IGU1_IER
);
717 if ( i
< MAX_ITF_NUMBER
)
718 netif_wake_queue(g_net_dev
[i
]);
722 #ifdef CONFIG_IFX_PTM_RX_INTERRUPT
723 while ( WRX_DMA_CHANNEL_CONFIG(i
)->vlddes
> 0 )
724 mailbox_rx_irq_handler(i
);
726 IFX_REG_W32_MASK(1 << i
, 0, MBOX_IGU1_IER
);
727 napi_schedule(&g_ptm_priv_data
.itf
[i
].napi
);
735 static INLINE
void mailbox_signal(unsigned int itf
, int is_tx
)
740 while ( MBOX_IGU3_ISR_ISR(itf
+ 16) && count
> 0 )
742 IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf
+ 16), MBOX_IGU3_ISRS
);
745 while ( MBOX_IGU3_ISR_ISR(itf
) && count
> 0 )
747 IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf
), MBOX_IGU3_ISRS
);
750 ASSERT(count
!= 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR
));
753 #ifdef CONFIG_IFX_PTM_RX_TASKLET
754 static void do_ptm_tasklet(unsigned long arg
)
756 unsigned int work_to_do
= 25;
757 unsigned int work_done
= 0;
759 ASSERT(arg
>= 0 && arg
< ARRAY_SIZE(g_net_dev
), "arg = %lu (wrong value)", arg
);
761 while ( work_done
< work_to_do
&& WRX_DMA_CHANNEL_CONFIG(arg
)->vlddes
> 0 ) {
762 if ( mailbox_rx_irq_handler(arg
) < 0 )
769 if ( !netif_running(g_net_dev
[arg
]) )
773 if ( WRX_DMA_CHANNEL_CONFIG(arg
)->vlddes
== 0 ) {
775 IFX_REG_W32_MASK(0, 1 << arg
, MBOX_IGU1_ISRC
);
777 if ( WRX_DMA_CHANNEL_CONFIG(arg
)->vlddes
== 0 ) {
778 IFX_REG_W32_MASK(0, 1 << arg
, MBOX_IGU1_IER
);
784 tasklet_schedule(&g_ptm_tasklet
[arg
]);
788 #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
789 static void dump_skb(struct sk_buff
*skb
, u32 len
, char *title
, int port
, int ch
, int is_tx
)
793 if ( !(ifx_ptm_dbg_enable
& (is_tx
? DBG_ENABLE_MASK_DUMP_SKB_TX
: DBG_ENABLE_MASK_DUMP_SKB_RX
)) )
796 if ( skb
->len
< len
)
799 if ( len
> rx_max_packet_size
) {
800 printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32
)skb
, (u32
)skb
->data
, skb
->len
);
805 printk("%s (port %d, ch %d)\n", title
, port
, ch
);
807 printk("%s\n", title
);
808 printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32
)skb
->data
, (u32
)skb
->tail
, (int)skb
->len
);
809 for ( i
= 1; i
<= len
; i
++ ) {
811 printk(" %4d:", i
- 1);
812 printk(" %02X", (int)(*((char*)skb
->data
+ i
- 1) & 0xFF));
816 if ( (i
- 1) % 16 != 0 )
821 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
822 static void skb_swap(struct sk_buff
*skb
)
824 unsigned char tmp
[8];
825 unsigned char *p
= skb
->data
;
827 if ( !(p
[0] & 0x01) ) { // bypass broadcast/multicast
831 memcpy(p
+ 6, tmp
, 6);
835 while ( p
[0] == 0x81 && p
[1] == 0x00 )
839 if ( p
[0] == 0x08 && p
[1] == 0x00 ) {
843 memcpy(p
+ 4, tmp
, 4);
847 dma_cache_wback((unsigned long)skb
->data
, (unsigned long)p
- (unsigned long)skb
->data
);
852 static INLINE
void proc_file_create(void)
854 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
855 struct proc_dir_entry
*res
;
857 g_ptm_dir
= proc_mkdir("driver/ifx_ptm", NULL
);
859 create_proc_read_entry("version",
865 res
= create_proc_entry("wanmib",
869 res
->read_proc
= proc_read_wanmib
;
870 res
->write_proc
= proc_write_wanmib
;
873 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
874 create_proc_read_entry("genconf",
881 create_proc_read_entry("regs",
884 ifx_ptm_proc_read_regs
,
889 res
= create_proc_entry("dbg",
893 res
->read_proc
= proc_read_dbg
;
894 res
->write_proc
= proc_write_dbg
;
899 static INLINE
void proc_file_delete(void)
901 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
902 remove_proc_entry("dbg", g_ptm_dir
);
905 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
907 remove_proc_entry("regs", g_ptm_dir
);
910 remove_proc_entry("genconf", g_ptm_dir
);
913 remove_proc_entry("wanmib", g_ptm_dir
);
915 remove_proc_entry("version", g_ptm_dir
);
917 remove_proc_entry("driver/ifx_ptm", NULL
);
920 static int proc_read_version(char *buf
, char **start
, off_t offset
, int count
, int *eof
, void *data
)
924 len
+= ifx_ptm_version(buf
+ len
);
926 if ( offset
>= len
) {
931 *start
= buf
+ offset
;
932 if ( (len
-= offset
) > count
)
938 static int proc_read_wanmib(char *page
, char **start
, off_t off
, int count
, int *eof
, void *data
)
947 for ( i
= 0; i
< ARRAY_SIZE(title
); i
++ ) {
948 len
+= sprintf(page
+ off
+ len
, title
[i
]);
949 len
+= sprintf(page
+ off
+ len
, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE
[i
].wrx_correct_pdu
);
950 len
+= sprintf(page
+ off
+ len
, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE
[i
].wrx_correct_pdu_bytes
);
951 len
+= sprintf(page
+ off
+ len
, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE
[i
].wrx_tccrc_err_pdu
);
952 len
+= sprintf(page
+ off
+ len
, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE
[i
].wrx_tccrc_err_pdu_bytes
);
953 len
+= sprintf(page
+ off
+ len
, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE
[i
].wrx_ethcrc_err_pdu
);
954 len
+= sprintf(page
+ off
+ len
, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE
[i
].wrx_ethcrc_err_pdu_bytes
);
955 len
+= sprintf(page
+ off
+ len
, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE
[i
].wrx_nodesc_drop_pdu
);
956 len
+= sprintf(page
+ off
+ len
, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE
[i
].wrx_len_violation_drop_pdu
);
957 len
+= sprintf(page
+ off
+ len
, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE
[i
].wrx_idle_bytes
);
958 len
+= sprintf(page
+ off
+ len
, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE
[i
].wrx_nonidle_cw
);
959 len
+= sprintf(page
+ off
+ len
, " wrx_idle_cw = %d\n", WAN_MIB_TABLE
[i
].wrx_idle_cw
);
960 len
+= sprintf(page
+ off
+ len
, " wrx_err_cw = %d\n", WAN_MIB_TABLE
[i
].wrx_err_cw
);
961 len
+= sprintf(page
+ off
+ len
, " wtx_total_pdu = %d\n", WAN_MIB_TABLE
[i
].wtx_total_pdu
);
962 len
+= sprintf(page
+ off
+ len
, " wtx_total_bytes = %d\n", WAN_MIB_TABLE
[i
].wtx_total_bytes
);
970 static int proc_write_wanmib(struct file
*file
, const char *buf
, unsigned long count
, void *data
)
978 len
= count
< sizeof(str
) ? count
: sizeof(str
) - 1;
979 rlen
= len
- copy_from_user(str
, buf
, len
);
980 while ( rlen
&& str
[rlen
- 1] <= ' ' )
983 for ( p
= str
; *p
&& *p
<= ' '; p
++, rlen
-- );
987 if ( stricmp(p
, "clear") == 0 || stricmp(p
, "clean") == 0 ) {
988 for ( i
= 0; i
< 2; i
++ )
989 memset((void*)&WAN_MIB_TABLE
[i
], 0, sizeof(WAN_MIB_TABLE
[i
]));
995 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
997 static int proc_read_genconf(char *page
, char **start
, off_t off
, int count
, int *eof
, void *data
)
1000 int len_max
= off
+ count
;
1007 pstr
= *start
= page
;
1011 llen
+= sprintf(str
+ llen
, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY
, IFX_REG_R32(CFG_WAN_WRDES_DELAY
));
1012 llen
+= sprintf(str
+ llen
, "CFG_WRX_DMACH_ON (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON
);
1013 for ( i
= 0, bit
= 1; i
< MAX_RX_DMA_CHANNEL_NUMBER
; i
++, bit
<<= 1 )
1014 llen
+= sprintf(str
+ llen
, " %d - %s", i
, (IFX_REG_R32(CFG_WRX_DMACH_ON
) & bit
) ? "on " : "off");
1015 llen
+= sprintf(str
+ llen
, "\n");
1016 llen
+= sprintf(str
+ llen
, "CFG_WTX_DMACH_ON (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON
);
1017 for ( i
= 0, bit
= 1; i
< MAX_TX_DMA_CHANNEL_NUMBER
; i
++, bit
<<= 1 )
1018 llen
+= sprintf(str
+ llen
, " %d - %s", i
, (IFX_REG_R32(CFG_WTX_DMACH_ON
) & bit
) ? "on " : "off");
1019 llen
+= sprintf(str
+ llen
, "\n");
1020 llen
+= sprintf(str
+ llen
, "CFG_WRX_LOOK_BITTH (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH
, IFX_REG_R32(CFG_WRX_LOOK_BITTH
));
1021 llen
+= sprintf(str
+ llen
, "CFG_ETH_EFMTC_CRC (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC
, CFG_ETH_EFMTC_CRC
->rx_tc_crc_len
, CFG_ETH_EFMTC_CRC
->rx_tc_crc_check
? " on" : "off");
1022 llen
+= sprintf(str
+ llen
, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC
->rx_eth_crc_check
? " on" : "off", CFG_ETH_EFMTC_CRC
->rx_eth_crc_present
? " on" : "off");
1023 llen
+= sprintf(str
+ llen
, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC
->tx_tc_crc_len
, CFG_ETH_EFMTC_CRC
->tx_tc_crc_gen
? " on" : "off");
1024 llen
+= sprintf(str
+ llen
, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC
->tx_eth_crc_gen
? " on" : "off");
1026 llen
+= sprintf(str
+ llen
, "RX Port:\n");
1027 for ( i
= 0; i
< MAX_RX_DMA_CHANNEL_NUMBER
; i
++ )
1028 llen
+= sprintf(str
+ llen
, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i
, (unsigned int)WRX_PORT_CONFIG(i
), WRX_PORT_CONFIG(i
)->mfs
, WRX_PORT_CONFIG(i
)->dmach
, WRX_PORT_CONFIG(i
)->local_state
, WRX_PORT_CONFIG(i
)->partner_state
);
1029 llen
+= sprintf(str
+ llen
, "RX DMA Channel:\n");
1030 for ( i
= 0; i
< MAX_RX_DMA_CHANNEL_NUMBER
; i
++ )
1031 llen
+= sprintf(str
+ llen
, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i
, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i
), WRX_DMA_CHANNEL_CONFIG(i
)->desba
, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i
)->desba
<< 2) | KSEG1
, WRX_DMA_CHANNEL_CONFIG(i
)->deslen
, WRX_DMA_CHANNEL_CONFIG(i
)->vlddes
);
1033 llen
+= sprintf(str
+ llen
, "TX Port:\n");
1034 for ( i
= 0; i
< MAX_TX_DMA_CHANNEL_NUMBER
; i
++ )
1035 llen
+= sprintf(str
+ llen
, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i
, (unsigned int)WTX_PORT_CONFIG(i
), WTX_PORT_CONFIG(i
)->tx_cwth2
, WTX_PORT_CONFIG(i
)->tx_cwth1
);
1036 llen
+= sprintf(str
+ llen
, "TX DMA Channel:\n");
1037 for ( i
= 0; i
< MAX_TX_DMA_CHANNEL_NUMBER
; i
++ )
1038 llen
+= sprintf(str
+ llen
, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i
, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i
), WTX_DMA_CHANNEL_CONFIG(i
)->desba
, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i
)->desba
<< 2) | KSEG1
, WTX_DMA_CHANNEL_CONFIG(i
)->deslen
, WTX_DMA_CHANNEL_CONFIG(i
)->vlddes
);
1040 if ( len
<= off
&& len
+ llen
> off
)
1042 memcpy(pstr
, str
+ off
- len
, len
+ llen
- off
);
1043 pstr
+= len
+ llen
- off
;
1045 else if ( len
> off
)
1047 memcpy(pstr
, str
, llen
);
1051 if ( len
>= len_max
)
1052 goto PROC_READ_GENCONF_OVERRUN_END
;
1058 PROC_READ_GENCONF_OVERRUN_END
:
1059 return len
- llen
- off
;
1062 #endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1064 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1066 static int proc_read_dbg(char *page
, char **start
, off_t off
, int count
, int *eof
, void *data
)
1070 len
+= sprintf(page
+ off
+ len
, "error print - %s\n", (ifx_ptm_dbg_enable
& DBG_ENABLE_MASK_ERR
) ? "enabled" : "disabled");
1071 len
+= sprintf(page
+ off
+ len
, "debug print - %s\n", (ifx_ptm_dbg_enable
& DBG_ENABLE_MASK_DEBUG_PRINT
) ? "enabled" : "disabled");
1072 len
+= sprintf(page
+ off
+ len
, "assert - %s\n", (ifx_ptm_dbg_enable
& DBG_ENABLE_MASK_ASSERT
) ? "enabled" : "disabled");
1073 len
+= sprintf(page
+ off
+ len
, "dump rx skb - %s\n", (ifx_ptm_dbg_enable
& DBG_ENABLE_MASK_DUMP_SKB_RX
) ? "enabled" : "disabled");
1074 len
+= sprintf(page
+ off
+ len
, "dump tx skb - %s\n", (ifx_ptm_dbg_enable
& DBG_ENABLE_MASK_DUMP_SKB_TX
) ? "enabled" : "disabled");
1075 len
+= sprintf(page
+ off
+ len
, "mac swap - %s\n", (ifx_ptm_dbg_enable
& DBG_ENABLE_MASK_MAC_SWAP
) ? "enabled" : "disabled");
1082 static int proc_write_dbg(struct file
*file
, const char *buf
, unsigned long count
, void *data
)
1084 static const char *dbg_enable_mask_str
[] = {
1103 static const int dbg_enable_mask_str_len
[] = {
1114 unsigned int dbg_enable_mask
[] = {
1115 DBG_ENABLE_MASK_ERR
,
1116 DBG_ENABLE_MASK_DEBUG_PRINT
,
1117 DBG_ENABLE_MASK_ASSERT
,
1118 DBG_ENABLE_MASK_DUMP_SKB_RX
,
1119 DBG_ENABLE_MASK_DUMP_SKB_TX
,
1120 DBG_ENABLE_MASK_DUMP_INIT
,
1121 DBG_ENABLE_MASK_DUMP_QOS
,
1122 DBG_ENABLE_MASK_MAC_SWAP
,
1134 len
= count
< sizeof(str
) ? count
: sizeof(str
) - 1;
1135 rlen
= len
- copy_from_user(str
, buf
, len
);
1136 while ( rlen
&& str
[rlen
- 1] <= ' ' )
1139 for ( p
= str
; *p
&& *p
<= ' '; p
++, rlen
-- );
1143 // debugging feature for enter/leave showtime
1144 if ( strincmp(p
, "enter", 5) == 0 && ifx_mei_atm_showtime_enter
!= NULL
)
1145 ifx_mei_atm_showtime_enter(NULL
, NULL
);
1146 else if ( strincmp(p
, "leave", 5) == 0 && ifx_mei_atm_showtime_exit
!= NULL
)
1147 ifx_mei_atm_showtime_exit();
1149 if ( strincmp(p
, "enable", 6) == 0 ) {
1153 else if ( strincmp(p
, "disable", 7) == 0 ) {
1157 else if ( strincmp(p
, "help", 4) == 0 || *p
== '?' ) {
1158 printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
1164 ifx_ptm_dbg_enable
|= DBG_ENABLE_MASK_ALL
& ~DBG_ENABLE_MASK_MAC_SWAP
;
1166 ifx_ptm_dbg_enable
&= ~DBG_ENABLE_MASK_ALL
| DBG_ENABLE_MASK_MAC_SWAP
;
1170 for ( i
= 0; i
< ARRAY_SIZE(dbg_enable_mask_str
); i
++ )
1171 if ( strincmp(p
, dbg_enable_mask_str
[i
], dbg_enable_mask_str_len
[i
]) == 0 ) {
1173 ifx_ptm_dbg_enable
|= dbg_enable_mask
[i
>> 1];
1175 ifx_ptm_dbg_enable
&= ~dbg_enable_mask
[i
>> 1];
1176 p
+= dbg_enable_mask_str_len
[i
];
1179 } while ( i
< ARRAY_SIZE(dbg_enable_mask_str
) );
1186 #endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
// Case-insensitive string comparison (locale-independent, ASCII only).
// Returns the usual <0 / 0 / >0 ordering like strcasecmp().
// NOTE(review): loop tail and return statement are elided from this view.
1188 static INLINE
int stricmp(const char *p1
, const char *p2
)
1192 while ( *p1
&& *p2
)
// Fold each character to lower case before comparing (ASCII A-Z only).
1194 c1
= *p1
>= 'A' && *p1
<= 'Z' ? *p1
+ 'a' - 'A' : *p1
;
1195 c2
= *p2
>= 'A' && *p2
<= 'Z' ? *p2
+ 'a' - 'A' : *p2
;
1205 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
// Length-bounded case-insensitive comparison, ASCII only, analogous to
// strncasecmp().  Used by proc_write_dbg() for keyword matching.
// NOTE(review): the loop body's compare/advance statements are elided here.
1206 static INLINE
int strincmp(const char *p1
, const char *p2
, int n
)
1210 while ( n
&& *p1
&& *p2
)
// Fold both characters to lower case (ASCII A-Z only).
1212 c1
= *p1
>= 'A' && *p1
<= 'Z' ? *p1
+ 'a' - 'A' : *p1
;
1213 c2
= *p2
>= 'A' && *p2
<= 'Z' ? *p2
+ 'a' - 'A' : *p2
;
// If the bound was not exhausted the strings differ in length; otherwise
// report the last folded-character difference accumulated in c1.
1221 return n
? *p1
- *p2
: c1
;
// Format the driver and PPE (E1) firmware version into buf.
// Returns the number of characters written (via the elided `len` /
// return statements not visible in this view).
1225 static INLINE
int ifx_ptm_version(char *buf
)
1228 unsigned int major
, minor
;
// Query the running PPE firmware for its version.
1230 ifx_ptm_get_fw_ver(&major
, &minor
);
1232 len
+= sprintf(buf
+ len
, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR
, IFX_PTM_VER_MID
, IFX_PTM_VER_MINOR
);
1233 len
+= sprintf(buf
+ len
, " PTM (E1) firmware version %d.%d\n", major
, minor
);
1238 static INLINE
void check_parameters(void)
1240 /* There is a delay between PPE write descriptor and descriptor is */
1241 /* really stored in memory. Host also has this delay when writing */
1242 /* descriptor. So PPE will use this value to determine if the write */
1243 /* operation makes effect. */
1244 if ( write_desc_delay
< 0 )
1245 write_desc_delay
= 0;
1247 /* Because of the limitation of length field in descriptors, the packet */
1248 /* size could not be larger than 64K minus overhead size. */
1249 if ( rx_max_packet_size
< ETH_MIN_FRAME_LENGTH
)
1250 rx_max_packet_size
= ETH_MIN_FRAME_LENGTH
;
1251 else if ( rx_max_packet_size
> 65536 - 1 )
1252 rx_max_packet_size
= 65536 - 1;
1254 if ( dma_rx_descriptor_length
< 2 )
1255 dma_rx_descriptor_length
= 2;
1256 if ( dma_tx_descriptor_length
< 2 )
1257 dma_tx_descriptor_length
= 2;
// Allocate and initialize the driver's private data: RX/TX DMA descriptor
// rings and the TX skb pointer table, then pre-fill every RX descriptor
// with a freshly allocated receive skb.
// NOTE(review): allocation-failure checks and the return statements are
// elided from this view; presumably returns 0 on success, negative on
// failure — confirm against the full source.
1260 static INLINE
int init_priv_data(void)
// Template RX descriptor, copied into every ring slot below.
1264 struct rx_descriptor rx_desc
= {0};
1265 struct sk_buff
*skb
;
// volatile: the descriptor rings are shared with the PPE DMA engine.
1266 volatile struct rx_descriptor
*p_rx_desc
;
1267 volatile struct tx_descriptor
*p_tx_desc
;
1268 struct sk_buff
**ppskb
;
1270 // clear ptm private data structure
1271 memset(&g_ptm_priv_data
, 0, sizeof(g_ptm_priv_data
));
1273 // allocate memory for RX descriptors
// Extra DESC_ALIGNMENT bytes allow the base to be aligned up afterwards.
1274 p
= kzalloc(MAX_ITF_NUMBER
* dma_rx_descriptor_length
* sizeof(struct rx_descriptor
) + DESC_ALIGNMENT
, GFP_KERNEL
);
// Invalidate the cache over the ring so the CPU reads what DMA writes.
1277 dma_cache_inv((unsigned long)p
, MAX_ITF_NUMBER
* dma_rx_descriptor_length
* sizeof(struct rx_descriptor
) + DESC_ALIGNMENT
);
// Keep the unaligned base so clear_priv_data() can kfree() it later.
1278 g_ptm_priv_data
.rx_desc_base
= p
;
1279 //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
1281 // allocate memory for TX descriptors
1282 p
= kzalloc(MAX_ITF_NUMBER
* dma_tx_descriptor_length
* sizeof(struct tx_descriptor
) + DESC_ALIGNMENT
, GFP_KERNEL
);
1285 dma_cache_inv((unsigned long)p
, MAX_ITF_NUMBER
* dma_tx_descriptor_length
* sizeof(struct tx_descriptor
) + DESC_ALIGNMENT
);
1286 g_ptm_priv_data
.tx_desc_base
= p
;
1288 // allocate memroy for TX skb pointers
// +4 allows 4-byte alignment of the pointer table below.
1289 p
= kzalloc(MAX_ITF_NUMBER
* dma_tx_descriptor_length
* sizeof(struct sk_buff
*) + 4, GFP_KERNEL
);
1292 dma_cache_wback_inv((unsigned long)p
, MAX_ITF_NUMBER
* dma_tx_descriptor_length
* sizeof(struct sk_buff
*) + 4);
1293 g_ptm_priv_data
.tx_skb_base
= p
;
// Align each ring base up to DESC_ALIGNMENT and map it through KSEG1
// (MIPS uncached segment) so descriptor accesses bypass the cache.
1295 p_rx_desc
= (volatile struct rx_descriptor
*)((((unsigned int)g_ptm_priv_data
.rx_desc_base
+ DESC_ALIGNMENT
- 1) & ~(DESC_ALIGNMENT
- 1)) | KSEG1
);
1296 p_tx_desc
= (volatile struct tx_descriptor
*)((((unsigned int)g_ptm_priv_data
.tx_desc_base
+ DESC_ALIGNMENT
- 1) & ~(DESC_ALIGNMENT
- 1)) | KSEG1
);
1297 ppskb
= (struct sk_buff
**)(((unsigned int)g_ptm_priv_data
.tx_skb_base
+ 3) & ~3);
// Hand each interface its slice of the shared rings.
1298 for ( i
= 0; i
< MAX_ITF_NUMBER
; i
++ ) {
1299 g_ptm_priv_data
.itf
[i
].rx_desc
= &p_rx_desc
[i
* dma_rx_descriptor_length
];
1300 g_ptm_priv_data
.itf
[i
].tx_desc
= &p_tx_desc
[i
* dma_tx_descriptor_length
];
1301 g_ptm_priv_data
.itf
[i
].tx_skb
= &ppskb
[i
* dma_tx_descriptor_length
];
// Byte offset keeps the MAC address aligned in the received frame.
1308 rx_desc
.byteoff
= RX_HEAD_MAC_ADDR_ALIGNMENT
;
1311 rx_desc
.datalen
= rx_max_packet_size
;
// Attach a receive buffer to every RX descriptor across all interfaces.
1312 for ( i
= 0; i
< MAX_ITF_NUMBER
* dma_rx_descriptor_length
; i
++ ) {
1313 skb
= alloc_skb_rx();
// The PPE stores word-aligned bus addresses: drop the two LSBs and
// mask to 28 bits.
1316 rx_desc
.dataptr
= ((unsigned int)skb
->data
>> 2) & 0x0FFFFFFF;
1317 p_rx_desc
[i
] = rx_desc
;
1323 static INLINE
void clear_priv_data(void)
1326 struct sk_buff
*skb
;
1328 for ( i
= 0; i
< MAX_ITF_NUMBER
; i
++ ) {
1329 if ( g_ptm_priv_data
.itf
[i
].tx_skb
!= NULL
) {
1330 for ( j
= 0; j
< dma_tx_descriptor_length
; j
++ )
1331 if ( g_ptm_priv_data
.itf
[i
].tx_skb
[j
] != NULL
)
1332 dev_kfree_skb_any(g_ptm_priv_data
.itf
[i
].tx_skb
[j
]);
1334 if ( g_ptm_priv_data
.itf
[i
].rx_desc
!= NULL
) {
1335 for ( j
= 0; j
< dma_rx_descriptor_length
; j
++ ) {
1336 if ( g_ptm_priv_data
.itf
[i
].rx_desc
[j
].sop
|| g_ptm_priv_data
.itf
[i
].rx_desc
[j
].eop
) { // descriptor initialized
1337 skb
= get_skb_rx_pointer(g_ptm_priv_data
.itf
[i
].rx_desc
[j
].dataptr
);
1338 dev_kfree_skb_any(skb
);
1344 if ( g_ptm_priv_data
.rx_desc_base
!= NULL
)
1345 kfree(g_ptm_priv_data
.rx_desc_base
);
1347 if ( g_ptm_priv_data
.tx_desc_base
!= NULL
)
1348 kfree(g_ptm_priv_data
.tx_desc_base
);
1350 if ( g_ptm_priv_data
.tx_skb_base
!= NULL
)
1351 kfree(g_ptm_priv_data
.tx_skb_base
);
// Program the PPE configuration tables: clear CDM data memory, set the
// global WAN RX/TX configuration registers, then fill the per-channel
// WRX/WTX DMA and port configuration tables from the descriptor rings
// built by init_priv_data().
// NOTE(review): loop bodies and some statements are elided in this view.
1354 static INLINE
void init_tables(void)
1357 volatile unsigned int *p
;
// Zero-initialized templates copied into the hardware tables below.
1358 struct wrx_dma_channel_config rx_config
= {0};
1359 struct wtx_dma_channel_config tx_config
= {0};
1360 struct wrx_port_cfg_status rx_port_cfg
= { 0 };
1361 struct wtx_port_cfg tx_port_cfg
= { 0 };
1366 IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG
); // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
1367 p
= CDM_DATA_MEMORY(0, 0); // Clear CDM block 1
1368 for ( i
= 0; i
< CDM_DATA_MEMORY_DWLEN
; i
++, p
++ )
// Global WAN configuration: descriptor write delay and enable bits for
// every RX/TX DMA channel.
1374 IFX_REG_W32(write_desc_delay
, CFG_WAN_WRDES_DELAY
);
1375 IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER
) - 1, CFG_WRX_DMACH_ON
);
1376 IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER
) - 1, CFG_WTX_DMACH_ON
);
1378 IFX_REG_W32(8, CFG_WRX_LOOK_BITTH
); // WAN RX EFM-TC Looking Threshold
1380 IFX_REG_W32(eth_efmtc_crc_cfg
, CFG_ETH_EFMTC_CRC
);
1383 * WRX DMA Channel Configuration Table
1385 rx_config
.deslen
= dma_rx_descriptor_length
;
1386 rx_port_cfg
.mfs
= ETH_MAX_FRAME_LENGTH
;
1387 rx_port_cfg
.local_state
= 0; // looking for sync
1388 rx_port_cfg
.partner_state
= 0; // parter receiver is out of sync
1390 for ( i
= 0; i
< MAX_RX_DMA_CHANNEL_NUMBER
; i
++ ) {
// desba: descriptor base address as a word-aligned 28-bit bus address.
1391 rx_config
.desba
= ((unsigned int)g_ptm_priv_data
.itf
[i
].rx_desc
>> 2) & 0x0FFFFFFF;
1392 *WRX_DMA_CHANNEL_CONFIG(i
) = rx_config
;
// One DMA channel per port, mapped 1:1.
1394 rx_port_cfg
.dmach
= i
;
1395 *WRX_PORT_CONFIG(i
) = rx_port_cfg
;
1399 * WTX DMA Channel Configuration Table
1401 tx_config
.deslen
= dma_tx_descriptor_length
;
// TX congestion watermark thresholds (hardware-defined meaning).
1402 tx_port_cfg
.tx_cwth1
= 5;
1403 tx_port_cfg
.tx_cwth2
= 4;
1405 for ( i
= 0; i
< MAX_TX_DMA_CHANNEL_NUMBER
; i
++ ) {
1406 tx_config
.desba
= ((unsigned int)g_ptm_priv_data
.itf
[i
].tx_desc
>> 2) & 0x0FFFFFFF;
1407 *WTX_DMA_CHANNEL_CONFIG(i
) = tx_config
;
1409 *WTX_PORT_CONFIG(i
) = tx_port_cfg
;
1416 * ####################################
1418 * ####################################
// DSL "showtime" (line-up) callback, registered with the MEI driver in
// ltq_ptm_probe(): mark every PTM netdevice as having carrier.
// NOTE(review): parts of the body (presumably setting g_showtime and the
// return) are elided from this view.
1421 static int ptm_showtime_enter(struct port_cell_info
*port_cell
, void *xdata_addr
)
1427 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ )
1428 netif_carrier_on(g_net_dev
[i
]);
1430 printk("enter showtime\n");
// DSL showtime-exit (line-down) callback: drop carrier on every PTM
// netdevice.  Counterpart of ptm_showtime_enter().
// NOTE(review): body partially elided (g_showtime clear / return not
// visible here).
1435 static int ptm_showtime_exit(void)
1442 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ )
1443 netif_carrier_off(g_net_dev
[i
]);
1447 printk("leave showtime\n");
// Device-tree match table: exactly one PPE compatible string is compiled
// in, selected by the SoC Kconfig symbol.
// NOTE(review): the table's terminating empty entry is elided from this view.
1453 static const struct of_device_id ltq_ptm_match
[] = {
1454 #ifdef CONFIG_DANUBE
1455 { .compatible
= "lantiq,ppe-danube", .data
= NULL
},
1456 #elif defined CONFIG_AMAZON_SE
1457 { .compatible
= "lantiq,ppe-ase", .data
= NULL
},
1458 #elif defined CONFIG_AR9
1459 { .compatible
= "lantiq,ppe-arx100", .data
= NULL
},
1460 #elif defined CONFIG_VR9
1461 { .compatible
= "lantiq,ppe-xrx200", .data
= NULL
},
// Export the table for module autoloading via modalias.
1465 MODULE_DEVICE_TABLE(of
, ltq_ptm_match
);
1468 * ####################################
1470 * ####################################
1475 * Initialize global variables, PP32, comunication structures, register IRQ
1476 * and register device.
1481 * else --- failure, usually it is negative value of error code
// Platform-driver probe: allocate private data, initialize the PPE chip,
// create and register the PTM network devices, hook the mailbox IRQ,
// start the PP32 firmware core, then sync with the current DSL showtime
// state and register the showtime callbacks.  Unwinds in reverse order
// on failure via the goto labels at the bottom.
// NOTE(review): many error checks and statements are elided in this view.
1483 static int ltq_ptm_probe(struct platform_device
*pdev
)
1486 struct port_cell_info port_cell
= {0};
1487 void *xdata_addr
= NULL
;
1493 ret
= init_priv_data();
1495 err("INIT_PRIV_DATA_FAIL");
1496 goto INIT_PRIV_DATA_FAIL
;
1499 ifx_ptm_init_chip(pdev
);
// Allocate one netdevice per PTM interface; ptm_setup() wires up ops.
1502 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ ) {
1503 g_net_dev
[i
] = alloc_netdev(0, g_net_dev_name
[i
], NET_NAME_UNKNOWN
, ether_setup
);
1504 if ( g_net_dev
[i
] == NULL
)
1505 goto ALLOC_NETDEV_FAIL
;
1506 ptm_setup(g_net_dev
[i
], i
);
1509 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ ) {
1510 ret
= register_netdev(g_net_dev
[i
]);
1512 goto REGISTER_NETDEV_FAIL
;
1515 /* register interrupt handler */
// IRQF_DISABLED was removed in kernel 4.1; pass 0 on newer kernels.
1516 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
1517 ret
= request_irq(PPE_MAILBOX_IGU1_INT
, mailbox_irq_handler
, 0, "ptm_mailbox_isr", &g_ptm_priv_data
);
1519 ret
= request_irq(PPE_MAILBOX_IGU1_INT
, mailbox_irq_handler
, IRQF_DISABLED
, "ptm_mailbox_isr", &g_ptm_priv_data
);
1522 if ( ret
== -EBUSY
) {
1523 err("IRQ may be occupied by other driver, please reconfig to disable it.");
1526 err("request_irq fail");
1528 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL
;
// Keep the IRQ masked until the PP32 firmware is running.
1530 disable_irq(PPE_MAILBOX_IGU1_INT
);
1532 ret
= ifx_pp32_start(0);
1534 err("ifx_pp32_start fail!");
1535 goto PP32_START_FAIL
;
// Mask all mailbox interrupt sources and clear any pending ones before
// re-enabling the IRQ line.
1537 IFX_REG_W32(0, MBOX_IGU1_IER
);
1538 IFX_REG_W32(~0, MBOX_IGU1_ISRC
);
1540 enable_irq(PPE_MAILBOX_IGU1_INT
);
// If the DSL line is already in showtime, catch up immediately.
1545 port_cell
.port_num
= 1;
1546 ifx_mei_atm_showtime_check(&g_showtime
, &port_cell
, &xdata_addr
);
1548 ptm_showtime_enter(&port_cell
, &xdata_addr
);
// Publish the showtime callbacks for the MEI driver to invoke.
1551 ifx_mei_atm_showtime_enter
= ptm_showtime_enter
;
1552 ifx_mei_atm_showtime_exit
= ptm_showtime_exit
;
1554 ifx_ptm_version(ver_str
);
1555 printk(KERN_INFO
"%s", ver_str
);
1557 printk("ifxmips_ptm: PTM init succeed\n");
// ---- error unwind path (reverse order of acquisition) ----
1562 free_irq(PPE_MAILBOX_IGU1_INT
, &g_ptm_priv_data
);
1563 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL
:
1564 i
= ARRAY_SIZE(g_net_dev
);
1565 REGISTER_NETDEV_FAIL
:
1567 unregister_netdev(g_net_dev
[i
]);
1568 i
= ARRAY_SIZE(g_net_dev
);
1571 free_netdev(g_net_dev
[i
]);
1572 g_net_dev
[i
] = NULL
;
1574 INIT_PRIV_DATA_FAIL
:
1576 printk("ifxmips_ptm: PTM init failed\n");
1582 * Release memory, free IRQ, and deregister device.
// Platform-driver remove: unregister the showtime callbacks first so the
// MEI driver stops calling into us, then release the IRQ, the network
// devices, and the chip resources.
// NOTE(review): some statements (e.g. PP32 stop, clear_priv_data call,
// return) are elided from this view.
1588 static int ltq_ptm_remove(struct platform_device
*pdev
)
1592 ifx_mei_atm_showtime_enter
= NULL
;
1593 ifx_mei_atm_showtime_exit
= NULL
;
1600 free_irq(PPE_MAILBOX_IGU1_INT
, &g_ptm_priv_data
);
1602 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ )
1603 unregister_netdev(g_net_dev
[i
]);
1605 for ( i
= 0; i
< ARRAY_SIZE(g_net_dev
); i
++ ) {
1606 free_netdev(g_net_dev
[i
]);
1607 g_net_dev
[i
] = NULL
;
1610 ifx_ptm_uninit_chip();
// Platform driver glue: matched against the device tree via ltq_ptm_match.
// NOTE(review): the .driver sub-struct header (name field) is elided here.
1617 static struct platform_driver ltq_ptm_driver
= {
1618 .probe
= ltq_ptm_probe
,
1619 .remove
= ltq_ptm_remove
,
1622 .owner
= THIS_MODULE
,
1623 .of_match_table
= ltq_ptm_match
,
// Generates module init/exit that register/unregister the driver.
1627 module_platform_driver(ltq_ptm_driver
);
1629 MODULE_LICENSE("GPL");