186c848693f975edbf68e2762eb2e3df08b62618
[openwrt/staging/wigyori.git] / package / kernel / lantiq / ltq-ptm / src / ifxmips_ptm_adsl.c
1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_adsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for Danube/
10 ** Amazon-SE/AR9)
11 ** COPYRIGHT : Copyright (c) 2006
12 ** Infineon Technologies AG
13 ** Am Campeon 1-12, 85579 Neubiberg, Germany
14 **
15 ** This program is free software; you can redistribute it and/or modify
16 ** it under the terms of the GNU General Public License as published by
17 ** the Free Software Foundation; either version 2 of the License, or
18 ** (at your option) any later version.
19 **
20 ** HISTORY
21 ** $Date $Author $Comment
22 ** 07 JUL 2009 Xu Liang Init Version
23 *******************************************************************************/
24
25
26
27 /*
28 * ####################################
29 * Head File
30 * ####################################
31 */
32
33 /*
34 * Common Head File
35 */
36 #include <linux/version.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/errno.h>
41 #include <linux/proc_fs.h>
42 #include <linux/init.h>
43 #include <linux/ioctl.h>
44 #include <linux/etherdevice.h>
45 #include <linux/interrupt.h>
46 #include <linux/netdevice.h>
47 #include <linux/platform_device.h>
48 #include <linux/of_device.h>
49 #include <asm/io.h>
50
51 /*
52 * Chip Specific Head File
53 */
54 #include "ifxmips_ptm_adsl.h"
55
56
57 #include <lantiq_soc.h>
58
59 /*
60 * ####################################
61 * Kernel Version Adaption
62 * ####################################
63 */
64 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
65 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
66 #define MODULE_PARM(a, b) module_param(a, int, 0)
67 #else
68 #define MODULE_PARM_ARRAY(a, b) MODULE_PARM(a, b)
69 #endif
70
71
72
73 /*
74 * ####################################
75 * Parameters to Configure PPE
76 * ####################################
77 */
78
79 static int write_desc_delay = 0x20; /* Write descriptor delay */
80
81 static int rx_max_packet_size = ETH_MAX_FRAME_LENGTH;
82 /* Max packet size for RX */
83
84 static int dma_rx_descriptor_length = 24; /* Number of descriptors per DMA RX channel */
85 static int dma_tx_descriptor_length = 24; /* Number of descriptors per DMA TX channel */
86
87 static int eth_efmtc_crc_cfg = 0x03100710; /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 */
88 /* rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */
89
90 MODULE_PARM(write_desc_delay, "i");
91 MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");
92
93 MODULE_PARM(rx_max_packet_size, "i");
94 MODULE_PARM_DESC(rx_max_packet_size, "Max packet size in byte for downstream ethernet frames");
95
96 MODULE_PARM(dma_rx_descriptor_length, "i");
97 MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptor assigned to DMA RX channel (>16)");
98 MODULE_PARM(dma_tx_descriptor_length, "i");
99 MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptor assigned to DMA TX channel (>16)");
100
101 MODULE_PARM(eth_efmtc_crc_cfg, "i");
102 MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC");
103
104
105
106 /*
107 * ####################################
108 * Definition
109 * ####################################
110 */
111
112
113 #define DUMP_SKB_LEN ~0
114
115
116
117 /*
118 * ####################################
119 * Declaration
120 * ####################################
121 */
122
123 /*
124 * Network Operations
125 */
126 static void ptm_setup(struct net_device *, int);
127 static struct net_device_stats *ptm_get_stats(struct net_device *);
128 static int ptm_open(struct net_device *);
129 static int ptm_stop(struct net_device *);
130 static unsigned int ptm_poll(int, unsigned int);
131 static int ptm_napi_poll(struct napi_struct *, int);
132 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
133 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
134 static int ptm_change_mtu(struct net_device *, int);
135 #endif
136 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
137 static void ptm_tx_timeout(struct net_device *);
138
139 /*
140 * DSL Data LED
141 */
142 static INLINE void adsl_led_flash(void);
143
144 /*
145 * buffer manage functions
146 */
147 static INLINE struct sk_buff* alloc_skb_rx(void);
148 //static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
149 static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
150 static INLINE int get_tx_desc(unsigned int, unsigned int *);
151
152 /*
153 * Mailbox handler and signal function
154 */
155 static INLINE int mailbox_rx_irq_handler(unsigned int);
156 static irqreturn_t mailbox_irq_handler(int, void *);
157 static INLINE void mailbox_signal(unsigned int, int);
158 #ifdef CONFIG_IFX_PTM_RX_TASKLET
159 static void do_ptm_tasklet(unsigned long);
160 #endif
161
162 /*
163 * Debug Functions
164 */
165 #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
166 static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
167 #else
168 #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
169 #endif
170 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
171 static void skb_swap(struct sk_buff *);
172 #else
173 #define skb_swap(skb) do {} while (0)
174 #endif
175
176 /*
177 * Proc File Functions
178 */
179 static INLINE void proc_file_create(void);
180 static INLINE void proc_file_delete(void);
181 static int proc_read_version(char *, char **, off_t, int, int *, void *);
182 static int proc_read_wanmib(char *, char **, off_t, int, int *, void *);
183 static int proc_write_wanmib(struct file *, const char *, unsigned long, void *);
184 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
185 static int proc_read_genconf(char *, char **, off_t, int, int *, void *);
186 #endif
187 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
188 static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
189 static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
190 #endif
191
192 /*
193 * Proc Help Functions
194 */
195 static INLINE int stricmp(const char *, const char *);
196 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
197 static INLINE int strincmp(const char *, const char *, int);
198 #endif
199 static INLINE int ifx_ptm_version(char *);
200
201 /*
202 * Init & clean-up functions
203 */
204 static INLINE void check_parameters(void);
205 static INLINE int init_priv_data(void);
206 static INLINE void clear_priv_data(void);
207 static INLINE void init_tables(void);
208
209 /*
 * External Function
211 */
212 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
213 extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
214 #else
215 static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
216 {
217 if ( is_showtime != NULL )
218 *is_showtime = 0;
219 return 0;
220 }
221 #endif
222
223 /*
224 * External variable
225 */
226 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
227 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
228 extern int (*ifx_mei_atm_showtime_exit)(void);
229 #else
230 int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
231 EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
232 int (*ifx_mei_atm_showtime_exit)(void) = NULL;
233 EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
234 #endif
235
236
237
238 /*
239 * ####################################
240 * Local Variable
241 * ####################################
242 */
243
244 static struct ptm_priv_data g_ptm_priv_data;
245
246 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
247 static struct net_device_ops g_ptm_netdev_ops = {
248 .ndo_get_stats = ptm_get_stats,
249 .ndo_open = ptm_open,
250 .ndo_stop = ptm_stop,
251 .ndo_start_xmit = ptm_hard_start_xmit,
252 .ndo_validate_addr = eth_validate_addr,
253 .ndo_set_mac_address = eth_mac_addr,
254 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
255 .ndo_change_mtu = ptm_change_mtu,
256 #endif
257 .ndo_do_ioctl = ptm_ioctl,
258 .ndo_tx_timeout = ptm_tx_timeout,
259 };
260 #endif
261
262 static struct net_device *g_net_dev[2] = {0};
263 static char *g_net_dev_name[2] = {"dsl0", "dslfast0"};
264
265 #ifdef CONFIG_IFX_PTM_RX_TASKLET
266 static struct tasklet_struct g_ptm_tasklet[] = {
267 {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 0},
268 {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 1},
269 };
270 #endif
271
272 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
273
274 static struct proc_dir_entry* g_ptm_dir = NULL;
275
276 static int g_showtime = 0;
277
278
279
280 /*
281 * ####################################
282 * Local Function
283 * ####################################
284 */
285
static void ptm_setup(struct net_device *dev, int ndev)
{
    /* One-time setup of PTM interface "ndev" (0 = dsl0, 1 = dslfast0):
     * hook netdev operations, register NAPI and assign a default MAC. */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
    /* carrier stays off until the DSL MEI driver reports showtime */
    netif_carrier_off(dev);
#endif

    /* hook network operations */
    dev->netdev_ops = &g_ptm_netdev_ops;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
    /* Allow up to 1508 bytes, for RFC4638 */
    dev->max_mtu = ETH_DATA_LEN + 8;
#endif
    /* NAPI weight 25 matches work_to_do used by the tasklet variant below */
    netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
    dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;

    /* fixed vendor MAC; only the last byte differs per interface
     * (NOTE(review): writes dev_addr directly - assumes a kernel where
     * dev->dev_addr is still writable; confirm for >= 5.17 backports) */
    dev->dev_addr[0] = 0x00;
    dev->dev_addr[1] = 0x20;
    dev->dev_addr[2] = 0xda;
    dev->dev_addr[3] = 0x86;
    dev->dev_addr[4] = 0x23;
    dev->dev_addr[5] = 0x75 + ndev;
}
308
static struct net_device_stats *ptm_get_stats(struct net_device *dev)
{
    int ndev;

    /* map the net_device back to its index in g_net_dev[] */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* fold the PPE firmware MIB counters into the software stats:
     * rx_errors  = TC-CRC errored + Ethernet-CRC errored PDUs
     * rx_dropped = no-descriptor drops + length violations, plus PDUs the
     *              firmware counted correct but the host never delivered */
    g_ptm_priv_data.itf[ndev].stats.rx_errors = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
    g_ptm_priv_data.itf[ndev].stats.rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + (WAN_MIB_TABLE[ndev].wrx_correct_pdu - g_ptm_priv_data.itf[ndev].stats.rx_packets);

    return &g_ptm_priv_data.itf[ndev].stats;
}
321
static int ptm_open(struct net_device *dev)
{
    int ndev;

    /* map the net_device back to its index in g_net_dev[] */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* enable NAPI before the RX interrupt so a schedule cannot be lost */
    napi_enable(&g_ptm_priv_data.itf[ndev].napi);

    /* unmask this channel's RX mailbox interrupt (bit ndev of IGU1) */
    IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);

    netif_start_queue(dev);

    return 0;
}
337
static int ptm_stop(struct net_device *dev)
{
    int ndev;

    /* map the net_device back to its index in g_net_dev[] */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* mask both RX (bit ndev) and TX (bit ndev+16) mailbox interrupts */
    IFX_REG_W32_MASK((1 << ndev) | (1 << (ndev + 16)), 0, MBOX_IGU1_IER);

    napi_disable(&g_ptm_priv_data.itf[ndev].napi);

    netif_stop_queue(dev);

    return 0;
}
353
/*
 * Consume up to work_to_do completed RX descriptors for channel ndev.
 * Returns the number of packets actually delivered; stops early when
 * mailbox_rx_irq_handler() reports no descriptor available.
 */
static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
{
    unsigned int work_done = 0;

    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(ndev) < 0 )
            break;

        work_done++;
    }

    return work_done;
}
/* NAPI poll callback: drain RX work and re-enable the interrupt when idle. */
static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev;
    unsigned int work_done;

    /* map the NAPI context's net_device back to its index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != napi->dev; ndev++ );

    work_done = ptm_poll(ndev, budget);

    // interface down
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_ISRC);
        // double check: a descriptor may have completed between the drain
        // and the ack above; only stop polling if the channel is still empty
        if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
            napi_complete(napi);
            IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
            return work_done;
        }
    }

    // next round: returning budget-or-less without napi_complete keeps us scheduled
    return work_done;
}
399
/*
 * Transmit one skb on the PTM interface.
 * Claims a TX descriptor, points it at skb->data (cache written back for
 * DMA) and signals the PP32 firmware.  The skb is freed lazily the next
 * time this descriptor slot is reused.  Always returns NETDEV_TX_OK; a
 * packet that cannot be sent is dropped and counted in tx_dropped.
 */
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ndev;
    unsigned int f_full;
    int desc_base;
    register struct tx_descriptor reg_desc = {0};

    /* map the net_device back to its index in g_net_dev[] */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* cannot transmit unless the DSL line is in showtime */
    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /* allocate descriptor */
    desc_base = get_tx_desc(ndev, &f_full);
    if ( f_full ) {
        /* ring is (nearly) full: stop the queue and arm the TX-done
         * interrupt so mailbox_irq_handler() can wake it again */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
        netif_trans_update(dev);
#else
        dev->trans_start = jiffies;
#endif
        netif_stop_queue(dev);

        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;

    /* free the skb previously sent from this slot (deferred cleanup) */
    if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
    g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;

    /* fill the descriptor: word-aligned data pointer + byte offset,
     * pad short frames to ETH_ZLEN, mark it firmware-owned (own=1) */
    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
    reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    reg_desc.own = 1;
    reg_desc.c = 1;
    reg_desc.sop = reg_desc.eop = 1;

    /* write descriptor to memory and write back cache */
    g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);
    wmb();

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);

    /* debug aid: reflect the frame back by swapping MAC/IP addresses */
    if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
        skb_swap(skb);
    }

    g_ptm_priv_data.itf[ndev].stats.tx_packets++;
    g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
    netif_trans_update(dev);
#else
    dev->trans_start = jiffies;
#endif
    /* tell the PP32 firmware a TX descriptor is ready */
    mailbox_signal(ndev, 1);

    adsl_led_flash();

    return NETDEV_TX_OK;

PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
    return NETDEV_TX_OK;
}
472 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
473 static int ptm_change_mtu(struct net_device *dev, int mtu)
474 {
475 /* Allow up to 1508 bytes, for RFC4638 */
476 if (mtu < 68 || mtu > ETH_DATA_LEN + 8)
477 return -EINVAL;
478 dev->mtu = mtu;
479 return 0;
480 }
481 #endif
482
/*
 * Private ioctl handler: exposes the PTM codeword / frame MIB counters
 * and gets/sets the Ethernet/EFM-TC CRC configuration registers.
 * NOTE(review): ifr->ifr_data is dereferenced directly as a kernel
 * pointer, without copy_from_user/copy_to_user - verify the calling
 * convention used by the control utility.
 */
static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    int ndev;

    /* map the net_device back to its index in g_net_dev[] */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    switch ( cmd )
    {
    case IFX_PTM_MIB_CW_GET:
        /* codeword-level counters; TX codeword counters are not kept */
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = WAN_MIB_TABLE[ndev].wrx_idle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = 0;
        break;
    case IFX_PTM_MIB_FRAME_GET:
        /* frame (PDU) level counters */
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend = WAN_MIB_TABLE[ndev].wtx_total_pdu;
        break;
    case IFX_PTM_CFG_GET:
        /* read the CRC configuration back from the shared register */
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
        break;
    case IFX_PTM_CFG_SET:
        /* TC CRC checking/generation is only enabled for a valid length
         * (16 or 32 bits); anything else disables it and zeroes the length */
        CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
        CFG_ETH_EFMTC_CRC->rx_eth_crc_check = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = 0;
        }
        CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
        }
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
545
546 static void ptm_tx_timeout(struct net_device *dev)
547 {
548 int ndev;
549
550 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
551 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
552
553 /* disable TX irq, release skb when sending new packet */
554 IFX_REG_W32_MASK(1 << (ndev + 16), 0, MBOX_IGU1_IER);
555
556 /* wake up TX queue */
557 netif_wake_queue(dev);
558
559 return;
560 }
561
/* Flash the DSL data LED on traffic - intentionally a no-op in this
 * driver version; kept so the call sites need no #ifdefs. */
static INLINE void adsl_led_flash(void)
{
}
565
/*
 * Allocate and prepare an RX buffer for the PPE DMA engine:
 * data is aligned to the DMA burst size, the skb pointer is stashed in
 * the word just before skb->data (recovered by get_skb_rx_pointer()),
 * and the cache is invalidated so the DMA write is not overwritten.
 * Returns NULL on allocation failure.
 */
static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length alignment and reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put skb pointer in reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /* write back and invalidate cache over the stashed pointer
         * (assumes 32-bit pointers, i.e. sizeof(skb) == 4 - MIPS target) */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache over the data buffer before DMA writes into it */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}
587
#if 0
/* Unused TX buffer allocator - compiled out, kept for reference only. */
static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length alignment */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
    return skb;
}
#endif
602
/*
 * Recover the skb that owns an RX data buffer.  "dataptr" is the word
 * address (byte address >> 2) taken from an RX descriptor; alloc_skb_rx()
 * stashed the skb pointer in the word immediately before the buffer, so
 * read it back through the uncached KSEG1 alias.
 */
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    /* word before the buffer, as an uncached byte address */
    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    /* sanity: must be a kernel pointer, and must refer back to the very
     * buffer the descriptor points at */
    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}
616
617 static INLINE int get_tx_desc(unsigned int itf, unsigned int *f_full)
618 {
619 int desc_base = -1;
620 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[itf];
621
622 // assume TX is serial operation
623 // no protection provided
624
625 *f_full = 1;
626
627 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 ) {
628 desc_base = p_itf->tx_desc_pos;
629 if ( ++(p_itf->tx_desc_pos) == dma_tx_descriptor_length )
630 p_itf->tx_desc_pos = 0;
631 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 )
632 *f_full = 0;
633 }
634
635 return desc_base;
636 }
637
static INLINE int mailbox_rx_irq_handler(unsigned int ch) // return: < 0 - descriptor not available, 0 - received one packet
{
    unsigned int ndev = ch;
    struct sk_buff *skb;
    struct sk_buff *new_skb;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    int netif_rx_ret;

    /* current RX descriptor for this channel */
    desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
    if ( desc->own || !desc->c )    // if PP32 hold descriptor or descriptor not completed
        return -EAGAIN;
    if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
        g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;

    /* snapshot the descriptor, then find the skb backing its buffer */
    reg_desc = *desc;
    skb = get_skb_rx_pointer(reg_desc.dataptr);

    if ( !reg_desc.err ) {
        /* only hand the frame up if a replacement buffer is available;
         * otherwise the old buffer is recycled and the frame is dropped */
        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);

            // parse protocol header
            skb->dev = g_net_dev[ndev];
            skb->protocol = eth_type_trans(skb, skb->dev);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
            g_net_dev[ndev]->last_rx = jiffies;
#endif

            netif_rx_ret = netif_receive_skb(skb);

            if ( netif_rx_ret != NET_RX_DROP ) {
                g_ptm_priv_data.itf[ndev].stats.rx_packets++;
                g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
            }

            /* point the descriptor at the fresh buffer (word address) */
            reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }
    }
    else
        /* errored frame: recycle the buffer, clear the error flag */
        reg_desc.err = 0;

    /* re-arm the descriptor and return it to the firmware (own=1) */
    reg_desc.datalen = rx_max_packet_size;
    reg_desc.own = 1;
    reg_desc.c = 0;

    // update descriptor
    *desc = reg_desc;
    wmb();

    /* notify the PP32 firmware that an RX descriptor was returned */
    mailbox_signal(ndev, 0);

    adsl_led_flash();

    return 0;
}
700
/*
 * Top-half for the mailbox IRQ.  Bits 0-15 of IGU1 are per-channel RX
 * completions; bits 16+ are TX-done notifications.  RX work is either
 * drained inline (CONFIG_IFX_PTM_RX_INTERRUPT) or deferred to NAPI.
 */
static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    unsigned int isr;
    int i;

    /* read pending bits, acknowledge them, mask against the enabled set */
    isr = IFX_REG_R32(MBOX_IGU1_ISR);
    IFX_REG_W32(isr, MBOX_IGU1_ISRC);
    isr &= IFX_REG_R32(MBOX_IGU1_IER);

    /* service pending bits from highest to lowest
     * NOTE(review): loop termination relies on __fls(0) yielding a
     * negative value - confirm for this architecture's implementation */
    while ( (i = __fls(isr)) >= 0 ) {
        isr ^= 1 << i;

        if ( i >= 16 ) {
            // TX: descriptor freed - mask further TX irqs for this channel
            // and let the stack queue packets again
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            i -= 16;
            if ( i < MAX_ITF_NUMBER )
                netif_wake_queue(g_net_dev[i]);
        }
        else {
            // RX
#ifdef CONFIG_IFX_PTM_RX_INTERRUPT
            /* drain every valid descriptor directly in IRQ context */
            while ( WRX_DMA_CHANNEL_CONFIG(i)->vlddes > 0 )
                mailbox_rx_irq_handler(i);
#else
            /* mask this channel's RX irq and defer the work to NAPI */
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            napi_schedule(&g_ptm_priv_data.itf[i].napi);
#endif
        }
    }

    return IRQ_HANDLED;
}
734
/*
 * Raise a mailbox interrupt towards the PP32 firmware for interface
 * "itf": TX signals use bits 16+, RX signals bits 0-15.  Busy-waits
 * (bounded to 1000 iterations) for a previous signal on the same bit to
 * be consumed before setting it again.
 */
static INLINE void mailbox_signal(unsigned int itf, int is_tx)
{
    int count = 1000;

    if ( is_tx ) {
        while ( MBOX_IGU3_ISR_ISR(itf + 16) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf + 16), MBOX_IGU3_ISRS);
    }
    else {
        while ( MBOX_IGU3_ISR_ISR(itf) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf), MBOX_IGU3_ISRS);
    }

    /* NOTE(review): if the wait timed out (count == 0) the signal is
     * written anyway - the ASSERT only reports the stall in debug builds */
    ASSERT(count != 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR));
}
752
#ifdef CONFIG_IFX_PTM_RX_TASKLET
/*
 * Tasklet-based RX bottom half (alternative to NAPI): drain up to 25
 * descriptors for channel "arg", then either re-enable the RX interrupt
 * (channel empty) or reschedule itself (more work pending).  Mirrors the
 * interrupt-ack/double-check logic of ptm_napi_poll().
 */
static void do_ptm_tasklet(unsigned long arg)
{
    unsigned int work_to_do = 25;
    unsigned int work_done = 0;

    ASSERT(arg >= 0 && arg < ARRAY_SIZE(g_net_dev), "arg = %lu (wrong value)", arg);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(arg)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(arg) < 0 )
            break;

        work_done++;
    }

    // interface down
    if ( !netif_running(g_net_dev[arg]) )
        return;

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_ISRC);
        // double check: a descriptor may have completed since the drain
        if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
            IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_IER);
            return;
        }
    }

    // next round
    tasklet_schedule(&g_ptm_tasklet[arg]);
}
#endif
787
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
/*
 * Debug helper: hex-dump up to "len" bytes of an skb to the kernel log,
 * 16 bytes per row, gated on the TX/RX dump bits of ifx_ptm_dbg_enable.
 */
static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
{
    int pos;

    /* only dump when the matching direction's debug flag is set */
    if ( !(ifx_ptm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
        return;

    if ( skb->len < len )
        len = skb->len;

    if ( len > rx_max_packet_size ) {
        printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
        return;
    }

    if ( ch >= 0 )
        printk("%s (port %d, ch %d)\n", title, port, ch);
    else
        printk("%s\n", title);
    printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
    for ( pos = 0; pos < len; pos++ ) {
        if ( pos % 16 == 0 )
            printk(" %4d:", pos);
        printk(" %02X", (int)(*((char*)skb->data + pos) & 0xFF));
        if ( pos % 16 == 15 )
            printk("\n");
    }
    if ( len % 16 != 0 )
        printk("\n");
}
#endif
820
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
/*
 * Debug helper for the MAC-swap loopback mode: exchange source and
 * destination MAC addresses (and, for IPv4, the IP addresses) in place
 * so the transmitted frame is reflected back to the sender.  Broadcast/
 * multicast frames (I/G bit set) are left untouched.
 */
static void skb_swap(struct sk_buff *skb)
{
    unsigned char tmp[8];
    unsigned char *p = skb->data;

    if ( !(p[0] & 0x01) ) { // bypass broadcast/multicast
        // swap MAC
        memcpy(tmp, p, 6);
        memcpy(p, p + 6, 6);
        memcpy(p + 6, tmp, 6);
        p += 12;    /* p now at the EtherType field */

        // bypass VLAN (one or more 802.1Q tags, TPID 0x8100)
        while ( p[0] == 0x81 && p[1] == 0x00 )
            p += 4;

        // IP: EtherType 0x0800; src IP sits 14 bytes past the EtherType
        // (2 bytes type + 12 bytes into the IPv4 header)
        if ( p[0] == 0x08 && p[1] == 0x00 ) {
            p += 14;
            memcpy(tmp, p, 4);
            memcpy(p, p + 4, 4);
            memcpy(p + 4, tmp, 4);
            p += 8;
        }

        /* write back the modified header range for the DMA engine */
        dma_cache_wback((unsigned long)skb->data, (unsigned long)p - (unsigned long)skb->data);
    }
}
#endif
851
/*
 * Create the /proc/driver/ifx_ptm entries using the legacy (pre-3.10)
 * procfs API (create_proc_entry / read_proc / write_proc).
 * NOTE(review): the whole body - including "version" and "wanmib" - is
 * compiled only under ENABLE_DBG_PROC, while proc_file_delete() removes
 * those two entries unconditionally; confirm this asymmetry is intended.
 */
static INLINE void proc_file_create(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    struct proc_dir_entry *res;

    g_ptm_dir = proc_mkdir("driver/ifx_ptm", NULL);

    /* read-only driver version string */
    create_proc_read_entry("version",
                            0,
                            g_ptm_dir,
                            proc_read_version,
                            NULL);

    /* firmware MIB counters; writable to allow "clear" */
    res = create_proc_entry("wanmib",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc  = proc_read_wanmib;
        res->write_proc = proc_write_wanmib;
    }

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    /* firmware general configuration dump */
    create_proc_read_entry("genconf",
                            0,
                            g_ptm_dir,
                            proc_read_genconf,
                            NULL);

#ifdef CONFIG_AR9
    /* AR9-only register dump */
    create_proc_read_entry("regs",
                            0,
                            g_ptm_dir,
                            ifx_ptm_proc_read_regs,
                            NULL);
#endif
#endif

    /* debug flag control (read/write) */
    res = create_proc_entry("dbg",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc  = proc_read_dbg;
        res->write_proc = proc_write_dbg;
    }
#endif
}
898
/*
 * Remove the /proc/driver/ifx_ptm entries in reverse creation order.
 * NOTE(review): "wanmib" and "version" are removed unconditionally here
 * but only created under ENABLE_DBG_PROC in proc_file_create() - with
 * that option disabled g_ptm_dir is NULL; verify this configuration.
 */
static INLINE void proc_file_delete(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    remove_proc_entry("dbg", g_ptm_dir);
#endif

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
#ifdef CONFIG_AR9
    remove_proc_entry("regs", g_ptm_dir);
#endif

    remove_proc_entry("genconf", g_ptm_dir);
#endif

    remove_proc_entry("wanmib", g_ptm_dir);

    remove_proc_entry("version", g_ptm_dir);

    remove_proc_entry("driver/ifx_ptm", NULL);
}
919
/*
 * Legacy read_proc handler for /proc/driver/ifx_ptm/version: render the
 * version string into "buf" and return the slice [offset, offset+count)
 * per the old procfs read contract (*start/*eof semantics).
 */
static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
    int len = 0;

    len += ifx_ptm_version(buf + len);

    /* reader already past the end: report EOF with no data */
    if ( offset >= len ) {
        *start = buf;
        *eof = 1;
        return 0;
    }
    *start = buf + offset;
    /* more remaining than requested: return a full chunk, no EOF yet */
    if ( (len -= offset) > count )
        return count;
    *eof = 1;
    return len;
}
937
938 static int proc_read_wanmib(char *page, char **start, off_t off, int count, int *eof, void *data)
939 {
940 int len = 0;
941 int i;
942 char *title[] = {
943 "dsl0\n",
944 "dslfast0\n"
945 };
946
947 for ( i = 0; i < ARRAY_SIZE(title); i++ ) {
948 len += sprintf(page + off + len, title[i]);
949 len += sprintf(page + off + len, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu);
950 len += sprintf(page + off + len, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu_bytes);
951 len += sprintf(page + off + len, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu);
952 len += sprintf(page + off + len, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu_bytes);
953 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu);
954 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu_bytes);
955 len += sprintf(page + off + len, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_nodesc_drop_pdu);
956 len += sprintf(page + off + len, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_len_violation_drop_pdu);
957 len += sprintf(page + off + len, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE[i].wrx_idle_bytes);
958 len += sprintf(page + off + len, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE[i].wrx_nonidle_cw);
959 len += sprintf(page + off + len, " wrx_idle_cw = %d\n", WAN_MIB_TABLE[i].wrx_idle_cw);
960 len += sprintf(page + off + len, " wrx_err_cw = %d\n", WAN_MIB_TABLE[i].wrx_err_cw);
961 len += sprintf(page + off + len, " wtx_total_pdu = %d\n", WAN_MIB_TABLE[i].wtx_total_pdu);
962 len += sprintf(page + off + len, " wtx_total_bytes = %d\n", WAN_MIB_TABLE[i].wtx_total_bytes);
963 }
964
965 *eof = 1;
966
967 return len;
968 }
969
/*
 * Legacy write_proc handler for /proc/driver/ifx_ptm/wanmib: the
 * commands "clear" / "clean" (case-insensitive) zero the firmware MIB
 * counter tables of both interfaces.  Any other input is ignored.
 * Always consumes the whole write.
 */
static int proc_write_wanmib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[2048];
    char *p;
    int len, rlen;

    int i;

    /* copy at most sizeof(str)-1 bytes from user space; rlen excludes
     * any bytes copy_from_user() failed to transfer */
    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    /* strip trailing whitespace/control characters, then leading ones */
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return count;

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clean") == 0 ) {
        for ( i = 0; i < 2; i++ )
            memset((void*)&WAN_MIB_TABLE[i], 0, sizeof(WAN_MIB_TABLE[i]));
    }

    return count;
}
994
995 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
996
/*
 *  proc read handler dumping the PPE general configuration registers and
 *  the RX/TX port and DMA channel tables.
 *  Uses the legacy proc_read windowing convention: the full text is built
 *  in a local buffer, then only the slice [off, off+count) is copied to
 *  the caller's page; on overrun the last chunk is reported as not yet
 *  consumed so the core calls again with a larger offset.
 */
static int proc_read_genconf(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int len_max = off + count;      // caller's window ends here
    char *pstr;
    char str[2048];
    int llen = 0;                   // length of the chunk built in str
    int i;
    unsigned long bit;

    pstr = *start = page;

    // memory barrier before reading PPE registers/tables
    // NOTE(review): presumably flushes pending writes on MIPS — confirm
    __sync();

    llen += sprintf(str + llen, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY, IFX_REG_R32(CFG_WAN_WRDES_DELAY));
    llen += sprintf(str + llen, "CFG_WRX_DMACH_ON (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_RX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WRX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WTX_DMACH_ON (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_TX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WTX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WRX_LOOK_BITTH (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH, IFX_REG_R32(CFG_WRX_LOOK_BITTH));
    llen += sprintf(str + llen, "CFG_ETH_EFMTC_CRC (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC, CFG_ETH_EFMTC_CRC->rx_tc_crc_len, CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? " on" : "off");
    llen += sprintf(str + llen, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? " on" : "off", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? " on" : "off");
    llen += sprintf(str + llen, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len, CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ? " on" : "off");
    llen += sprintf(str + llen, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? " on" : "off");

    llen += sprintf(str + llen, "RX Port:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i, (unsigned int)WRX_PORT_CONFIG(i), WRX_PORT_CONFIG(i)->mfs, WRX_PORT_CONFIG(i)->dmach, WRX_PORT_CONFIG(i)->local_state, WRX_PORT_CONFIG(i)->partner_state);
    llen += sprintf(str + llen, "RX DMA Channel:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        // desba is stored as a 28-bit word address; <<2 | KSEG1 rebuilds the uncached CPU address
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i), WRX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WRX_DMA_CHANNEL_CONFIG(i)->deslen, WRX_DMA_CHANNEL_CONFIG(i)->vlddes);

    llen += sprintf(str + llen, "TX Port:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i, (unsigned int)WTX_PORT_CONFIG(i), WTX_PORT_CONFIG(i)->tx_cwth2, WTX_PORT_CONFIG(i)->tx_cwth1);
    llen += sprintf(str + llen, "TX DMA Channel:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);

    // copy only the part of the chunk that falls inside [off, len_max)
    if ( len <= off && len + llen > off )
    {
        memcpy(pstr, str + off - len, len + llen - off);
        pstr += len + llen - off;
    }
    else if ( len > off )
    {
        memcpy(pstr, str, llen);
        pstr += llen;
    }
    len += llen;
    if ( len >= len_max )
        goto PROC_READ_GENCONF_OVERRUN_END;

    *eof = 1;

    return len - off;

PROC_READ_GENCONF_OVERRUN_END:
    // window full: report length excluding the last (possibly clipped) chunk
    return len - llen - off;
}
1061
1062 #endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1063
1064 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1065
/*
 *  proc read handler showing which debug facilities are currently enabled
 *  in the ifx_ptm_dbg_enable bitmask.
 */
static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "error print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "debug print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "assert - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "mac swap - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ? "enabled" : "disabled");

    *eof = 1;

    return len;
}
1081
/*
 *  proc write handler controlling the debug bitmask.
 *  Syntax: "enable|disable [err|dbg|assert|rx|tx|init|qos|swap|all] ..."
 *  With no flag name, all flags except MAC swap are toggled.  Also accepts
 *  "enter"/"leave" to force the showtime callbacks (debug aid) and
 *  "help"/"?" to print the usage line.
 */
static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
{
    // Token table: each mask has a long and a short spelling, stored as
    // consecutive pairs (hence the index i >> 1 into dbg_enable_mask).
    // The leading blank matches the separator left in front of each token.
    static const char *dbg_enable_mask_str[] = {
        " error print",
        " err",
        " debug print",
        " dbg",
        " assert",
        " assert",
        " dump rx skb",
        " rx",
        " dump tx skb",
        " tx",
        " dump init",
        " init",
        " dump qos",
        " qos",
        " mac swap",
        " swap",
        " all"
    };
    // strlen of each entry above, kept in sync by hand
    static const int dbg_enable_mask_str_len[] = {
        12, 4,
        12, 4,
        7, 7,
        12, 3,
        12, 3,
        10, 5,
        9, 4,
        9, 5,
        4
    };
    unsigned int dbg_enable_mask[] = {
        DBG_ENABLE_MASK_ERR,
        DBG_ENABLE_MASK_DEBUG_PRINT,
        DBG_ENABLE_MASK_ASSERT,
        DBG_ENABLE_MASK_DUMP_SKB_RX,
        DBG_ENABLE_MASK_DUMP_SKB_TX,
        DBG_ENABLE_MASK_DUMP_INIT,
        DBG_ENABLE_MASK_DUMP_QOS,
        DBG_ENABLE_MASK_MAC_SWAP,
        DBG_ENABLE_MASK_ALL
    };

    char str[2048];
    char *p;

    int len, rlen;

    int f_enable = 0;       // +1 = enable, -1 = disable, 0 = no command
    int i;

    // bounded copy from user space, trim trailing and leading whitespace
    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    // debugging feature for enter/leave showtime
    if ( strincmp(p, "enter", 5) == 0 && ifx_mei_atm_showtime_enter != NULL )
        ifx_mei_atm_showtime_enter(NULL, NULL);
    else if ( strincmp(p, "leave", 5) == 0 && ifx_mei_atm_showtime_exit != NULL )
        ifx_mei_atm_showtime_exit();

    if ( strincmp(p, "enable", 6) == 0 ) {
        p += 6;
        f_enable = 1;
    }
    else if ( strincmp(p, "disable", 7) == 0 ) {
        p += 7;
        f_enable = -1;
    }
    else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
        printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
    }

    if ( f_enable ) {
        if ( *p == 0 ) {
            // bare "enable"/"disable": touch everything except MAC swap
            if ( f_enable > 0 )
                ifx_ptm_dbg_enable |= DBG_ENABLE_MASK_ALL & ~DBG_ENABLE_MASK_MAC_SWAP;
            else
                ifx_ptm_dbg_enable &= ~DBG_ENABLE_MASK_ALL | DBG_ENABLE_MASK_MAC_SWAP;
        }
        else {
            // consume one token per pass; stops when no table entry matches
            do {
                for ( i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++ )
                    if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
                        if ( f_enable > 0 )
                            ifx_ptm_dbg_enable |= dbg_enable_mask[i >> 1];
                        else
                            ifx_ptm_dbg_enable &= ~dbg_enable_mask[i >> 1];
                        p += dbg_enable_mask_str_len[i];
                        break;
                    }
            } while ( i < ARRAY_SIZE(dbg_enable_mask_str) );
        }
    }

    return count;
}
1185
1186 #endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1187
1188 static INLINE int stricmp(const char *p1, const char *p2)
1189 {
1190 int c1, c2;
1191
1192 while ( *p1 && *p2 )
1193 {
1194 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1195 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1196 if ( (c1 -= c2) )
1197 return c1;
1198 p1++;
1199 p2++;
1200 }
1201
1202 return *p1 - *p2;
1203 }
1204
1205 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1206 static INLINE int strincmp(const char *p1, const char *p2, int n)
1207 {
1208 int c1 = 0, c2;
1209
1210 while ( n && *p1 && *p2 )
1211 {
1212 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1213 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1214 if ( (c1 -= c2) )
1215 return c1;
1216 p1++;
1217 p2++;
1218 n--;
1219 }
1220
1221 return n ? *p1 - *p2 : c1;
1222 }
1223 #endif
1224
1225 static INLINE int ifx_ptm_version(char *buf)
1226 {
1227 int len = 0;
1228 unsigned int major, minor;
1229
1230 ifx_ptm_get_fw_ver(&major, &minor);
1231
1232 len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
1233 len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
1234
1235 return len;
1236 }
1237
1238 static INLINE void check_parameters(void)
1239 {
1240 /* There is a delay between PPE write descriptor and descriptor is */
1241 /* really stored in memory. Host also has this delay when writing */
1242 /* descriptor. So PPE will use this value to determine if the write */
1243 /* operation makes effect. */
1244 if ( write_desc_delay < 0 )
1245 write_desc_delay = 0;
1246
1247 /* Because of the limitation of length field in descriptors, the packet */
1248 /* size could not be larger than 64K minus overhead size. */
1249 if ( rx_max_packet_size < ETH_MIN_FRAME_LENGTH )
1250 rx_max_packet_size = ETH_MIN_FRAME_LENGTH;
1251 else if ( rx_max_packet_size > 65536 - 1 )
1252 rx_max_packet_size = 65536 - 1;
1253
1254 if ( dma_rx_descriptor_length < 2 )
1255 dma_rx_descriptor_length = 2;
1256 if ( dma_tx_descriptor_length < 2 )
1257 dma_tx_descriptor_length = 2;
1258 }
1259
/*
 *  Allocate and initialize the driver's private data: the RX/TX descriptor
 *  rings and the TX skb bookkeeping array for all interfaces, plus one
 *  preallocated RX skb per RX descriptor.
 *  Returns 0 on success, -1 on allocation failure.  On failure the caller
 *  is expected to run clear_priv_data() to release partial allocations.
 */
static INLINE int init_priv_data(void)
{
    void *p;
    int i;
    struct rx_descriptor rx_desc = {0};
    struct sk_buff *skb;
    volatile struct rx_descriptor *p_rx_desc;
    volatile struct tx_descriptor *p_tx_desc;
    struct sk_buff **ppskb;

    // clear ptm private data structure
    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    // allocate memory for RX descriptors; DESC_ALIGNMENT extra bytes leave
    // room for the alignment fix-up done below
    p = kzalloc(MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    // drop any cached copies: the ring is accessed through the uncached
    // KSEG1 alias below while the PPE updates it via DMA
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.rx_desc_base = p;
    //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);

    // allocate memory for TX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.tx_desc_base = p;

    // allocate memory for TX skb pointers (+4 for 4-byte alignment below)
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_wback_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_ptm_priv_data.tx_skb_base = p;

    // align both rings to DESC_ALIGNMENT and map them through uncached KSEG1
    p_rx_desc = (volatile struct rx_descriptor *)((((unsigned int)g_ptm_priv_data.rx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_ptm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    // the skb pointer array only needs 4-byte alignment
    ppskb = (struct sk_buff **)(((unsigned int)g_ptm_priv_data.tx_skb_base + 3) & ~3);
    // carve per-interface slices out of the shared rings
    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        g_ptm_priv_data.itf[i].rx_desc = &p_rx_desc[i * dma_rx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
    }

    // template for every RX descriptor: owned by the PPE, one packet per
    // descriptor (sop+eop), byte offset leaves room for MAC header alignment
    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.id = 0;
    rx_desc.err = 0;
    rx_desc.datalen = rx_max_packet_size;
    for ( i = 0; i < MAX_ITF_NUMBER * dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return -1;
        // the PPE stores buffer addresses as 28-bit word addresses
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        p_rx_desc[i] = rx_desc;
    }

    return 0;
}
1322
1323 static INLINE void clear_priv_data(void)
1324 {
1325 int i, j;
1326 struct sk_buff *skb;
1327
1328 for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
1329 if ( g_ptm_priv_data.itf[i].tx_skb != NULL ) {
1330 for ( j = 0; j < dma_tx_descriptor_length; j++ )
1331 if ( g_ptm_priv_data.itf[i].tx_skb[j] != NULL )
1332 dev_kfree_skb_any(g_ptm_priv_data.itf[i].tx_skb[j]);
1333 }
1334 if ( g_ptm_priv_data.itf[i].rx_desc != NULL ) {
1335 for ( j = 0; j < dma_rx_descriptor_length; j++ ) {
1336 if ( g_ptm_priv_data.itf[i].rx_desc[j].sop || g_ptm_priv_data.itf[i].rx_desc[j].eop ) { // descriptor initialized
1337 skb = get_skb_rx_pointer(g_ptm_priv_data.itf[i].rx_desc[j].dataptr);
1338 dev_kfree_skb_any(skb);
1339 }
1340 }
1341 }
1342 }
1343
1344 if ( g_ptm_priv_data.rx_desc_base != NULL )
1345 kfree(g_ptm_priv_data.rx_desc_base);
1346
1347 if ( g_ptm_priv_data.tx_desc_base != NULL )
1348 kfree(g_ptm_priv_data.tx_desc_base);
1349
1350 if ( g_ptm_priv_data.tx_skb_base != NULL )
1351 kfree(g_ptm_priv_data.tx_skb_base);
1352 }
1353
/*
 *  Program the PPE firmware tables and general configuration registers:
 *  clear CDM data memory block 1, enable all RX/TX DMA channels, and fill
 *  the WRX/WTX port and DMA channel configuration tables so they point at
 *  the descriptor rings set up by init_priv_data.
 */
static INLINE void init_tables(void)
{
    int i;
    volatile unsigned int *p;
    struct wrx_dma_channel_config rx_config = {0};
    struct wtx_dma_channel_config tx_config = {0};
    struct wrx_port_cfg_status rx_port_cfg = { 0 };
    struct wtx_port_cfg tx_port_cfg = { 0 };

    /*
     *  CDM Block 1
     */
    IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG);  // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
    p = CDM_DATA_MEMORY(0, 0);                                              // Clear CDM block 1
    for ( i = 0; i < CDM_DATA_MEMORY_DWLEN; i++, p++ )
        IFX_REG_W32(0, p);

    /*
     *  General Registers
     */
    IFX_REG_W32(write_desc_delay, CFG_WAN_WRDES_DELAY);
    // set one enable bit per DMA channel
    IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER) - 1, CFG_WRX_DMACH_ON);
    IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1, CFG_WTX_DMACH_ON);

    IFX_REG_W32(8, CFG_WRX_LOOK_BITTH); // WAN RX EFM-TC Looking Threshold

    IFX_REG_W32(eth_efmtc_crc_cfg, CFG_ETH_EFMTC_CRC);

    /*
     *  WRX DMA Channel Configuration Table
     */
    rx_config.deslen = dma_rx_descriptor_length;
    rx_port_cfg.mfs = ETH_MAX_FRAME_LENGTH;
    rx_port_cfg.local_state = 0;     // looking for sync
    rx_port_cfg.partner_state = 0;   // partner receiver is out of sync

    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ ) {
        // descriptor base is stored as a 28-bit word address
        rx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].rx_desc >> 2) & 0x0FFFFFFF;
        *WRX_DMA_CHANNEL_CONFIG(i) = rx_config;

        // port i uses DMA channel i (1:1 mapping)
        rx_port_cfg.dmach = i;
        *WRX_PORT_CONFIG(i) = rx_port_cfg;
    }

    /*
     *  WTX DMA Channel Configuration Table
     */
    tx_config.deslen = dma_tx_descriptor_length;
    // codeword thresholds for the TX scheduler
    // NOTE(review): 5/4 appear to be firmware-recommended defaults — confirm
    tx_port_cfg.tx_cwth1 = 5;
    tx_port_cfg.tx_cwth2 = 4;

    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
        tx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = tx_config;

        *WTX_PORT_CONFIG(i) = tx_port_cfg;
    }
}
1412
1413
1414
1415 /*
1416 * ####################################
1417 * Global Function
1418 * ####################################
1419 */
1420
1421 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
1422 {
1423 int i;
1424
1425 g_showtime = 1;
1426
1427 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1428 netif_carrier_on(g_net_dev[i]);
1429
1430 printk("enter showtime\n");
1431
1432 return 0;
1433 }
1434
1435 static int ptm_showtime_exit(void)
1436 {
1437 int i;
1438
1439 if ( !g_showtime )
1440 return -1;
1441
1442 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1443 netif_carrier_off(g_net_dev[i]);
1444
1445 g_showtime = 0;
1446
1447 printk("leave showtime\n");
1448
1449 return 0;
1450 }
1451
1452
/* Device-tree match table: exactly one entry is compiled in, selected by
 * the SoC family Kconfig symbol, so each build binds only to its PPE. */
static const struct of_device_id ltq_ptm_match[] = {
#ifdef CONFIG_DANUBE
	{ .compatible = "lantiq,ppe-danube", .data = NULL },
#elif defined CONFIG_AMAZON_SE
	{ .compatible = "lantiq,ppe-ase", .data = NULL },
#elif defined CONFIG_AR9
	{ .compatible = "lantiq,ppe-arx100", .data = NULL },
#elif defined CONFIG_VR9
	{ .compatible = "lantiq,ppe-xrx200", .data = NULL },
#endif
	{},
};
MODULE_DEVICE_TABLE(of, ltq_ptm_match);
1466
1467 /*
1468 * ####################################
1469 * Init/Cleanup API
1470 * ####################################
1471 */
1472
1473 /*
1474 * Description:
1475 * Initialize global variables, PP32, comunication structures, register IRQ
1476 * and register device.
1477 * Input:
1478 * none
1479 * Output:
1480 * 0 --- successful
1481 * else --- failure, usually it is negative value of error code
1482 */
1483 static int ltq_ptm_probe(struct platform_device *pdev)
1484 {
1485 int ret;
1486 struct port_cell_info port_cell = {0};
1487 void *xdata_addr = NULL;
1488 int i;
1489 char ver_str[256];
1490
1491 check_parameters();
1492
1493 ret = init_priv_data();
1494 if ( ret != 0 ) {
1495 err("INIT_PRIV_DATA_FAIL");
1496 goto INIT_PRIV_DATA_FAIL;
1497 }
1498
1499 ifx_ptm_init_chip(pdev);
1500 init_tables();
1501
1502 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1503 g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
1504 if ( g_net_dev[i] == NULL )
1505 goto ALLOC_NETDEV_FAIL;
1506 ptm_setup(g_net_dev[i], i);
1507 }
1508
1509 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1510 ret = register_netdev(g_net_dev[i]);
1511 if ( ret != 0 )
1512 goto REGISTER_NETDEV_FAIL;
1513 }
1514
1515 /* register interrupt handler */
1516 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
1517 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
1518 #else
1519 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
1520 #endif
1521 if ( ret ) {
1522 if ( ret == -EBUSY ) {
1523 err("IRQ may be occupied by other driver, please reconfig to disable it.");
1524 }
1525 else {
1526 err("request_irq fail");
1527 }
1528 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
1529 }
1530 disable_irq(PPE_MAILBOX_IGU1_INT);
1531
1532 ret = ifx_pp32_start(0);
1533 if ( ret ) {
1534 err("ifx_pp32_start fail!");
1535 goto PP32_START_FAIL;
1536 }
1537 IFX_REG_W32(0, MBOX_IGU1_IER);
1538 IFX_REG_W32(~0, MBOX_IGU1_ISRC);
1539
1540 enable_irq(PPE_MAILBOX_IGU1_INT);
1541
1542
1543 proc_file_create();
1544
1545 port_cell.port_num = 1;
1546 ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &xdata_addr);
1547 if ( g_showtime ) {
1548 ptm_showtime_enter(&port_cell, &xdata_addr);
1549 }
1550
1551 ifx_mei_atm_showtime_enter = ptm_showtime_enter;
1552 ifx_mei_atm_showtime_exit = ptm_showtime_exit;
1553
1554 ifx_ptm_version(ver_str);
1555 printk(KERN_INFO "%s", ver_str);
1556
1557 printk("ifxmips_ptm: PTM init succeed\n");
1558
1559 return 0;
1560
1561 PP32_START_FAIL:
1562 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1563 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
1564 i = ARRAY_SIZE(g_net_dev);
1565 REGISTER_NETDEV_FAIL:
1566 while ( i-- )
1567 unregister_netdev(g_net_dev[i]);
1568 i = ARRAY_SIZE(g_net_dev);
1569 ALLOC_NETDEV_FAIL:
1570 while ( i-- ) {
1571 free_netdev(g_net_dev[i]);
1572 g_net_dev[i] = NULL;
1573 }
1574 INIT_PRIV_DATA_FAIL:
1575 clear_priv_data();
1576 printk("ifxmips_ptm: PTM init failed\n");
1577 return ret;
1578 }
1579
1580 /*
1581 * Description:
1582 * Release memory, free IRQ, and deregister device.
1583 * Input:
1584 * none
1585 * Output:
1586 * none
1587 */
static int ltq_ptm_remove(struct platform_device *pdev)
{
    int i;

    // detach the showtime callbacks first so the DSL MEI driver can no
    // longer call into us while we tear down
    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;

    proc_file_delete();


    // stop the PP32 firmware core before releasing its mailbox IRQ
    ifx_pp32_stop(0);

    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);

    // unregister all net devices before freeing any of them
    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    ifx_ptm_uninit_chip();

    // release descriptor rings and preallocated skbs last
    clear_priv_data();

    return 0;
}
1616
/* Platform driver glue; binding is driven by the OF match table. */
static struct platform_driver ltq_ptm_driver = {
    .probe = ltq_ptm_probe,
    .remove = ltq_ptm_remove,
    .driver = {
        .name = "ptm",
        .owner = THIS_MODULE,
        .of_match_table = ltq_ptm_match,
    },
};

/* Registers the driver on module load and unregisters it on unload. */
module_platform_driver(ltq_ptm_driver);

MODULE_LICENSE("GPL");