/*
 * ltq-atm/ltq-ptm: add kernel 5.10 compatibility
 * [openwrt/staging/wigyori.git] / package / kernel / lantiq / ltq-ptm / src / ifxmips_ptm_adsl.c
 */
1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_adsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for Danube/
10 ** Amazon-SE/AR9)
11 ** COPYRIGHT : Copyright (c) 2006
12 ** Infineon Technologies AG
13 ** Am Campeon 1-12, 85579 Neubiberg, Germany
14 **
15 ** This program is free software; you can redistribute it and/or modify
16 ** it under the terms of the GNU General Public License as published by
17 ** the Free Software Foundation; either version 2 of the License, or
18 ** (at your option) any later version.
19 **
20 ** HISTORY
21 ** $Date $Author $Comment
22 ** 07 JUL 2009 Xu Liang Init Version
23 *******************************************************************************/
24
25
26
27 /*
28 * ####################################
29 * Head File
30 * ####################################
31 */
32
33 /*
34 * Common Head File
35 */
36 #include <linux/version.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/errno.h>
41 #include <linux/proc_fs.h>
42 #include <linux/init.h>
43 #include <linux/ioctl.h>
44 #include <linux/etherdevice.h>
45 #include <linux/interrupt.h>
46 #include <linux/netdevice.h>
47 #include <linux/platform_device.h>
48 #include <linux/of_device.h>
49 #include <asm/io.h>
50
51 /*
52 * Chip Specific Head File
53 */
54 #include "ifxmips_ptm_adsl.h"
55
56
57 #include <lantiq_soc.h>
58
59 /*
60 * ####################################
61 * Kernel Version Adaption
62 * ####################################
63 */
/* Compatibility shims: map the legacy 2.4-era MODULE_PARM()/MODULE_PARM_ARRAY()
 * interface onto the current module_param() API.  The second argument (the old
 * type string, e.g. "i") is ignored; all parameters below are plain ints. */
#define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
#define MODULE_PARM(a, b) module_param(a, int, 0)
66
67
68
69 /*
70 * ####################################
71 * Parameters to Configure PPE
72 * ####################################
73 */
74
75 static int write_desc_delay = 0x20; /* Write descriptor delay */
76
77 static int rx_max_packet_size = ETH_MAX_FRAME_LENGTH;
78 /* Max packet size for RX */
79
80 static int dma_rx_descriptor_length = 24; /* Number of descriptors per DMA RX channel */
81 static int dma_tx_descriptor_length = 24; /* Number of descriptors per DMA TX channel */
82
83 static int eth_efmtc_crc_cfg = 0x03100710; /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 */
84 /* rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */
85
86 MODULE_PARM(write_desc_delay, "i");
87 MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");
88
89 MODULE_PARM(rx_max_packet_size, "i");
90 MODULE_PARM_DESC(rx_max_packet_size, "Max packet size in byte for downstream ethernet frames");
91
92 MODULE_PARM(dma_rx_descriptor_length, "i");
93 MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptor assigned to DMA RX channel (>16)");
94 MODULE_PARM(dma_tx_descriptor_length, "i");
95 MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptor assigned to DMA TX channel (>16)");
96
97 MODULE_PARM(eth_efmtc_crc_cfg, "i");
98 MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC");
99
100
101
102 /*
103 * ####################################
104 * Definition
105 * ####################################
106 */
107
108
109 #define DUMP_SKB_LEN ~0
110
111
112
113 /*
114 * ####################################
115 * Declaration
116 * ####################################
117 */
118
119 /*
120 * Network Operations
121 */
122 static void ptm_setup(struct net_device *, int);
123 static struct net_device_stats *ptm_get_stats(struct net_device *);
124 static int ptm_open(struct net_device *);
125 static int ptm_stop(struct net_device *);
126 static unsigned int ptm_poll(int, unsigned int);
127 static int ptm_napi_poll(struct napi_struct *, int);
128 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
129 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
130 #if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
131 static void ptm_tx_timeout(struct net_device *);
132 #else
133 static void ptm_tx_timeout(struct net_device *, unsigned int txqueue);
134 #endif
135
136 /*
137 * DSL Data LED
138 */
139 static INLINE void adsl_led_flash(void);
140
141 /*
142 * buffer manage functions
143 */
144 static INLINE struct sk_buff* alloc_skb_rx(void);
145 //static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
146 static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
147 static INLINE int get_tx_desc(unsigned int, unsigned int *);
148
149 /*
150 * Mailbox handler and signal function
151 */
152 static INLINE int mailbox_rx_irq_handler(unsigned int);
153 static irqreturn_t mailbox_irq_handler(int, void *);
154 static INLINE void mailbox_signal(unsigned int, int);
155 #ifdef CONFIG_IFX_PTM_RX_TASKLET
156 static void do_ptm_tasklet(unsigned long);
157 #endif
158
159 /*
160 * Debug Functions
161 */
162 #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
163 static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
164 #else
165 #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
166 #endif
167 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
168 static void skb_swap(struct sk_buff *);
169 #else
170 #define skb_swap(skb) do {} while (0)
171 #endif
172
173 /*
174 * Proc File Functions
175 */
176 static INLINE void proc_file_create(void);
177 static INLINE void proc_file_delete(void);
178 static int proc_read_version(char *, char **, off_t, int, int *, void *);
179 static int proc_read_wanmib(char *, char **, off_t, int, int *, void *);
180 static int proc_write_wanmib(struct file *, const char *, unsigned long, void *);
181 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
182 static int proc_read_genconf(char *, char **, off_t, int, int *, void *);
183 #endif
184 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
185 static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
186 static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
187 #endif
188
189 /*
190 * Proc Help Functions
191 */
192 static INLINE int stricmp(const char *, const char *);
193 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
194 static INLINE int strincmp(const char *, const char *, int);
195 #endif
196 static INLINE int ifx_ptm_version(char *);
197
198 /*
199 * Init & clean-up functions
200 */
201 static INLINE void check_parameters(void);
202 static INLINE int init_priv_data(void);
203 static INLINE void clear_priv_data(void);
204 static INLINE void init_tables(void);
205
206 /*
207 * Exteranl Function
208 */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
#else
/* Fallback stub when the MEI (DSL modem) driver is not built: always report
 * "not in showtime" so this driver never attempts to pass traffic. */
static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
{
    if ( is_showtime != NULL )
        *is_showtime = 0;
    return 0;
}
#endif
219
220 /*
221 * External variable
222 */
223 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
224 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
225 extern int (*ifx_mei_atm_showtime_exit)(void);
226 #else
227 int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
228 EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
229 int (*ifx_mei_atm_showtime_exit)(void) = NULL;
230 EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
231 #endif
232
233
234
235 /*
236 * ####################################
237 * Local Variable
238 * ####################################
239 */
240
241 static struct ptm_priv_data g_ptm_priv_data;
242
/* net_device callbacks shared by both PTM interfaces ("dsl0", "dslfast0") */
static struct net_device_ops g_ptm_netdev_ops = {
    .ndo_get_stats       = ptm_get_stats,
    .ndo_open            = ptm_open,
    .ndo_stop            = ptm_stop,
    .ndo_start_xmit      = ptm_hard_start_xmit,
    .ndo_validate_addr   = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_do_ioctl        = ptm_ioctl,
    .ndo_tx_timeout      = ptm_tx_timeout,
};
253
254 static struct net_device *g_net_dev[2] = {0};
255 static char *g_net_dev_name[2] = {"dsl0", "dslfast0"};
256
257 #ifdef CONFIG_IFX_PTM_RX_TASKLET
258 static struct tasklet_struct g_ptm_tasklet[] = {
259 {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 0},
260 {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 1},
261 };
262 #endif
263
264 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
265
266 static struct proc_dir_entry* g_ptm_dir = NULL;
267
268 static int g_showtime = 0;
269
270
271
272 /*
273 * ####################################
274 * Local Function
275 * ####################################
276 */
277
278 static void ptm_setup(struct net_device *dev, int ndev)
279 {
280 #if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
281 netif_carrier_off(dev);
282 #endif
283
284 /* hook network operations */
285 dev->netdev_ops = &g_ptm_netdev_ops;
286 /* Allow up to 1508 bytes, for RFC4638 */
287 dev->max_mtu = ETH_DATA_LEN + 8;
288 netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
289 dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
290
291 dev->dev_addr[0] = 0x00;
292 dev->dev_addr[1] = 0x20;
293 dev->dev_addr[2] = 0xda;
294 dev->dev_addr[3] = 0x86;
295 dev->dev_addr[4] = 0x23;
296 dev->dev_addr[5] = 0x75 + ndev;
297 }
298
299 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
300 {
301 int ndev;
302
303 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
304 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
305
306 g_ptm_priv_data.itf[ndev].stats.rx_errors = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
307 g_ptm_priv_data.itf[ndev].stats.rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + (WAN_MIB_TABLE[ndev].wrx_correct_pdu - g_ptm_priv_data.itf[ndev].stats.rx_packets);
308
309 return &g_ptm_priv_data.itf[ndev].stats;
310 }
311
312 static int ptm_open(struct net_device *dev)
313 {
314 int ndev;
315
316 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
317 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
318
319 napi_enable(&g_ptm_priv_data.itf[ndev].napi);
320
321 IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
322
323 netif_start_queue(dev);
324
325 return 0;
326 }
327
328 static int ptm_stop(struct net_device *dev)
329 {
330 int ndev;
331
332 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
333 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
334
335 IFX_REG_W32_MASK((1 << ndev) | (1 << (ndev + 16)), 0, MBOX_IGU1_IER);
336
337 napi_disable(&g_ptm_priv_data.itf[ndev].napi);
338
339 netif_stop_queue(dev);
340
341 return 0;
342 }
343
344 static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
345 {
346 unsigned int work_done = 0;
347
348 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
349
350 while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes > 0 ) {
351 if ( mailbox_rx_irq_handler(ndev) < 0 )
352 break;
353
354 work_done++;
355 }
356
357 return work_done;
358 }
/*
 * NAPI poll callback: process up to 'budget' RX descriptors, then either
 * complete NAPI and re-enable the RX interrupt (idle) or stay scheduled.
 */
static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev;
    unsigned int work_done;

    /* map the napi context back to the interface index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != napi->dev; ndev++ );

    work_done = ptm_poll(ndev, budget);

    // interface down
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_ISRC);
        // double check: a descriptor may have completed between the first
        // test and the acknowledge above; if so, keep polling
        if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
            napi_complete(napi);
            // idle: re-enable the RX mailbox interrupt for this channel
            IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
            return work_done;
        }
    }

    // next round
    return work_done;
}
389
390 static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
391 {
392 int ndev;
393 unsigned int f_full;
394 int desc_base;
395 register struct tx_descriptor reg_desc = {0};
396
397 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
398 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
399
400 if ( !g_showtime ) {
401 err("not in showtime");
402 goto PTM_HARD_START_XMIT_FAIL;
403 }
404
405 /* allocate descriptor */
406 desc_base = get_tx_desc(ndev, &f_full);
407 if ( f_full ) {
408 netif_trans_update(dev);
409 netif_stop_queue(dev);
410
411 IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
412 IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
413 }
414 if ( desc_base < 0 )
415 goto PTM_HARD_START_XMIT_FAIL;
416
417 if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
418 dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
419 g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;
420
421 reg_desc.dataptr = (unsigned int)skb->data >> 2;
422 reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
423 reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
424 reg_desc.own = 1;
425 reg_desc.c = 1;
426 reg_desc.sop = reg_desc.eop = 1;
427
428 /* write discriptor to memory and write back cache */
429 g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
430 dma_cache_wback((unsigned long)skb->data, skb->len);
431 wmb();
432
433 dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);
434
435 if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
436 skb_swap(skb);
437 }
438
439 g_ptm_priv_data.itf[ndev].stats.tx_packets++;
440 g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;
441
442 netif_trans_update(dev);
443 mailbox_signal(ndev, 1);
444
445 adsl_led_flash();
446
447 return NETDEV_TX_OK;
448
449 PTM_HARD_START_XMIT_FAIL:
450 dev_kfree_skb_any(skb);
451 g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
452 return NETDEV_TX_OK;
453 }
454
455 static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
456 {
457 int ndev;
458
459 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
460 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
461
462 switch ( cmd )
463 {
464 case IFX_PTM_MIB_CW_GET:
465 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
466 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = WAN_MIB_TABLE[ndev].wrx_idle_cw;
467 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
468 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
469 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = 0;
470 break;
471 case IFX_PTM_MIB_FRAME_GET:
472 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
473 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
474 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
475 ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend = WAN_MIB_TABLE[ndev].wtx_total_pdu;
476 break;
477 case IFX_PTM_CFG_GET:
478 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
479 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
480 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
481 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
482 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
483 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
484 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
485 break;
486 case IFX_PTM_CFG_SET:
487 CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
488 CFG_ETH_EFMTC_CRC->rx_eth_crc_check = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
489 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) )
490 {
491 CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
492 CFG_ETH_EFMTC_CRC->rx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
493 }
494 else
495 {
496 CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
497 CFG_ETH_EFMTC_CRC->rx_tc_crc_len = 0;
498 }
499 CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
500 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) )
501 {
502 CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
503 CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
504 }
505 else
506 {
507 CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
508 CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
509 }
510 break;
511 default:
512 return -EOPNOTSUPP;
513 }
514
515 return 0;
516 }
517
/*
 * .ndo_tx_timeout: recover from a stalled TX queue.  The TX-done interrupt
 * for this channel is masked and the queue restarted; parked skbs are
 * released lazily by the next ptm_hard_start_xmit() that reuses their slot.
 * The txqueue parameter was added to this callback in kernel 5.6.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
static void ptm_tx_timeout(struct net_device *dev)
#else
static void ptm_tx_timeout(struct net_device *dev, unsigned int txqueue)
#endif
{
    int ndev;

    /* map net_device back to its interface index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* disable TX irq, release skb when sending new packet */
    IFX_REG_W32_MASK(1 << (ndev + 16), 0, MBOX_IGU1_IER);

    /* wake up TX queue */
    netif_wake_queue(dev);

    return;
}
537
/* DSL data LED trigger - intentionally a no-op in this build. */
static INLINE void adsl_led_flash(void)
{
}
541
/*
 * Allocate an RX buffer for the DMA engine.  skb->data is aligned to
 * DATA_BUFFER_ALIGNMENT (DMA burst size), the owning skb pointer is stashed
 * in the 4 bytes in front of skb->data (recovered later by
 * get_skb_rx_pointer()), and the data area's cache lines are invalidated so
 * the CPU reads what the DMA engine wrote.
 */
static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length alignment and reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put skb pointer in reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /* write back and invalidate cache over the stored pointer
         * (sizeof(skb) == pointer size, i.e. the 4 bytes just written) */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache over the whole data area */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}
563
#if 0   /* currently unused - kept for reference */
/* Allocate a burst-aligned TX buffer of at least 'size' bytes. */
static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length alignment */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
    return skb;
}
#endif
578
/*
 * Recover the sk_buff that owns an RX data buffer.  alloc_skb_rx() stores
 * the skb pointer in the 4 bytes immediately before skb->data, and the
 * descriptor carries skb->data >> 2; so the pointer lives at
 * ((dataptr - 1) << 2), read uncached through KSEG1.
 */
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}
592
593 static INLINE int get_tx_desc(unsigned int itf, unsigned int *f_full)
594 {
595 int desc_base = -1;
596 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[itf];
597
598 // assume TX is serial operation
599 // no protection provided
600
601 *f_full = 1;
602
603 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 ) {
604 desc_base = p_itf->tx_desc_pos;
605 if ( ++(p_itf->tx_desc_pos) == dma_tx_descriptor_length )
606 p_itf->tx_desc_pos = 0;
607 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 )
608 *f_full = 0;
609 }
610
611 return desc_base;
612 }
613
/*
 * Harvest one completed RX descriptor on channel 'ch'.
 * Returns 0 when a packet was taken and the descriptor recycled, or
 * -EAGAIN when the descriptor is still owned by the PP32 / not complete.
 */
static INLINE int mailbox_rx_irq_handler(unsigned int ch) // return: < 0 - descriptor not available, 0 - received one packet
{
    unsigned int ndev = ch;     /* channel index == interface index */
    struct sk_buff *skb;
    struct sk_buff *new_skb;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    int netif_rx_ret;

    desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
    if ( desc->own || !desc->c )    // if PP32 hold descriptor or descriptor not completed
        return -EAGAIN;
    /* advance ring position, wrapping at the ring length */
    if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
        g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;

    /* snapshot the descriptor, then recover the skb backing its buffer */
    reg_desc = *desc;
    skb = get_skb_rx_pointer(reg_desc.dataptr);

    if ( !reg_desc.err ) {
        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);

            // parse protocol header
            skb->dev = g_net_dev[ndev];
            skb->protocol = eth_type_trans(skb, skb->dev);

            netif_rx_ret = netif_receive_skb(skb);

            if ( netif_rx_ret != NET_RX_DROP ) {
                g_ptm_priv_data.itf[ndev].stats.rx_packets++;
                g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
            }

            /* hand the fresh buffer to the hardware */
            reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }
        /* NOTE(review): if alloc_skb_rx() fails, the old buffer is silently
         * recycled below and the frame is lost without any counter update -
         * confirm whether rx_dropped should be incremented here. */
    }
    else
        reg_desc.err = 0;

    /* recycle the descriptor back to the PP32 firmware */
    reg_desc.datalen = rx_max_packet_size;
    reg_desc.own = 1;
    reg_desc.c = 0;

    // update descriptor
    *desc = reg_desc;
    wmb();

    mailbox_signal(ndev, 0);

    adsl_led_flash();

    return 0;
}
672
673 static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
674 {
675 unsigned int isr;
676 int i;
677
678 isr = IFX_REG_R32(MBOX_IGU1_ISR);
679 IFX_REG_W32(isr, MBOX_IGU1_ISRC);
680 isr &= IFX_REG_R32(MBOX_IGU1_IER);
681
682 while ( (i = __fls(isr)) >= 0 ) {
683 isr ^= 1 << i;
684
685 if ( i >= 16 ) {
686 // TX
687 IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
688 i -= 16;
689 if ( i < MAX_ITF_NUMBER )
690 netif_wake_queue(g_net_dev[i]);
691 }
692 else {
693 // RX
694 #ifdef CONFIG_IFX_PTM_RX_INTERRUPT
695 while ( WRX_DMA_CHANNEL_CONFIG(i)->vlddes > 0 )
696 mailbox_rx_irq_handler(i);
697 #else
698 IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
699 napi_schedule(&g_ptm_priv_data.itf[i].napi);
700 #endif
701 }
702 }
703
704 return IRQ_HANDLED;
705 }
706
/*
 * Raise a mailbox interrupt towards the PP32 firmware to announce a new
 * descriptor: TX channels use bits 16+, RX channels bits 0-15.  Busy-waits
 * (bounded to 1000 iterations) for any previous, still-pending signal to be
 * consumed before raising the new one; the ASSERT reports if the firmware
 * never cleared it.
 */
static INLINE void mailbox_signal(unsigned int itf, int is_tx)
{
    int count = 1000;   /* bound for the busy wait */

    if ( is_tx ) {
        while ( MBOX_IGU3_ISR_ISR(itf + 16) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf + 16), MBOX_IGU3_ISRS);
    }
    else {
        while ( MBOX_IGU3_ISR_ISR(itf) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf), MBOX_IGU3_ISRS);
    }

    ASSERT(count != 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR));
}
724
#ifdef CONFIG_IFX_PTM_RX_TASKLET
/*
 * Tasklet-based RX bottom half (alternative to NAPI): poll up to 25
 * descriptors on channel 'arg', then either re-enable the RX interrupt
 * (idle) or reschedule itself (traffic still pending).
 */
static void do_ptm_tasklet(unsigned long arg)
{
    unsigned int work_to_do = 25;   /* budget per tasklet run */
    unsigned int work_done = 0;

    /* NOTE(review): 'arg >= 0' is always true for an unsigned long */
    ASSERT(arg >= 0 && arg < ARRAY_SIZE(g_net_dev), "arg = %lu (wrong value)", arg);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(arg)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(arg) < 0 )
            break;

        work_done++;
    }

    // interface down
    if ( !netif_running(g_net_dev[arg]) )
        return;

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_ISRC);
        // double check: a descriptor may have completed between the first
        // test and the acknowledge above
        if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
            // idle: re-enable the RX mailbox interrupt
            IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_IER);
            return;
        }
    }

    // next round
    tasklet_schedule(&g_ptm_tasklet[arg]);
}
#endif
759
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
/*
 * Hex-dump up to 'len' bytes of an skb to the kernel log, 16 bytes per row.
 * Only active when the matching TX/RX dump bit is set in ifx_ptm_dbg_enable.
 */
static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
{
    int i;

    if ( !(ifx_ptm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
        return;

    if ( skb->len < len )
        len = skb->len;

    /* sanity check against a corrupted descriptor/length */
    if ( len > rx_max_packet_size ) {
        printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
        return;
    }

    if ( ch >= 0 )
        printk("%s (port %d, ch %d)\n", title, port, ch);
    else
        printk("%s\n", title);
    printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
    /* i runs 1-based so the modulo tests mark row starts/ends */
    for ( i = 1; i <= len; i++ ) {
        if ( i % 16 == 1 )
            printk(" %4d:", i - 1);
        printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
        if ( i % 16 == 0 )
            printk("\n");
    }
    if ( (i - 1) % 16 != 0 )
        printk("\n");
}
#endif
792
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
/*
 * Debug helper (DBG_ENABLE_MASK_MAC_SWAP): swap source/destination MAC
 * addresses - and, for IPv4, source/destination IP addresses - in an
 * outgoing frame, then write the modified bytes back to memory for the DMA
 * engine.  Broadcast/multicast frames are left untouched.
 */
static void skb_swap(struct sk_buff *skb)
{
    unsigned char tmp[8];
    unsigned char *p = skb->data;

    if ( !(p[0] & 0x01) ) { // bypass broadcast/multicast
        // swap MAC
        memcpy(tmp, p, 6);
        memcpy(p, p + 6, 6);
        memcpy(p + 6, tmp, 6);
        p += 12;

        // bypass VLAN tags (TPID 0x8100)
        while ( p[0] == 0x81 && p[1] == 0x00 )
            p += 4;

        // IPv4 (EtherType 0x0800): swap the addresses at offset 12 of the
        // IP header (2 bytes EtherType + 12 bytes header = 14)
        if ( p[0] == 0x08 && p[1] == 0x00 ) {
            p += 14;
            memcpy(tmp, p, 4);
            memcpy(p, p + 4, 4);
            memcpy(p + 4, tmp, 4);
            p += 8;
        }

        /* write the modified span back so the DMA engine sees it */
        dma_cache_wback((unsigned long)skb->data, (unsigned long)p - (unsigned long)skb->data);
    }
}
#endif
823
/*
 * Create the driver's procfs entries under /proc/driver/ifx_ptm.
 * NOTE(review): this uses the legacy create_proc_read_entry()/
 * create_proc_entry() API, which was removed from mainline long before 5.10
 * - presumably this path is only compiled for older kernels; confirm.
 */
static INLINE void proc_file_create(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    struct proc_dir_entry *res;

    g_ptm_dir = proc_mkdir("driver/ifx_ptm", NULL);

    create_proc_read_entry("version",
                            0,
                            g_ptm_dir,
                            proc_read_version,
                            NULL);

    res = create_proc_entry("wanmib",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_wanmib;
        res->write_proc = proc_write_wanmib;
    }

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    create_proc_read_entry("genconf",
                            0,
                            g_ptm_dir,
                            proc_read_genconf,
                            NULL);

#ifdef CONFIG_AR9
    create_proc_read_entry("regs",
                            0,
                            g_ptm_dir,
                            ifx_ptm_proc_read_regs,
                            NULL);
#endif
#endif

    res = create_proc_entry("dbg",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_dbg;
        res->write_proc = proc_write_dbg;
    }
#endif
}
870
/*
 * Remove the procfs entries created by proc_file_create().
 * NOTE(review): "version", "wanmib" and the directory itself are only
 * created under ENABLE_DBG_PROC above, but are removed unconditionally
 * here - confirm the intended guard placement on one side or the other.
 */
static INLINE void proc_file_delete(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    remove_proc_entry("dbg", g_ptm_dir);
#endif

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
#ifdef CONFIG_AR9
    remove_proc_entry("regs", g_ptm_dir);
#endif

    remove_proc_entry("genconf", g_ptm_dir);
#endif

    remove_proc_entry("wanmib", g_ptm_dir);

    remove_proc_entry("version", g_ptm_dir);

    remove_proc_entry("driver/ifx_ptm", NULL);
}
891
892 static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
893 {
894 int len = 0;
895
896 len += ifx_ptm_version(buf + len);
897
898 if ( offset >= len ) {
899 *start = buf;
900 *eof = 1;
901 return 0;
902 }
903 *start = buf + offset;
904 if ( (len -= offset) > count )
905 return count;
906 *eof = 1;
907 return len;
908 }
909
910 static int proc_read_wanmib(char *page, char **start, off_t off, int count, int *eof, void *data)
911 {
912 int len = 0;
913 int i;
914 char *title[] = {
915 "dsl0\n",
916 "dslfast0\n"
917 };
918
919 for ( i = 0; i < ARRAY_SIZE(title); i++ ) {
920 len += sprintf(page + off + len, title[i]);
921 len += sprintf(page + off + len, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu);
922 len += sprintf(page + off + len, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu_bytes);
923 len += sprintf(page + off + len, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu);
924 len += sprintf(page + off + len, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu_bytes);
925 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu);
926 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu_bytes);
927 len += sprintf(page + off + len, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_nodesc_drop_pdu);
928 len += sprintf(page + off + len, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_len_violation_drop_pdu);
929 len += sprintf(page + off + len, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE[i].wrx_idle_bytes);
930 len += sprintf(page + off + len, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE[i].wrx_nonidle_cw);
931 len += sprintf(page + off + len, " wrx_idle_cw = %d\n", WAN_MIB_TABLE[i].wrx_idle_cw);
932 len += sprintf(page + off + len, " wrx_err_cw = %d\n", WAN_MIB_TABLE[i].wrx_err_cw);
933 len += sprintf(page + off + len, " wtx_total_pdu = %d\n", WAN_MIB_TABLE[i].wtx_total_pdu);
934 len += sprintf(page + off + len, " wtx_total_bytes = %d\n", WAN_MIB_TABLE[i].wtx_total_bytes);
935 }
936
937 *eof = 1;
938
939 return len;
940 }
941
/*
 * Legacy procfs write handler: accept "clear" or "clean" (case-insensitive,
 * surrounding whitespace ignored) and zero the WAN MIB counters of both
 * interfaces.  Any other input is silently ignored; always reports the full
 * count as consumed.
 */
static int proc_write_wanmib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[2048];
    char *p;
    int len, rlen;

    int i;

    /* bound the copy to the local buffer, leaving room for the NUL */
    len = count < sizeof(str) ? count : sizeof(str) - 1;
    /* rlen = number of bytes actually copied from user space */
    rlen = len - copy_from_user(str, buf, len);
    /* strip trailing whitespace/control characters, then terminate */
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    /* skip leading whitespace */
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return count;

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clean") == 0 ) {
        for ( i = 0; i < 2; i++ )
            memset((void*)&WAN_MIB_TABLE[i], 0, sizeof(WAN_MIB_TABLE[i]));
    }

    return count;
}
966
967 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
968
/*
 *  Legacy procfs read handler: dumps the PPE general configuration
 *  (write-descriptor delay, RX/TX DMA channel enable bits, EFM-TC CRC
 *  config, RX/TX port and DMA channel tables) in human-readable form.
 *
 *  page/start/off/count/eof follow the pre-3.10 read_proc_t contract:
 *  the full text is rendered into a local buffer and only the bytes
 *  falling into the caller's [off, off+count) window are copied out.
 */
static int proc_read_genconf(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int len_max = off + count;  /* last byte offset the caller can accept */
    char *pstr;
    char str[2048];             /* scratch buffer; NOTE(review): llen is not bounds-checked
                                   against sizeof(str) — assumed large enough for this fixed dump */
    int llen = 0;
    int i;
    unsigned long bit;

    pstr = *start = page;

    __sync();                   /* memory barrier so the register reads below are not reordered */

    llen += sprintf(str + llen, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY, IFX_REG_R32(CFG_WAN_WRDES_DELAY));
    /* one on/off flag per RX DMA channel, taken from the enable bitmask */
    llen += sprintf(str + llen, "CFG_WRX_DMACH_ON (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_RX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WRX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    /* same for TX DMA channels */
    llen += sprintf(str + llen, "CFG_WTX_DMACH_ON (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_TX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WTX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WRX_LOOK_BITTH (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH, IFX_REG_R32(CFG_WRX_LOOK_BITTH));
    /* EFM-TC / Ethernet CRC configuration bitfields */
    llen += sprintf(str + llen, "CFG_ETH_EFMTC_CRC (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC, CFG_ETH_EFMTC_CRC->rx_tc_crc_len, CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? " on" : "off");
    llen += sprintf(str + llen, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? " on" : "off", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? " on" : "off");
    llen += sprintf(str + llen, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len, CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ? " on" : "off");
    llen += sprintf(str + llen, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? " on" : "off");

    /* per-port RX configuration; desba is a word address, hence the <<2 | KSEG1 to get a CPU pointer */
    llen += sprintf(str + llen, "RX Port:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i, (unsigned int)WRX_PORT_CONFIG(i), WRX_PORT_CONFIG(i)->mfs, WRX_PORT_CONFIG(i)->dmach, WRX_PORT_CONFIG(i)->local_state, WRX_PORT_CONFIG(i)->partner_state);
    llen += sprintf(str + llen, "RX DMA Channel:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i), WRX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WRX_DMA_CHANNEL_CONFIG(i)->deslen, WRX_DMA_CHANNEL_CONFIG(i)->vlddes);

    llen += sprintf(str + llen, "TX Port:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i, (unsigned int)WTX_PORT_CONFIG(i), WTX_PORT_CONFIG(i)->tx_cwth2, WTX_PORT_CONFIG(i)->tx_cwth1);
    llen += sprintf(str + llen, "TX DMA Channel:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);

    /* copy only the part of the rendered text inside the caller's window */
    if ( len <= off && len + llen > off )
    {
        memcpy(pstr, str + off - len, len + llen - off);
        pstr += len + llen - off;
    }
    else if ( len > off )
    {
        memcpy(pstr, str, llen);
        pstr += llen;
    }
    len += llen;
    if ( len >= len_max )
        goto PROC_READ_GENCONF_OVERRUN_END;

    *eof = 1;  /* whole dump delivered in this call */

    return len - off;

PROC_READ_GENCONF_OVERRUN_END:
    /* window filled before the dump ended: report only the bytes before this chunk */
    return len - llen - off;
}
1033
1034 #endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1035
1036 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1037
/*
 *  Legacy procfs read handler: prints the enabled/disabled state of each
 *  debug flag in the global ifx_ptm_dbg_enable bitmask.
 *
 *  NOTE(review): output is written at page + off + len and len - not
 *  len - off - is returned; this only behaves for off == 0, which holds
 *  here because *eof is set after a single small read.
 */
static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "error print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "debug print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "assert - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "mac swap - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ? "enabled" : "disabled");

    *eof = 1;

    return len;
}
1053
/*
 *  Legacy procfs write handler: parses commands of the form
 *      echo <enable|disable> [err dbg assert rx tx init qos swap all ...]
 *  and sets/clears the matching bits in ifx_ptm_dbg_enable.
 *  Also accepts "enter"/"leave" to force DSL showtime transitions for
 *  debugging.  Always consumes the full write ("return count").
 */
static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
{
    /* keyword table: two spellings (long, short) per flag, each with a
       leading space so the separator between tokens is matched too;
       entry i maps to dbg_enable_mask[i >> 1] */
    static const char *dbg_enable_mask_str[] = {
        " error print",
        " err",
        " debug print",
        " dbg",
        " assert",
        " assert",
        " dump rx skb",
        " rx",
        " dump tx skb",
        " tx",
        " dump init",
        " init",
        " dump qos",
        " qos",
        " mac swap",
        " swap",
        " all"
    };
    /* precomputed strlen() of each keyword above (kept in sync by hand) */
    static const int dbg_enable_mask_str_len[] = {
        12, 4,
        12, 4,
        7, 7,
        12, 3,
        12, 3,
        10, 5,
        9, 4,
        9, 5,
        4
    };
    unsigned int dbg_enable_mask[] = {
        DBG_ENABLE_MASK_ERR,
        DBG_ENABLE_MASK_DEBUG_PRINT,
        DBG_ENABLE_MASK_ASSERT,
        DBG_ENABLE_MASK_DUMP_SKB_RX,
        DBG_ENABLE_MASK_DUMP_SKB_TX,
        DBG_ENABLE_MASK_DUMP_INIT,
        DBG_ENABLE_MASK_DUMP_QOS,
        DBG_ENABLE_MASK_MAC_SWAP,
        DBG_ENABLE_MASK_ALL
    };

    char str[2048];     /* NOTE(review): 2 KiB on the kernel stack */
    char *p;

    int len, rlen;

    int f_enable = 0;   /* +1 = enable, -1 = disable, 0 = no command */
    int i;

    /* copy user data, clamp to buffer; rlen counts bytes actually copied
       (copy_from_user returns the number NOT copied) */
    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    /* strip trailing whitespace/control chars, NUL-terminate */
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    /* skip leading whitespace; empty input is ignored */
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    // debugging feature for enter/leave showtime
    if ( strincmp(p, "enter", 5) == 0 && ifx_mei_atm_showtime_enter != NULL )
        ifx_mei_atm_showtime_enter(NULL, NULL);
    else if ( strincmp(p, "leave", 5) == 0 && ifx_mei_atm_showtime_exit != NULL )
        ifx_mei_atm_showtime_exit();

    if ( strincmp(p, "enable", 6) == 0 ) {
        p += 6;
        f_enable = 1;
    }
    else if ( strincmp(p, "disable", 7) == 0 ) {
        p += 7;
        f_enable = -1;
    }
    else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
        printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
    }

    if ( f_enable ) {
        if ( *p == 0 ) {
            /* bare enable/disable: toggle everything except MAC swap */
            if ( f_enable > 0 )
                ifx_ptm_dbg_enable |= DBG_ENABLE_MASK_ALL & ~DBG_ENABLE_MASK_MAC_SWAP;
            else
                ifx_ptm_dbg_enable &= ~DBG_ENABLE_MASK_ALL | DBG_ENABLE_MASK_MAC_SWAP;
        }
        else {
            /* consume keywords one by one; the inner "break" leaves
               i < ARRAY_SIZE so the outer loop retries on the advanced p,
               and an unrecognized (or exhausted) token ends the loop */
            do {
                for ( i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++ )
                    if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
                        if ( f_enable > 0 )
                            ifx_ptm_dbg_enable |= dbg_enable_mask[i >> 1];
                        else
                            ifx_ptm_dbg_enable &= ~dbg_enable_mask[i >> 1];
                        p += dbg_enable_mask_str_len[i];
                        break;
                    }
            } while ( i < ARRAY_SIZE(dbg_enable_mask_str) );
        }
    }

    return count;
}
1157
1158 #endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1159
1160 static INLINE int stricmp(const char *p1, const char *p2)
1161 {
1162 int c1, c2;
1163
1164 while ( *p1 && *p2 )
1165 {
1166 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1167 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1168 if ( (c1 -= c2) )
1169 return c1;
1170 p1++;
1171 p2++;
1172 }
1173
1174 return *p1 - *p2;
1175 }
1176
1177 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1178 static INLINE int strincmp(const char *p1, const char *p2, int n)
1179 {
1180 int c1 = 0, c2;
1181
1182 while ( n && *p1 && *p2 )
1183 {
1184 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1185 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1186 if ( (c1 -= c2) )
1187 return c1;
1188 p1++;
1189 p2++;
1190 n--;
1191 }
1192
1193 return n ? *p1 - *p2 : c1;
1194 }
1195 #endif
1196
1197 static INLINE int ifx_ptm_version(char *buf)
1198 {
1199 int len = 0;
1200 unsigned int major, minor;
1201
1202 ifx_ptm_get_fw_ver(&major, &minor);
1203
1204 len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
1205 len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
1206
1207 return len;
1208 }
1209
1210 static INLINE void check_parameters(void)
1211 {
1212 /* There is a delay between PPE write descriptor and descriptor is */
1213 /* really stored in memory. Host also has this delay when writing */
1214 /* descriptor. So PPE will use this value to determine if the write */
1215 /* operation makes effect. */
1216 if ( write_desc_delay < 0 )
1217 write_desc_delay = 0;
1218
1219 /* Because of the limitation of length field in descriptors, the packet */
1220 /* size could not be larger than 64K minus overhead size. */
1221 if ( rx_max_packet_size < ETH_MIN_FRAME_LENGTH )
1222 rx_max_packet_size = ETH_MIN_FRAME_LENGTH;
1223 else if ( rx_max_packet_size > 65536 - 1 )
1224 rx_max_packet_size = 65536 - 1;
1225
1226 if ( dma_rx_descriptor_length < 2 )
1227 dma_rx_descriptor_length = 2;
1228 if ( dma_tx_descriptor_length < 2 )
1229 dma_tx_descriptor_length = 2;
1230 }
1231
/*
 *  Allocate and initialize the driver's private data: RX/TX descriptor
 *  rings and the TX skb pointer array, one set per interface.
 *  RX descriptors are pre-filled with freshly allocated skbs and handed
 *  to the PPE (own = 1).  Returns 0 on success, -1 on allocation failure
 *  (caller is expected to run clear_priv_data() for cleanup).
 */
static INLINE int init_priv_data(void)
{
    void *p;
    int i;
    struct rx_descriptor rx_desc = {0};
    struct sk_buff *skb;
    volatile struct rx_descriptor *p_rx_desc;
    volatile struct tx_descriptor *p_tx_desc;
    struct sk_buff **ppskb;

    // clear ptm private data structure
    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    // allocate memory for RX descriptors
    /* extra DESC_ALIGNMENT bytes so the ring can be aligned below */
    p = kzalloc(MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    /* drop any stale cache lines; the ring is accessed uncached via KSEG1 */
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.rx_desc_base = p;
    //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);

    // allocate memory for TX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.tx_desc_base = p;

    // allocate memroy for TX skb pointers
    /* +4 slack for the 4-byte alignment applied below */
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_wback_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_ptm_priv_data.tx_skb_base = p;

    /* align both descriptor bases and map them through KSEG1 (uncached) */
    p_rx_desc = (volatile struct rx_descriptor *)((((unsigned int)g_ptm_priv_data.rx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_ptm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    ppskb = (struct sk_buff **)(((unsigned int)g_ptm_priv_data.tx_skb_base + 3) & ~3);
    /* carve the big allocations into per-interface rings */
    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        g_ptm_priv_data.itf[i].rx_desc = &p_rx_desc[i * dma_rx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
    }

    /* template for all RX descriptors: owned by the PPE, single-buffer
       frame (sop & eop), data offset for MAC address alignment */
    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.id = 0;
    rx_desc.err = 0;
    rx_desc.datalen = rx_max_packet_size;
    for ( i = 0; i < MAX_ITF_NUMBER * dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return -1;
        /* dataptr holds the buffer's word address (28 bits, addr >> 2) */
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        p_rx_desc[i] = rx_desc;
    }

    return 0;
}
1294
1295 static INLINE void clear_priv_data(void)
1296 {
1297 int i, j;
1298 struct sk_buff *skb;
1299
1300 for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
1301 if ( g_ptm_priv_data.itf[i].tx_skb != NULL ) {
1302 for ( j = 0; j < dma_tx_descriptor_length; j++ )
1303 if ( g_ptm_priv_data.itf[i].tx_skb[j] != NULL )
1304 dev_kfree_skb_any(g_ptm_priv_data.itf[i].tx_skb[j]);
1305 }
1306 if ( g_ptm_priv_data.itf[i].rx_desc != NULL ) {
1307 for ( j = 0; j < dma_rx_descriptor_length; j++ ) {
1308 if ( g_ptm_priv_data.itf[i].rx_desc[j].sop || g_ptm_priv_data.itf[i].rx_desc[j].eop ) { // descriptor initialized
1309 skb = get_skb_rx_pointer(g_ptm_priv_data.itf[i].rx_desc[j].dataptr);
1310 dev_kfree_skb_any(skb);
1311 }
1312 }
1313 }
1314 }
1315
1316 if ( g_ptm_priv_data.rx_desc_base != NULL )
1317 kfree(g_ptm_priv_data.rx_desc_base);
1318
1319 if ( g_ptm_priv_data.tx_desc_base != NULL )
1320 kfree(g_ptm_priv_data.tx_desc_base);
1321
1322 if ( g_ptm_priv_data.tx_skb_base != NULL )
1323 kfree(g_ptm_priv_data.tx_skb_base);
1324 }
1325
/*
 *  Program the PPE's configuration tables and general registers:
 *  clear CDM data memory, enable all RX/TX DMA channels, set the CRC
 *  configuration, and point every WRX/WTX DMA channel at the descriptor
 *  rings set up by init_priv_data().
 *  Must run after init_priv_data() (it reads the ring addresses).
 */
static INLINE void init_tables(void)
{
    int i;
    volatile unsigned int *p;
    struct wrx_dma_channel_config rx_config = {0};
    struct wtx_dma_channel_config tx_config = {0};
    struct wrx_port_cfg_status rx_port_cfg = { 0 };
    struct wtx_port_cfg tx_port_cfg = { 0 };

    /*
     *  CDM Block 1
     */
    IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG);  // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
    p = CDM_DATA_MEMORY(0, 0);                                              // Clear CDM block 1
    for ( i = 0; i < CDM_DATA_MEMORY_DWLEN; i++, p++ )
        IFX_REG_W32(0, p);

    /*
     *  General Registers
     */
    IFX_REG_W32(write_desc_delay, CFG_WAN_WRDES_DELAY);
    /* enable every RX/TX DMA channel (one bit per channel) */
    IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER) - 1, CFG_WRX_DMACH_ON);
    IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1, CFG_WTX_DMACH_ON);

    IFX_REG_W32(8, CFG_WRX_LOOK_BITTH); // WAN RX EFM-TC Looking Threshold

    IFX_REG_W32(eth_efmtc_crc_cfg, CFG_ETH_EFMTC_CRC);

    /*
     *  WRX DMA Channel Configuration Table
     */
    rx_config.deslen = dma_rx_descriptor_length;
    rx_port_cfg.mfs = ETH_MAX_FRAME_LENGTH;
    rx_port_cfg.local_state = 0;     // looking for sync
    rx_port_cfg.partner_state = 0;   // parter receiver is out of sync

    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ ) {
        /* desba takes the ring's word address (28 bits, addr >> 2) */
        rx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].rx_desc >> 2) & 0x0FFFFFFF;
        *WRX_DMA_CHANNEL_CONFIG(i) = rx_config;

        /* 1:1 mapping between RX port and DMA channel */
        rx_port_cfg.dmach = i;
        *WRX_PORT_CONFIG(i) = rx_port_cfg;
    }

    /*
     *  WTX DMA Channel Configuration Table
     */
    tx_config.deslen = dma_tx_descriptor_length;
    tx_port_cfg.tx_cwth1 = 5;
    tx_port_cfg.tx_cwth2 = 4;

    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
        tx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = tx_config;

        *WTX_PORT_CONFIG(i) = tx_port_cfg;
    }
}
1384
1385
1386
1387 /*
1388 * ####################################
1389 * Global Function
1390 * ####################################
1391 */
1392
1393 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
1394 {
1395 int i;
1396
1397 g_showtime = 1;
1398
1399 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1400 netif_carrier_on(g_net_dev[i]);
1401
1402 printk("enter showtime\n");
1403
1404 return 0;
1405 }
1406
1407 static int ptm_showtime_exit(void)
1408 {
1409 int i;
1410
1411 if ( !g_showtime )
1412 return -1;
1413
1414 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1415 netif_carrier_off(g_net_dev[i]);
1416
1417 g_showtime = 0;
1418
1419 printk("leave showtime\n");
1420
1421 return 0;
1422 }
1423
1424
/*
 *  OF match table: exactly one compatible entry is compiled in, chosen
 *  by the SoC Kconfig symbol, so the driver binds only to the PPE node
 *  of the SoC it was built for.
 */
static const struct of_device_id ltq_ptm_match[] = {
#ifdef CONFIG_DANUBE
	{ .compatible = "lantiq,ppe-danube", .data = NULL },
#elif defined CONFIG_AMAZON_SE
	{ .compatible = "lantiq,ppe-ase", .data = NULL },
#elif defined CONFIG_AR9
	{ .compatible = "lantiq,ppe-arx100", .data = NULL },
#elif defined CONFIG_VR9
	{ .compatible = "lantiq,ppe-xrx200", .data = NULL },
#endif
	{},
};
MODULE_DEVICE_TABLE(of, ltq_ptm_match);
1438
1439 /*
1440 * ####################################
1441 * Init/Cleanup API
1442 * ####################################
1443 */
1444
1445 /*
1446 * Description:
1447 * Initialize global variables, PP32, comunication structures, register IRQ
1448 * and register device.
1449 * Input:
1450 * none
1451 * Output:
1452 * 0 --- successful
1453 * else --- failure, usually it is negative value of error code
1454 */
static int ltq_ptm_probe(struct platform_device *pdev)
{
    int ret;
    struct port_cell_info port_cell = {0};
    void *xdata_addr = NULL;
    int i;
    char ver_str[256];

    /* sanitize module parameters before anything is programmed */
    check_parameters();

    /* allocate descriptor rings and RX buffers */
    ret = init_priv_data();
    if ( ret != 0 ) {
        err("INIT_PRIV_DATA_FAIL");
        goto INIT_PRIV_DATA_FAIL;
    }

    /* bring up the PPE hardware, then program its tables */
    ifx_ptm_init_chip(pdev);
    init_tables();

    /* allocate all netdevs first, then register them, so the error
       unwinding below can treat the two steps separately */
    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
        if ( g_net_dev[i] == NULL )
            goto ALLOC_NETDEV_FAIL;
        ptm_setup(g_net_dev[i], i);
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        ret = register_netdev(g_net_dev[i]);
        if ( ret != 0 )
            goto REGISTER_NETDEV_FAIL;
    }

    /* register interrupt handler */
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
    if ( ret ) {
        if ( ret == -EBUSY ) {
            err("IRQ may be occupied by other driver, please reconfig to disable it.");
        }
        else {
            err("request_irq fail");
        }
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }
    /* keep the IRQ masked until the PP32 firmware is running */
    disable_irq(PPE_MAILBOX_IGU1_INT);

    /* start the PP32 packet-processing firmware */
    ret = ifx_pp32_start(0);
    if ( ret ) {
        err("ifx_pp32_start fail!");
        goto PP32_START_FAIL;
    }
    /* mask all mailbox interrupt sources and clear any pending ones */
    IFX_REG_W32(0, MBOX_IGU1_IER);
    IFX_REG_W32(~0, MBOX_IGU1_ISRC);

    enable_irq(PPE_MAILBOX_IGU1_INT);


    proc_file_create();

    /* the DSL line may already be up; if so, enter showtime immediately */
    port_cell.port_num = 1;
    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &xdata_addr);
    if ( g_showtime ) {
        ptm_showtime_enter(&port_cell, &xdata_addr);
    }

    /* hook our showtime transitions into the MEI driver */
    ifx_mei_atm_showtime_enter = ptm_showtime_enter;
    ifx_mei_atm_showtime_exit = ptm_showtime_exit;

    ifx_ptm_version(ver_str);
    printk(KERN_INFO "%s", ver_str);

    printk("ifxmips_ptm: PTM init succeed\n");

    return 0;

    /* error unwinding: each label undoes the steps completed before the
       failing one; i carries the count of netdevs to unwind */
PP32_START_FAIL:
    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
    i = ARRAY_SIZE(g_net_dev);      /* all netdevs were registered */
REGISTER_NETDEV_FAIL:
    while ( i-- )
        unregister_netdev(g_net_dev[i]);
    i = ARRAY_SIZE(g_net_dev);      /* all netdevs were allocated */
ALLOC_NETDEV_FAIL:
    while ( i-- ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }
INIT_PRIV_DATA_FAIL:
    clear_priv_data();
    printk("ifxmips_ptm: PTM init failed\n");
    return ret;
}
1547
1548 /*
1549 * Description:
1550 * Release memory, free IRQ, and deregister device.
1551 * Input:
1552 * none
1553 * Output:
1554 * none
1555 */
static int ltq_ptm_remove(struct platform_device *pdev)
{
    int i;

    /* detach from the MEI driver first so no further showtime callbacks
       arrive while we tear down */
    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;

    proc_file_delete();


    /* stop the PP32 firmware before releasing its IRQ */
    ifx_pp32_stop(0);

    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);

    /* unregister all netdevs, then free them */
    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    /* power down the PPE and release descriptor rings/buffers */
    ifx_ptm_uninit_chip();

    clear_priv_data();

    return 0;
}
1584
/*
 *  Platform driver glue; bound to the SoC's PPE device through the
 *  "lantiq,ppe-*" compatible strings in ltq_ptm_match.
 */
static struct platform_driver ltq_ptm_driver = {
    .probe = ltq_ptm_probe,
    .remove = ltq_ptm_remove,
    .driver = {
        .name = "ptm",
        .owner = THIS_MODULE,   /* NOTE(review): redundant on modern kernels; platform_driver_register() sets it */
        .of_match_table = ltq_ptm_match,
    },
};

/* expands to module init/exit that register/unregister the driver */
module_platform_driver(ltq_ptm_driver);

MODULE_LICENSE("GPL");