openwrt/openwrt.git: package/kernel/lantiq/ltq-ptm/src/ifxmips_ptm_adsl.c
/******************************************************************************
**
** FILE NAME    : ifxmips_ptm_adsl.c
** PROJECT      : UEIP
** MODULES      : PTM
**
** DATE         : 7 Jul 2009
** AUTHOR       : Xu Liang
** DESCRIPTION  : PTM driver common source file (core functions for Danube/
**                Amazon-SE/AR9)
** COPYRIGHT    : Copyright (c) 2006
**                Infineon Technologies AG
**                Am Campeon 1-12, 85579 Neubiberg, Germany
**
**    This program is free software; you can redistribute it and/or modify
**    it under the terms of the GNU General Public License as published by
**    the Free Software Foundation; either version 2 of the License, or
**    (at your option) any later version.
**
** HISTORY
** $Date        $Author      $Comment
** 07 JUL 2009  Xu Liang     Init Version
*******************************************************************************/


/*
 * ####################################
 *            Header Files
 * ####################################
 */

/*
 *  Common Header Files
 */
#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <asm/io.h>

/*
 *  Chip-Specific Header Files
 */
#include "ifxmips_ptm_adsl.h"

#include <lantiq_soc.h>

/*
 * ####################################
 *      Kernel Version Adaptation
 * ####################################
 */
#define MODULE_PARM_ARRAY(a, b)   module_param_array(a, int, NULL, 0)
#define MODULE_PARM(a, b)         module_param(a, int, 0)



/*
 * ####################################
 *    Parameters to Configure PPE
 * ####################################
 */

static int write_desc_delay = 0x20;     /* Write descriptor delay */

static int rx_max_packet_size = ETH_MAX_FRAME_LENGTH;
                                        /* Max packet size for RX */

static int dma_rx_descriptor_length = 24;   /* Number of descriptors per DMA RX channel */
static int dma_tx_descriptor_length = 24;   /* Number of descriptors per DMA TX channel */

static int eth_efmtc_crc_cfg = 0x03100710;  /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 */
                                            /*          rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */
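
/*
 * A hedged reading of the default 0x03100710, inferred from the comment
 * above rather than from a register manual: the upper half-word 0x0310
 * would pack the two TX flags (tx_eth_crc_check, tx_tc_crc_check) above
 * an 8-bit tx_tc_crc_len of 0x10 (16), and the lower half-word 0x0710
 * the three RX flags above rx_tc_crc_len = 16. The authoritative layout
 * is the structure behind CFG_ETH_EFMTC_CRC in ifxmips_ptm_adsl.h.
 */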

MODULE_PARM(write_desc_delay, "i");
MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between writing a descriptor and it taking effect in external RAM");

MODULE_PARM(rx_max_packet_size, "i");
MODULE_PARM_DESC(rx_max_packet_size, "Max packet size in bytes for downstream ethernet frames");

MODULE_PARM(dma_rx_descriptor_length, "i");
MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptors assigned to DMA RX channel (>16)");
MODULE_PARM(dma_tx_descriptor_length, "i");
MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptors assigned to DMA TX channel (>16)");

MODULE_PARM(eth_efmtc_crc_cfg, "i");
MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC");


/*
 * ####################################
 *            Definitions
 * ####################################
 */


#define DUMP_SKB_LEN    ~0



/*
 * ####################################
 *            Declarations
 * ####################################
 */

/*
 *  Network Operations
 */
static void ptm_setup(struct net_device *, int);
static struct net_device_stats *ptm_get_stats(struct net_device *);
static int ptm_open(struct net_device *);
static int ptm_stop(struct net_device *);
static unsigned int ptm_poll(int, unsigned int);
static int ptm_napi_poll(struct napi_struct *, int);
static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
static int ptm_ioctl(struct net_device *, struct ifreq *, int);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
static void ptm_tx_timeout(struct net_device *);
#else
static void ptm_tx_timeout(struct net_device *, unsigned int txqueue);
#endif

/*
 *  DSL Data LED
 */
static INLINE void adsl_led_flash(void);

/*
 *  Buffer management functions
 */
static INLINE struct sk_buff* alloc_skb_rx(void);
//static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
static INLINE int get_tx_desc(unsigned int, unsigned int *);

/*
 *  Mailbox handler and signal functions
 */
static INLINE int mailbox_rx_irq_handler(unsigned int);
static irqreturn_t mailbox_irq_handler(int, void *);
static INLINE void mailbox_signal(unsigned int, int);
#ifdef CONFIG_IFX_PTM_RX_TASKLET
static void do_ptm_tasklet(unsigned long);
#endif

/*
 *  Debug Functions
 */
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
#else
#define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
#endif
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static void skb_swap(struct sk_buff *);
#else
#define skb_swap(skb) do {} while (0)
#endif

/*
 *  Proc File Functions
 */
static INLINE void proc_file_create(void);
static INLINE void proc_file_delete(void);
static int proc_read_version(char *, char **, off_t, int, int *, void *);
static int proc_read_wanmib(char *, char **, off_t, int, int *, void *);
static int proc_write_wanmib(struct file *, const char *, unsigned long, void *);
#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
static int proc_read_genconf(char *, char **, off_t, int, int *, void *);
#endif
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
#endif

/*
 *  Proc Helper Functions
 */
static INLINE int stricmp(const char *, const char *);
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static INLINE int strincmp(const char *, const char *, int);
#endif
static INLINE int ifx_ptm_version(char *);

/*
 *  Init & clean-up functions
 */
static INLINE void check_parameters(void);
static INLINE int init_priv_data(void);
static INLINE void clear_priv_data(void);
static INLINE void init_tables(void);

/*
 *  External Functions
 */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
#else
static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
{
    if ( is_showtime != NULL )
        *is_showtime = 0;
    return 0;
}
#endif

/*
 *  External Variables
 */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
extern int (*ifx_mei_atm_showtime_exit)(void);
#else
int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
int (*ifx_mei_atm_showtime_exit)(void) = NULL;
EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
#endif



/*
 * ####################################
 *          Local Variables
 * ####################################
 */

static struct ptm_priv_data g_ptm_priv_data;

static struct net_device_ops g_ptm_netdev_ops = {
    .ndo_get_stats       = ptm_get_stats,
    .ndo_open            = ptm_open,
    .ndo_stop            = ptm_stop,
    .ndo_start_xmit      = ptm_hard_start_xmit,
    .ndo_validate_addr   = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
    .ndo_do_ioctl        = ptm_ioctl,
    .ndo_tx_timeout      = ptm_tx_timeout,
};

static struct net_device *g_net_dev[2] = {0};
static char *g_net_dev_name[2] = {"dsl0", "dslfast0"};

#ifdef CONFIG_IFX_PTM_RX_TASKLET
static struct tasklet_struct g_ptm_tasklet[] = {
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 0},
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 1},
};
#endif

unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;

static struct proc_dir_entry* g_ptm_dir = NULL;

static int g_showtime = 0;



/*
 * ####################################
 *          Local Functions
 * ####################################
 */

static void ptm_setup(struct net_device *dev, int ndev)
{
    u8 addr[ETH_ALEN];

#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
    netif_carrier_off(dev);
#endif

    /* hook network operations */
    dev->netdev_ops = &g_ptm_netdev_ops;
    /* Allow up to 1508 bytes, for RFC4638 */
    dev->max_mtu = ETH_DATA_LEN + 8;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,19,0))
    netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
#else
    netif_napi_add_weight(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
#endif
    dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;

    addr[0] = 0x00;
    addr[1] = 0x20;
    addr[2] = 0xda;
    addr[3] = 0x86;
    addr[4] = 0x23;
    addr[5] = 0x75 + ndev;
    eth_hw_addr_set(dev, addr);
}

static struct net_device_stats *ptm_get_stats(struct net_device *dev)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    g_ptm_priv_data.itf[ndev].stats.rx_errors  = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
    g_ptm_priv_data.itf[ndev].stats.rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + (WAN_MIB_TABLE[ndev].wrx_correct_pdu - g_ptm_priv_data.itf[ndev].stats.rx_packets);

    return &g_ptm_priv_data.itf[ndev].stats;
}

static int ptm_open(struct net_device *dev)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    napi_enable(&g_ptm_priv_data.itf[ndev].napi);

    IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);

    netif_start_queue(dev);

    return 0;
}

static int ptm_stop(struct net_device *dev)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    IFX_REG_W32_MASK((1 << ndev) | (1 << (ndev + 16)), 0, MBOX_IGU1_IER);

    napi_disable(&g_ptm_priv_data.itf[ndev].napi);

    netif_stop_queue(dev);

    return 0;
}

static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
{
    unsigned int work_done = 0;

    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(ndev) < 0 )
            break;

        work_done++;
    }

    return work_done;
}
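
/*
 * NAPI poll: a clear-then-recheck pattern avoids losing the race where a
 * new descriptor completes between the emptiness check and napi_complete().
 * The channel's ISR bit is cleared first; only if the valid-descriptor
 * count is still zero afterwards is NAPI completed and the interrupt
 * unmasked, otherwise another poll round is requested.
 */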
static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev;
    unsigned int work_done;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != napi->dev; ndev++ );

    work_done = ptm_poll(ndev, budget);

    // interface down
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_ISRC);
        // double check
        if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
            napi_complete(napi);
            IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
            return work_done;
        }
    }

    // next round
    return work_done;
}

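/*
 * TX path overview: a free descriptor is claimed from the software ring
 * (get_tx_desc), the skb data is flushed from the D-cache so the PP32
 * engine sees it in RAM, ownership is handed over via the own bit, and
 * the firmware is kicked through the IGU3 mailbox. When the ring is full
 * the queue is stopped and the TX-done interrupt re-enabled so the
 * mailbox handler can wake the queue later.
 */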
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ndev;
    unsigned int f_full;
    int desc_base;
    register struct tx_descriptor reg_desc = {0};

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /* allocate descriptor */
    desc_base = get_tx_desc(ndev, &f_full);
    if ( f_full ) {
        netif_trans_update(dev);
        netif_stop_queue(dev);

        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;

    if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
    g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;

    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
    reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    reg_desc.own     = 1;
    reg_desc.c       = 1;
    reg_desc.sop = reg_desc.eop = 1;

    /* write descriptor to memory and write back cache */
    g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);
    wmb();

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);

    if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
        skb_swap(skb);
    }

    g_ptm_priv_data.itf[ndev].stats.tx_packets++;
    g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;

    netif_trans_update(dev);
    mailbox_signal(ndev, 1);

    adsl_led_flash();

    return NETDEV_TX_OK;

PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
    return NETDEV_TX_OK;
}

static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    switch ( cmd )
    {
    case IFX_PTM_MIB_CW_GET:
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords   = WAN_MIB_TABLE[ndev].wrx_idle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords   = 0;
        break;
    case IFX_PTM_MIB_FRAME_GET:
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect   = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped   = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend      = WAN_MIB_TABLE[ndev].wtx_total_pdu;
        break;
    case IFX_PTM_CFG_GET:
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck   = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck    = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen      = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen     = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen      = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen      = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
        break;
    case IFX_PTM_CFG_SET:
        CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
        CFG_ETH_EFMTC_CRC->rx_eth_crc_check   = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len   = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len   = 0;
        }
        CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
        }
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
static void ptm_tx_timeout(struct net_device *dev)
#else
static void ptm_tx_timeout(struct net_device *dev, unsigned int txqueue)
#endif
{
    int ndev;

    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    /* disable TX irq, release skb when sending new packet */
    IFX_REG_W32_MASK(1 << (ndev + 16), 0, MBOX_IGU1_IER);

    /* wake up TX queue */
    netif_wake_queue(dev);

    return;
}

static INLINE void adsl_led_flash(void)
{
}

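/*
 * RX buffer layout: the skb pointer itself is stashed in the word just
 * before skb->data so the RX handler can recover it from the DMA address
 * alone (see get_skb_rx_pointer). The reserve expression below rounds
 * skb->data up to the next DATA_BUFFER_ALIGNMENT boundary; for a
 * power-of-two alignment A, ~(x + A - 1) & (A - 1) equals (-x) & (A - 1),
 * i.e. the distance from x up to the next multiple of A.
 */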
static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be aligned to burst length, and reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put skb pointer in reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /* write back and invalidate cache */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}

#if 0
static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be aligned to burst length */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
    return skb;
}
#endif

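/*
 * Reverse of the trick in alloc_skb_rx(): the hardware descriptor holds
 * skb->data as a word address (byte address >> 2), so (dataptr - 1) << 2
 * is the word just before the payload, where the skb pointer was stored.
 * ORing with KSEG1 reads it through the uncached segment, matching the
 * cache writeback done at allocation time.
 */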
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}

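/*
 * Descriptor ring allocation: tx_desc_pos chases the ring tail. A slot is
 * free when its own bit is 0 (PP32 has consumed it). *f_full reports
 * whether the slot after the claimed one is still owned by hardware, so
 * the caller can stop the queue before the ring actually overflows. As
 * noted below, this assumes xmit calls are serialized (single TX queue).
 */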
static INLINE int get_tx_desc(unsigned int itf, unsigned int *f_full)
{
    int desc_base = -1;
    struct ptm_itf *p_itf = &g_ptm_priv_data.itf[itf];

    // assume TX is serial operation
    // no protection provided

    *f_full = 1;

    if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 ) {
        desc_base = p_itf->tx_desc_pos;
        if ( ++(p_itf->tx_desc_pos) == dma_tx_descriptor_length )
            p_itf->tx_desc_pos = 0;
        if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 )
            *f_full = 0;
    }

    return desc_base;
}

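/*
 * RX completion: one call consumes at most one completed descriptor. On
 * success the filled skb is detached and passed up the stack, a fresh
 * buffer from alloc_skb_rx() takes its place, and the descriptor is
 * returned to PP32 (own = 1, c = 0) with a mailbox signal. On error the
 * same buffer is simply recycled.
 */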
static INLINE int mailbox_rx_irq_handler(unsigned int ch)   // return: < 0 - descriptor not available, 0 - received one packet
{
    unsigned int ndev = ch;
    struct sk_buff *skb;
    struct sk_buff *new_skb;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    int netif_rx_ret;

    desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
    if ( desc->own || !desc->c )    // if PP32 holds the descriptor, or the descriptor is not completed
        return -EAGAIN;
    if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
        g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;

    reg_desc = *desc;
    skb = get_skb_rx_pointer(reg_desc.dataptr);

    if ( !reg_desc.err ) {
        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);

            // parse protocol header
            skb->dev = g_net_dev[ndev];
            skb->protocol = eth_type_trans(skb, skb->dev);

            netif_rx_ret = netif_receive_skb(skb);

            if ( netif_rx_ret != NET_RX_DROP ) {
                g_ptm_priv_data.itf[ndev].stats.rx_packets++;
                g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
            }

            reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }
    }
    else
        reg_desc.err = 0;

    reg_desc.datalen = rx_max_packet_size;
    reg_desc.own     = 1;
    reg_desc.c       = 0;

    // update descriptor
    *desc = reg_desc;
    wmb();

    mailbox_signal(ndev, 0);

    adsl_led_flash();

    return 0;
}

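/*
 * Mailbox IRQ demux: in MBOX_IGU1, bits 0..15 are RX channels and bits
 * 16..31 the corresponding TX channels. TX-done bits just re-wake the
 * queue (skbs are reclaimed lazily in xmit); RX bits either drain the
 * channel inline (interrupt mode) or mask the source and hand off to
 * NAPI.
 */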
static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    unsigned int isr;
    int i;

    isr = IFX_REG_R32(MBOX_IGU1_ISR);
    IFX_REG_W32(isr, MBOX_IGU1_ISRC);
    isr &= IFX_REG_R32(MBOX_IGU1_IER);

    while ( (i = __fls(isr)) >= 0 ) {
        isr ^= 1 << i;

        if ( i >= 16 ) {
            // TX
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            i -= 16;
            if ( i < MAX_ITF_NUMBER )
                netif_wake_queue(g_net_dev[i]);
        }
        else {
            // RX
#ifdef CONFIG_IFX_PTM_RX_INTERRUPT
            while ( WRX_DMA_CHANNEL_CONFIG(i)->vlddes > 0 )
                mailbox_rx_irq_handler(i);
#else
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            napi_schedule(&g_ptm_priv_data.itf[i].napi);
#endif
        }
    }

    return IRQ_HANDLED;
}

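/*
 * Host-to-PP32 doorbell: before raising an IGU3 signal the code spins
 * (bounded at 1000 iterations) until the firmware has consumed the
 * previous one, since a still-pending bit would make the new signal
 * silently merge with it. RX channels signal bit itf, TX channels bit
 * itf + 16, mirroring the IGU1 layout.
 */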
static INLINE void mailbox_signal(unsigned int itf, int is_tx)
{
    int count = 1000;

    if ( is_tx ) {
        while ( MBOX_IGU3_ISR_ISR(itf + 16) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf + 16), MBOX_IGU3_ISRS);
    }
    else {
        while ( MBOX_IGU3_ISR_ISR(itf) && count > 0 )
            count--;
        IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf), MBOX_IGU3_ISRS);
    }

    ASSERT(count != 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR));
}

#ifdef CONFIG_IFX_PTM_RX_TASKLET
static void do_ptm_tasklet(unsigned long arg)
{
    unsigned int work_to_do = 25;
    unsigned int work_done = 0;

    ASSERT(arg >= 0 && arg < ARRAY_SIZE(g_net_dev), "arg = %lu (wrong value)", arg);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(arg)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(arg) < 0 )
            break;

        work_done++;
    }

    // interface down
    if ( !netif_running(g_net_dev[arg]) )
        return;

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_ISRC);
        // double check
        if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
            IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_IER);
            return;
        }
    }

    // next round
    tasklet_schedule(&g_ptm_tasklet[arg]);
}
#endif


#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
{
    int i;

    if ( !(ifx_ptm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
        return;

    if ( skb->len < len )
        len = skb->len;

    if ( len > rx_max_packet_size ) {
        printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
        return;
    }

    if ( ch >= 0 )
        printk("%s (port %d, ch %d)\n", title, port, ch);
    else
        printk("%s\n", title);
    printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);
    for ( i = 1; i <= len; i++ ) {
        if ( i % 16 == 1 )
            printk(" %4d:", i - 1);
        printk(" %02X", (int)(*((char*)skb->data + i - 1) & 0xFF));
        if ( i % 16 == 0 )
            printk("\n");
    }
    if ( (i - 1) % 16 != 0 )
        printk("\n");
}
#endif

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static void skb_swap(struct sk_buff *skb)
{
    unsigned char tmp[8];
    unsigned char *p = skb->data;

    if ( !(p[0] & 0x01) ) { // bypass broadcast/multicast
        // swap MAC
        memcpy(tmp, p, 6);
        memcpy(p, p + 6, 6);
        memcpy(p + 6, tmp, 6);
        p += 12;

        // bypass VLAN
        while ( p[0] == 0x81 && p[1] == 0x00 )
            p += 4;

        // IP
        if ( p[0] == 0x08 && p[1] == 0x00 ) {
            p += 14;
            memcpy(tmp, p, 4);
            memcpy(p, p + 4, 4);
            memcpy(p + 4, tmp, 4);
            p += 8;
        }

        dma_cache_wback((unsigned long)skb->data, (unsigned long)p - (unsigned long)skb->data);
    }
}
#endif

static INLINE void proc_file_create(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    struct proc_dir_entry *res;

    g_ptm_dir = proc_mkdir("driver/ifx_ptm", NULL);

    create_proc_read_entry("version",
                            0,
                            g_ptm_dir,
                            proc_read_version,
                            NULL);

    res = create_proc_entry("wanmib",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc  = proc_read_wanmib;
        res->write_proc = proc_write_wanmib;
    }

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    create_proc_read_entry("genconf",
                            0,
                            g_ptm_dir,
                            proc_read_genconf,
                            NULL);

#ifdef CONFIG_AR9
    create_proc_read_entry("regs",
                            0,
                            g_ptm_dir,
                            ifx_ptm_proc_read_regs,
                            NULL);
#endif
#endif

    res = create_proc_entry("dbg",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc  = proc_read_dbg;
        res->write_proc = proc_write_dbg;
    }
#endif
}

static INLINE void proc_file_delete(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    remove_proc_entry("dbg", g_ptm_dir);
#endif

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
#ifdef CONFIG_AR9
    remove_proc_entry("regs", g_ptm_dir);
#endif

    remove_proc_entry("genconf", g_ptm_dir);
#endif

    remove_proc_entry("wanmib", g_ptm_dir);

    remove_proc_entry("version", g_ptm_dir);

    remove_proc_entry("driver/ifx_ptm", NULL);
}

static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
{
    int len = 0;

    len += ifx_ptm_version(buf + len);

    if ( offset >= len ) {
        *start = buf;
        *eof = 1;
        return 0;
    }
    *start = buf + offset;
    if ( (len -= offset) > count )
        return count;
    *eof = 1;
    return len;
}

static int proc_read_wanmib(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int i;
    char *title[] = {
        "dsl0\n",
        "dslfast0\n"
    };

    for ( i = 0; i < ARRAY_SIZE(title); i++ ) {
        len += sprintf(page + off + len, title[i]);
        len += sprintf(page + off + len, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu);
        len += sprintf(page + off + len, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu_bytes);
        len += sprintf(page + off + len, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu);
        len += sprintf(page + off + len, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu_bytes);
        len += sprintf(page + off + len, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu);
        len += sprintf(page + off + len, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu_bytes);
        len += sprintf(page + off + len, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_nodesc_drop_pdu);
        len += sprintf(page + off + len, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_len_violation_drop_pdu);
        len += sprintf(page + off + len, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE[i].wrx_idle_bytes);
        len += sprintf(page + off + len, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE[i].wrx_nonidle_cw);
        len += sprintf(page + off + len, " wrx_idle_cw = %d\n", WAN_MIB_TABLE[i].wrx_idle_cw);
        len += sprintf(page + off + len, " wrx_err_cw = %d\n", WAN_MIB_TABLE[i].wrx_err_cw);
        len += sprintf(page + off + len, " wtx_total_pdu = %d\n", WAN_MIB_TABLE[i].wtx_total_pdu);
        len += sprintf(page + off + len, " wtx_total_bytes = %d\n", WAN_MIB_TABLE[i].wtx_total_bytes);
    }

    *eof = 1;

    return len;
}

static int proc_write_wanmib(struct file *file, const char *buf, unsigned long count, void *data)
{
    char str[2048];
    char *p;
    int len, rlen;

    int i;

    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return count;

    if ( stricmp(p, "clear") == 0 || stricmp(p, "clean") == 0 ) {
        for ( i = 0; i < 2; i++ )
            memset((void*)&WAN_MIB_TABLE[i], 0, sizeof(WAN_MIB_TABLE[i]));
    }

    return count;
}

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC

static int proc_read_genconf(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;
    int len_max = off + count;
    char *pstr;
    char str[2048];
    int llen = 0;
    int i;
    unsigned long bit;

    pstr = *start = page;

    __sync();

    llen += sprintf(str + llen, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY, IFX_REG_R32(CFG_WAN_WRDES_DELAY));
    llen += sprintf(str + llen, "CFG_WRX_DMACH_ON    (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_RX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WRX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WTX_DMACH_ON    (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON);
    for ( i = 0, bit = 1; i < MAX_TX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WTX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WRX_LOOK_BITTH  (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH, IFX_REG_R32(CFG_WRX_LOOK_BITTH));
    llen += sprintf(str + llen, "CFG_ETH_EFMTC_CRC   (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC, CFG_ETH_EFMTC_CRC->rx_tc_crc_len, CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? " on" : "off");
    llen += sprintf(str + llen, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? " on" : "off", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? " on" : "off");
    llen += sprintf(str + llen, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len, CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ? " on" : "off");
    llen += sprintf(str + llen, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? " on" : "off");

    llen += sprintf(str + llen, "RX Port:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i, (unsigned int)WRX_PORT_CONFIG(i), WRX_PORT_CONFIG(i)->mfs, WRX_PORT_CONFIG(i)->dmach, WRX_PORT_CONFIG(i)->local_state, WRX_PORT_CONFIG(i)->partner_state);
    llen += sprintf(str + llen, "RX DMA Channel:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i), WRX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WRX_DMA_CHANNEL_CONFIG(i)->deslen, WRX_DMA_CHANNEL_CONFIG(i)->vlddes);

    llen += sprintf(str + llen, "TX Port:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i, (unsigned int)WTX_PORT_CONFIG(i), WTX_PORT_CONFIG(i)->tx_cwth2, WTX_PORT_CONFIG(i)->tx_cwth1);
    llen += sprintf(str + llen, "TX DMA Channel:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);

    if ( len <= off && len + llen > off )
    {
        memcpy(pstr, str + off - len, len + llen - off);
        pstr += len + llen - off;
    }
    else if ( len > off )
    {
        memcpy(pstr, str, llen);
        pstr += llen;
    }
    len += llen;
    if ( len >= len_max )
        goto PROC_READ_GENCONF_OVERRUN_END;

    *eof = 1;

    return len - off;

PROC_READ_GENCONF_OVERRUN_END:
    return len - llen - off;
}

#endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC

static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;

    len += sprintf(page + off + len, "error print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "debug print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "assert - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
    len += sprintf(page + off + len, "mac swap - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ? "enabled" : "disabled");

    *eof = 1;

    return len;
}

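/*
 * Example usage of this debug interface, matching the help text printed
 * by proc_write_dbg below:
 *     echo "enable rx tx"  > /proc/driver/ifx_ptm/dbg
 *     echo "disable all"   > /proc/driver/ifx_ptm/dbg
 *     cat /proc/driver/ifx_ptm/dbg
 * "enter"/"leave" simulate DSL showtime entry/exit for testing.
 */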
static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
{
    static const char *dbg_enable_mask_str[] = {
        " error print",
        " err",
        " debug print",
        " dbg",
        " assert",
        " assert",
        " dump rx skb",
        " rx",
        " dump tx skb",
        " tx",
        " dump init",
        " init",
        " dump qos",
        " qos",
        " mac swap",
        " swap",
        " all"
    };
    static const int dbg_enable_mask_str_len[] = {
        12, 4,
        12, 4,
        7,  7,
        12, 3,
        12, 3,
        10, 5,
        9,  4,
        9,  5,
        4
    };
    unsigned int dbg_enable_mask[] = {
        DBG_ENABLE_MASK_ERR,
        DBG_ENABLE_MASK_DEBUG_PRINT,
        DBG_ENABLE_MASK_ASSERT,
        DBG_ENABLE_MASK_DUMP_SKB_RX,
        DBG_ENABLE_MASK_DUMP_SKB_TX,
        DBG_ENABLE_MASK_DUMP_INIT,
        DBG_ENABLE_MASK_DUMP_QOS,
        DBG_ENABLE_MASK_MAC_SWAP,
        DBG_ENABLE_MASK_ALL
    };

    char str[2048];
    char *p;

    int len, rlen;

    int f_enable = 0;
    int i;

    len = count < sizeof(str) ? count : sizeof(str) - 1;
    rlen = len - copy_from_user(str, buf, len);
    while ( rlen && str[rlen - 1] <= ' ' )
        rlen--;
    str[rlen] = 0;
    for ( p = str; *p && *p <= ' '; p++, rlen-- );
    if ( !*p )
        return 0;

    // debugging feature for enter/leave showtime
    if ( strincmp(p, "enter", 5) == 0 && ifx_mei_atm_showtime_enter != NULL )
        ifx_mei_atm_showtime_enter(NULL, NULL);
    else if ( strincmp(p, "leave", 5) == 0 && ifx_mei_atm_showtime_exit != NULL )
        ifx_mei_atm_showtime_exit();

    if ( strincmp(p, "enable", 6) == 0 ) {
        p += 6;
        f_enable = 1;
    }
    else if ( strincmp(p, "disable", 7) == 0 ) {
        p += 7;
        f_enable = -1;
    }
    else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
        printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
    }

    if ( f_enable ) {
        if ( *p == 0 ) {
            if ( f_enable > 0 )
                ifx_ptm_dbg_enable |= DBG_ENABLE_MASK_ALL & ~DBG_ENABLE_MASK_MAC_SWAP;
            else
                ifx_ptm_dbg_enable &= ~DBG_ENABLE_MASK_ALL | DBG_ENABLE_MASK_MAC_SWAP;
        }
        else {
            do {
                for ( i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++ )
                    if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
                        if ( f_enable > 0 )
                            ifx_ptm_dbg_enable |= dbg_enable_mask[i >> 1];
                        else
                            ifx_ptm_dbg_enable &= ~dbg_enable_mask[i >> 1];
                        p += dbg_enable_mask_str_len[i];
                        break;
                    }
            } while ( i < ARRAY_SIZE(dbg_enable_mask_str) );
        }
    }

    return count;
}

#endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC

static INLINE int stricmp(const char *p1, const char *p2)
{
    int c1, c2;

    while ( *p1 && *p2 )
    {
        c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
        c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
        if ( (c1 -= c2) )
            return c1;
        p1++;
        p2++;
    }

    return *p1 - *p2;
}

#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
static INLINE int strincmp(const char *p1, const char *p2, int n)
{
    int c1 = 0, c2;

    while ( n && *p1 && *p2 )
    {
        c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
        c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
        if ( (c1 -= c2) )
            return c1;
        p1++;
        p2++;
        n--;
    }

    return n ? *p1 - *p2 : c1;
}
#endif

static INLINE int ifx_ptm_version(char *buf)
{
    int len = 0;
    unsigned int major, minor;

    ifx_ptm_get_fw_ver(&major, &minor);

    len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
    len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);

    return len;
}

static INLINE void check_parameters(void)
{
    /* There is a delay between the PPE writing a descriptor and the       */
    /* descriptor really being stored in memory. The host sees the same    */
    /* delay when writing a descriptor. The PPE uses this value to decide  */
    /* whether a write operation has taken effect.                         */
    if ( write_desc_delay < 0 )
        write_desc_delay = 0;

    /* Because of the limitation of the length field in descriptors, the   */
    /* packet size cannot be larger than 64K minus the overhead size.      */
    if ( rx_max_packet_size < ETH_MIN_FRAME_LENGTH )
        rx_max_packet_size = ETH_MIN_FRAME_LENGTH;
    else if ( rx_max_packet_size > 65536 - 1 )
        rx_max_packet_size = 65536 - 1;

    if ( dma_rx_descriptor_length < 2 )
        dma_rx_descriptor_length = 2;
    if ( dma_tx_descriptor_length < 2 )
        dma_tx_descriptor_length = 2;
}

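/*
 * Private data layout: one contiguous kzalloc block each for RX
 * descriptors, TX descriptors and TX skb pointers, shared by all
 * interfaces. Descriptors are then accessed through KSEG1 (the MIPS
 * uncached segment) after the cache has been invalidated, so host and
 * PP32 always agree on their contents without per-access cache
 * maintenance.
 */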
static INLINE int init_priv_data(void)
{
    void *p;
    int i;
    struct rx_descriptor rx_desc = {0};
    struct sk_buff *skb;
    volatile struct rx_descriptor *p_rx_desc;
    volatile struct tx_descriptor *p_tx_desc;
    struct sk_buff **ppskb;

    // clear ptm private data structure
    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    // allocate memory for RX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.rx_desc_base = p;
    //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);

    // allocate memory for TX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.tx_desc_base = p;

    // allocate memory for TX skb pointers
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_wback_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_ptm_priv_data.tx_skb_base = p;

    p_rx_desc = (volatile struct rx_descriptor *)((((unsigned int)g_ptm_priv_data.rx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_ptm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    ppskb = (struct sk_buff **)(((unsigned int)g_ptm_priv_data.tx_skb_base + 3) & ~3);
    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        g_ptm_priv_data.itf[i].rx_desc = &p_rx_desc[i * dma_rx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_skb  = &ppskb[i * dma_tx_descriptor_length];
    }

    rx_desc.own     = 1;
    rx_desc.c       = 0;
    rx_desc.sop     = 1;
    rx_desc.eop     = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.id      = 0;
    rx_desc.err     = 0;
    rx_desc.datalen = rx_max_packet_size;
    for ( i = 0; i < MAX_ITF_NUMBER * dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return -1;
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        p_rx_desc[i] = rx_desc;
    }

    return 0;
}

static INLINE void clear_priv_data(void)
{
    int i, j;
    struct sk_buff *skb;

    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        if ( g_ptm_priv_data.itf[i].tx_skb != NULL ) {
            for ( j = 0; j < dma_tx_descriptor_length; j++ )
                if ( g_ptm_priv_data.itf[i].tx_skb[j] != NULL )
                    dev_kfree_skb_any(g_ptm_priv_data.itf[i].tx_skb[j]);
        }
        if ( g_ptm_priv_data.itf[i].rx_desc != NULL ) {
            for ( j = 0; j < dma_rx_descriptor_length; j++ ) {
                if ( g_ptm_priv_data.itf[i].rx_desc[j].sop || g_ptm_priv_data.itf[i].rx_desc[j].eop ) { // descriptor initialized
                    skb = get_skb_rx_pointer(g_ptm_priv_data.itf[i].rx_desc[j].dataptr);
                    dev_kfree_skb_any(skb);
                }
            }
        }
    }

    if ( g_ptm_priv_data.rx_desc_base != NULL )
        kfree(g_ptm_priv_data.rx_desc_base);

    if ( g_ptm_priv_data.tx_desc_base != NULL )
        kfree(g_ptm_priv_data.tx_desc_base);

    if ( g_ptm_priv_data.tx_skb_base != NULL )
        kfree(g_ptm_priv_data.tx_skb_base);
}

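/*
 * PPE table setup (a hedged note, inferred from this file rather than a
 * datasheet): desba fields, like the descriptors' dataptr, hold word
 * addresses, hence the ">> 2" applied to the KSEG1 descriptor pointers;
 * proc_read_genconf above prints them back as (desba << 2) | KSEG1. The
 * tables appear to live in PPE data memory, so structure assignment
 * through the volatile table pointers programs the firmware directly.
 */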
static INLINE void init_tables(void)
{
    int i;
    volatile unsigned int *p;
    struct wrx_dma_channel_config rx_config = {0};
    struct wtx_dma_channel_config tx_config = {0};
    struct wrx_port_cfg_status rx_port_cfg = { 0 };
    struct wtx_port_cfg tx_port_cfg = { 0 };

    /*
     *  CDM Block 1
     */
    IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG);  // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
    p = CDM_DATA_MEMORY(0, 0);                                              // Clear CDM block 1
    for ( i = 0; i < CDM_DATA_MEMORY_DWLEN; i++, p++ )
        IFX_REG_W32(0, p);

    /*
     *  General Registers
     */
    IFX_REG_W32(write_desc_delay, CFG_WAN_WRDES_DELAY);
    IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER) - 1, CFG_WRX_DMACH_ON);
    IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1, CFG_WTX_DMACH_ON);

    IFX_REG_W32(8, CFG_WRX_LOOK_BITTH); // WAN RX EFM-TC Looking Threshold

    IFX_REG_W32(eth_efmtc_crc_cfg, CFG_ETH_EFMTC_CRC);

    /*
     *  WRX DMA Channel Configuration Table
     */
    rx_config.deslen = dma_rx_descriptor_length;
    rx_port_cfg.mfs = ETH_MAX_FRAME_LENGTH;
    rx_port_cfg.local_state = 0;    // looking for sync
    rx_port_cfg.partner_state = 0;  // partner receiver is out of sync

    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ ) {
        rx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].rx_desc >> 2) & 0x0FFFFFFF;
        *WRX_DMA_CHANNEL_CONFIG(i) = rx_config;

        rx_port_cfg.dmach = i;
        *WRX_PORT_CONFIG(i) = rx_port_cfg;
    }

    /*
     *  WTX DMA Channel Configuration Table
     */
    tx_config.deslen = dma_tx_descriptor_length;
    tx_port_cfg.tx_cwth1 = 5;
    tx_port_cfg.tx_cwth2 = 4;

    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
        tx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = tx_config;

        *WTX_PORT_CONFIG(i) = tx_port_cfg;
    }
}



/*
 * ####################################
 *          Global Functions
 * ####################################
 */

static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
{
    int i;

    g_showtime = 1;

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        netif_carrier_on(g_net_dev[i]);

    printk("enter showtime\n");

    return 0;
}

static int ptm_showtime_exit(void)
{
    int i;

    if ( !g_showtime )
        return -1;

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        netif_carrier_off(g_net_dev[i]);

    g_showtime = 0;

    printk("leave showtime\n");

    return 0;
}


static const struct of_device_id ltq_ptm_match[] = {
#ifdef CONFIG_DANUBE
    { .compatible = "lantiq,ppe-danube", .data = NULL },
#elif defined CONFIG_AMAZON_SE
    { .compatible = "lantiq,ppe-ase", .data = NULL },
#elif defined CONFIG_AR9
    { .compatible = "lantiq,ppe-arx100", .data = NULL },
#elif defined CONFIG_VR9
    { .compatible = "lantiq,ppe-xrx200", .data = NULL },
#endif
    {},
};
MODULE_DEVICE_TABLE(of, ltq_ptm_match);

/*
 * ####################################
 *          Init/Cleanup API
 * ####################################
 */

/*
 *  Description:
 *    Initialize global variables, PP32, communication structures, register IRQ
 *    and register device.
 *  Input:
 *    none
 *  Output:
 *    0    --- successful
 *    else --- failure, usually it is negative value of error code
 */
static int ltq_ptm_probe(struct platform_device *pdev)
{
    int ret;
    struct port_cell_info port_cell = {0};
    void *xdata_addr = NULL;
    int i;
    char ver_str[256];

    check_parameters();

    ret = init_priv_data();
    if ( ret != 0 ) {
        err("INIT_PRIV_DATA_FAIL");
        goto INIT_PRIV_DATA_FAIL;
    }

    ifx_ptm_init_chip(pdev);
    init_tables();

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
        if ( g_net_dev[i] == NULL )
            goto ALLOC_NETDEV_FAIL;
        ptm_setup(g_net_dev[i], i);
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        ret = register_netdev(g_net_dev[i]);
        if ( ret != 0 )
            goto REGISTER_NETDEV_FAIL;
    }

    g_ptm_priv_data.irq = platform_get_irq(pdev, 0);
    if (g_ptm_priv_data.irq < 0) {
        err("platform_get_irq fail");
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }

    /* register interrupt handler */
    ret = request_irq(g_ptm_priv_data.irq, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
    if ( ret ) {
        if ( ret == -EBUSY ) {
            err("IRQ may be occupied by other driver, please reconfig to disable it.");
        }
        else {
            err("request_irq fail");
        }
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }
    disable_irq(g_ptm_priv_data.irq);

    ret = ifx_pp32_start(0);
    if ( ret ) {
        err("ifx_pp32_start fail!");
        goto PP32_START_FAIL;
    }
    IFX_REG_W32(0, MBOX_IGU1_IER);
    IFX_REG_W32(~0, MBOX_IGU1_ISRC);

    enable_irq(g_ptm_priv_data.irq);


    proc_file_create();

    port_cell.port_num = 1;
    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &xdata_addr);
    if ( g_showtime ) {
        ptm_showtime_enter(&port_cell, &xdata_addr);
    }

    ifx_mei_atm_showtime_enter = ptm_showtime_enter;
    ifx_mei_atm_showtime_exit  = ptm_showtime_exit;

    ifx_ptm_version(ver_str);
    printk(KERN_INFO "%s", ver_str);

    printk("ifxmips_ptm: PTM init succeed\n");

    return 0;

PP32_START_FAIL:
    free_irq(g_ptm_priv_data.irq, &g_ptm_priv_data);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
    i = ARRAY_SIZE(g_net_dev);
REGISTER_NETDEV_FAIL:
    while ( i-- )
        unregister_netdev(g_net_dev[i]);
    i = ARRAY_SIZE(g_net_dev);
ALLOC_NETDEV_FAIL:
    while ( i-- ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }
INIT_PRIV_DATA_FAIL:
    clear_priv_data();
    printk("ifxmips_ptm: PTM init failed\n");
    return ret;
}

/*
 *  Description:
 *    Release memory, free IRQ, and deregister device.
 *  Input:
 *    none
 *  Output:
 *    none
 */
static int ltq_ptm_remove(struct platform_device *pdev)
{
    int i;

    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit  = NULL;

    proc_file_delete();


    ifx_pp32_stop(0);

    free_irq(g_ptm_priv_data.irq, &g_ptm_priv_data);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    ifx_ptm_uninit_chip();

    clear_priv_data();

    return 0;
}

static struct platform_driver ltq_ptm_driver = {
    .probe  = ltq_ptm_probe,
    .remove = ltq_ptm_remove,
    .driver = {
        .name = "ptm",
        .owner = THIS_MODULE,
        .of_match_table = ltq_ptm_match,
    },
};

module_platform_driver(ltq_ptm_driver);

MODULE_LICENSE("GPL");