lantiq: fix lantiq applications kernel 4.14 compatibility
[openwrt/openwrt.git] / package / kernel / lantiq / ltq-ptm / src / ifxmips_ptm_adsl.c
1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_adsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for Danube/
10 ** Amazon-SE/AR9)
11 ** COPYRIGHT : Copyright (c) 2006
12 ** Infineon Technologies AG
13 ** Am Campeon 1-12, 85579 Neubiberg, Germany
14 **
15 ** This program is free software; you can redistribute it and/or modify
16 ** it under the terms of the GNU General Public License as published by
17 ** the Free Software Foundation; either version 2 of the License, or
18 ** (at your option) any later version.
19 **
20 ** HISTORY
21 ** $Date $Author $Comment
22 ** 07 JUL 2009 Xu Liang Init Version
23 *******************************************************************************/
24
25
26
27 /*
28 * ####################################
29 * Head File
30 * ####################################
31 */
32
33 /*
34 * Common Head File
35 */
36 #include <linux/version.h>
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/errno.h>
41 #include <linux/proc_fs.h>
42 #include <linux/init.h>
43 #include <linux/ioctl.h>
44 #include <linux/etherdevice.h>
45 #include <linux/interrupt.h>
46 #include <linux/netdevice.h>
47 #include <asm/io.h>
48
49 /*
50 * Chip Specific Head File
51 */
52 #include "ifxmips_ptm_adsl.h"
53
54
55 #include <lantiq_soc.h>
56
57 /*
58 * ####################################
59 * Kernel Version Adaption
60 * ####################################
61 */
/*
 * Map the legacy MODULE_PARM()/MODULE_PARM_ARRAY() macros onto the
 * module_param() API for kernels >= 2.6.11; older kernels keep the
 * original MODULE_PARM macro.
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
  #define MODULE_PARM_ARRAY(a, b)   module_param_array(a, int, NULL, 0)
  #define MODULE_PARM(a, b)         module_param(a, int, 0)
#else
  #define MODULE_PARM_ARRAY(a, b)   MODULE_PARM(a, b)
#endif
68
69
70
71 /*
72 * ####################################
73 * Parameters to Configure PPE
74 * ####################################
75 */
76
/* Tunables exposed as module parameters; defaults match the firmware
 * configuration expected by init_tables(). */

static int write_desc_delay = 0x20; /* Write descriptor delay */

static int rx_max_packet_size = ETH_MAX_FRAME_LENGTH;
/* Max packet size for RX */

static int dma_rx_descriptor_length = 24; /* Number of descriptors per DMA RX channel */
static int dma_tx_descriptor_length = 24; /* Number of descriptors per DMA TX channel */

static int eth_efmtc_crc_cfg = 0x03100710; /* default: tx_eth_crc_check: 1, tx_tc_crc_check: 1, tx_tc_crc_len = 16 */
                                           /* rx_eth_crc_present: 1, rx_eth_crc_check: 1, rx_tc_crc_check: 1, rx_tc_crc_len = 16 */

MODULE_PARM(write_desc_delay, "i");
MODULE_PARM_DESC(write_desc_delay, "PPE core clock cycles between descriptor write and effectiveness in external RAM");

MODULE_PARM(rx_max_packet_size, "i");
MODULE_PARM_DESC(rx_max_packet_size, "Max packet size in byte for downstream ethernet frames");

MODULE_PARM(dma_rx_descriptor_length, "i");
MODULE_PARM_DESC(dma_rx_descriptor_length, "Number of descriptor assigned to DMA RX channel (>16)");
MODULE_PARM(dma_tx_descriptor_length, "i");
MODULE_PARM_DESC(dma_tx_descriptor_length, "Number of descriptor assigned to DMA TX channel (>16)");

MODULE_PARM(eth_efmtc_crc_cfg, "i");
MODULE_PARM_DESC(eth_efmtc_crc_cfg, "Configuration for PTM TX/RX ethernet/efm-tc CRC");
102
103
104 /*
105 * ####################################
106 * Definition
107 * ####################################
108 */
109
110
/* Pass as "len" to dump_skb() to dump the whole payload (clamped to skb->len). */
#define DUMP_SKB_LEN                            ~0
112
113
114
115 /*
116 * ####################################
117 * Declaration
118 * ####################################
119 */
120
121 /*
122 * Network Operations
123 */
124 static void ptm_setup(struct net_device *, int);
125 static struct net_device_stats *ptm_get_stats(struct net_device *);
126 static int ptm_open(struct net_device *);
127 static int ptm_stop(struct net_device *);
128 static unsigned int ptm_poll(int, unsigned int);
129 static int ptm_napi_poll(struct napi_struct *, int);
130 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
131 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
132 static int ptm_change_mtu(struct net_device *, int);
133 #endif
134 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
135 static void ptm_tx_timeout(struct net_device *);
136
137 /*
138 * DSL Data LED
139 */
140 static INLINE void adsl_led_flash(void);
141
142 /*
143 * buffer manage functions
144 */
145 static INLINE struct sk_buff* alloc_skb_rx(void);
146 //static INLINE struct sk_buff* alloc_skb_tx(unsigned int);
147 static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int);
148 static INLINE int get_tx_desc(unsigned int, unsigned int *);
149
150 /*
151 * Mailbox handler and signal function
152 */
153 static INLINE int mailbox_rx_irq_handler(unsigned int);
154 static irqreturn_t mailbox_irq_handler(int, void *);
155 static INLINE void mailbox_signal(unsigned int, int);
156 #ifdef CONFIG_IFX_PTM_RX_TASKLET
157 static void do_ptm_tasklet(unsigned long);
158 #endif
159
160 /*
161 * Debug Functions
162 */
163 #if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
164 static void dump_skb(struct sk_buff *, u32, char *, int, int, int);
165 #else
166 #define dump_skb(skb, len, title, port, ch, is_tx) do {} while (0)
167 #endif
168 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
169 static void skb_swap(struct sk_buff *);
170 #else
171 #define skb_swap(skb) do {} while (0)
172 #endif
173
174 /*
175 * Proc File Functions
176 */
177 static INLINE void proc_file_create(void);
178 static INLINE void proc_file_delete(void);
179 static int proc_read_version(char *, char **, off_t, int, int *, void *);
180 static int proc_read_wanmib(char *, char **, off_t, int, int *, void *);
181 static int proc_write_wanmib(struct file *, const char *, unsigned long, void *);
182 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
183 static int proc_read_genconf(char *, char **, off_t, int, int *, void *);
184 #endif
185 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
186 static int proc_read_dbg(char *, char **, off_t, int, int *, void *);
187 static int proc_write_dbg(struct file *, const char *, unsigned long, void *);
188 #endif
189
190 /*
191 * Proc Help Functions
192 */
193 static INLINE int stricmp(const char *, const char *);
194 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
195 static INLINE int strincmp(const char *, const char *, int);
196 #endif
197 static INLINE int ifx_ptm_version(char *);
198
199 /*
200 * Init & clean-up functions
201 */
202 static INLINE void check_parameters(void);
203 static INLINE int init_priv_data(void);
204 static INLINE void clear_priv_data(void);
205 static INLINE void init_tables(void);
206
207 /*
208 * Exteranl Function
209 */
/*
 * Query the MEI (DSL transceiver) driver for showtime state.  When the MEI
 * driver is not part of the build, fall back to a stub that always reports
 * "not in showtime".
 */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
  extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
#else
  static inline int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr)
  {
    if ( is_showtime != NULL )
        *is_showtime = 0;
    return 0;
  }
#endif
220
221 /*
222 * External variable
223 */
/*
 * Showtime enter/exit callbacks.  With the MEI driver present they are
 * owned by it; otherwise this module defines (and exports) the hook
 * pointers so the DSL firmware loader can install callbacks later.
 */
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
  extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
  extern int (*ifx_mei_atm_showtime_exit)(void);
#else
  int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *) = NULL;
  EXPORT_SYMBOL(ifx_mei_atm_showtime_enter);
  int (*ifx_mei_atm_showtime_exit)(void) = NULL;
  EXPORT_SYMBOL(ifx_mei_atm_showtime_exit);
#endif
233
234
235
236 /*
237 * ####################################
238 * Local Variable
239 * ####################################
240 */
241
/* Driver-wide private state shared by both PTM interfaces. */
static struct ptm_priv_data g_ptm_priv_data;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
static struct net_device_ops g_ptm_netdev_ops = {
    .ndo_get_stats       = ptm_get_stats,
    .ndo_open            = ptm_open,
    .ndo_stop            = ptm_stop,
    .ndo_start_xmit      = ptm_hard_start_xmit,
    .ndo_validate_addr   = eth_validate_addr,
    .ndo_set_mac_address = eth_mac_addr,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
    /* kernels >= 4.10 use dev->min_mtu/max_mtu instead (see ptm_setup) */
    .ndo_change_mtu      = ptm_change_mtu,
#endif
    .ndo_do_ioctl        = ptm_ioctl,
    .ndo_tx_timeout      = ptm_tx_timeout,
};
#endif

/* net_device instances and their interface names, indexed alike */
static struct net_device *g_net_dev[2] = {0};
static char *g_net_dev_name[2] = {"dsl0", "dslfast0"};

#ifdef CONFIG_IFX_PTM_RX_TASKLET
/* one RX tasklet per interface; the data field carries the interface index */
static struct tasklet_struct g_ptm_tasklet[] = {
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 0},
    {NULL, 0, ATOMIC_INIT(0), do_ptm_tasklet, 1},
};
#endif

/* debug trace mask (DBG_ENABLE_MASK_*); writable via the "dbg" proc entry */
unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;

/* handle of the /proc/driver/ifx_ptm directory */
static struct proc_dir_entry* g_ptm_dir = NULL;

/* nonzero while the DSL line is in showtime (data mode); gates TX */
static int g_showtime = 0;
276
277
278 /*
279 * ####################################
280 * Local Function
281 * ####################################
282 */
283
/*
 * One-time initialization of a PTM net_device: install the netdev ops,
 * register the NAPI context and assign a hard-coded MAC address whose
 * last byte distinguishes the two interfaces.
 */
static void ptm_setup(struct net_device *dev, int ndev)
{
#if defined(CONFIG_IFXMIPS_DSL_CPE_MEI) || defined(CONFIG_IFXMIPS_DSL_CPE_MEI_MODULE)
    /* start with carrier off; it is raised once the line reaches showtime */
    netif_carrier_off(dev);
#endif

    /* hook network operations */
    dev->netdev_ops = &g_ptm_netdev_ops;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
    /* Allow up to 1508 bytes, for RFC4638 */
    dev->max_mtu = ETH_DATA_LEN + 8;
#endif
    netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 25);
    dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;

    /* fixed MAC address; only the last byte varies per interface */
    dev->dev_addr[0] = 0x00;
    dev->dev_addr[1] = 0x20;
    dev->dev_addr[2] = 0xda;
    dev->dev_addr[3] = 0x86;
    dev->dev_addr[4] = 0x23;
    dev->dev_addr[5] = 0x75 + ndev;
}
306
307 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
308 {
309 int ndev;
310
311 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
312 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
313
314 g_ptm_priv_data.itf[ndev].stats.rx_errors = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu + WAN_MIB_TABLE[ndev].wrx_ethcrc_err_pdu;
315 g_ptm_priv_data.itf[ndev].stats.rx_dropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu + (WAN_MIB_TABLE[ndev].wrx_correct_pdu - g_ptm_priv_data.itf[ndev].stats.rx_packets);
316
317 return &g_ptm_priv_data.itf[ndev].stats;
318 }
319
320 static int ptm_open(struct net_device *dev)
321 {
322 int ndev;
323
324 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
325 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
326
327 napi_enable(&g_ptm_priv_data.itf[ndev].napi);
328
329 IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
330
331 netif_start_queue(dev);
332
333 return 0;
334 }
335
336 static int ptm_stop(struct net_device *dev)
337 {
338 int ndev;
339
340 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
341 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
342
343 IFX_REG_W32_MASK((1 << ndev) | (1 << (ndev + 16)), 0, MBOX_IGU1_IER);
344
345 napi_disable(&g_ptm_priv_data.itf[ndev].napi);
346
347 netif_stop_queue(dev);
348
349 return 0;
350 }
351
/*
 * Drain up to work_to_do completed RX descriptors for interface ndev.
 * Returns the number of packets actually processed; stops early when
 * no completed descriptor is available.
 */
static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
{
    unsigned int work_done = 0;

    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(ndev) < 0 )
            break;

        work_done++;
    }

    return work_done;
}
/*
 * NAPI poll callback: process up to "budget" RX packets, then either stay
 * in polling mode or — once the channel is drained — acknowledge and
 * re-enable the RX interrupt.  The second vlddes read guards against a
 * descriptor completing between the drain check and the irq ack.
 */
static int ptm_napi_poll(struct napi_struct *napi, int budget)
{
    int ndev;
    unsigned int work_done;

    /* map the napi context back to its interface index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != napi->dev; ndev++ );

    work_done = ptm_poll(ndev, budget);

    // interface down
    if ( !netif_running(napi->dev) ) {
        napi_complete(napi);
        return work_done;
    }

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_ISRC);
        // double check: a packet may have arrived after the first read
        if ( WRX_DMA_CHANNEL_CONFIG(ndev)->vlddes == 0 ) {
            napi_complete(napi);
            // leave polling mode and re-enable the RX interrupt
            IFX_REG_W32_MASK(0, 1 << ndev, MBOX_IGU1_IER);
            return work_done;
        }
    }

    // next round
    return work_done;
}
397
/*
 * Transmit one frame: claim a TX descriptor, park the skb until the slot
 * is reused, fill the descriptor (word-address + byte offset form expected
 * by the hardware) and signal the mailbox.  Frames are only accepted while
 * the line is in showtime; on any failure the skb is dropped and counted.
 * Always returns NETDEV_TX_OK.
 */
static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    int ndev;
    unsigned int f_full;
    int desc_base;
    register struct tx_descriptor reg_desc = {0};

    /* map the net_device back to its interface index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    if ( !g_showtime ) {
        err("not in showtime");
        goto PTM_HARD_START_XMIT_FAIL;
    }

    /* allocate descriptor */
    desc_base = get_tx_desc(ndev, &f_full);
    if ( f_full ) {
        /* ring is full (or about to be): stop the queue and arm the
         * TX-done interrupt so the queue can be woken again */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
        netif_trans_update(dev);
#else
        dev->trans_start = jiffies;
#endif
        netif_stop_queue(dev);

        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_ISRC);
        IFX_REG_W32_MASK(0, 1 << (ndev + 16), MBOX_IGU1_IER);
    }
    if ( desc_base < 0 )
        goto PTM_HARD_START_XMIT_FAIL;

    /* release the skb left from the previous use of this slot, if any */
    if ( g_ptm_priv_data.itf[ndev].tx_skb[desc_base] != NULL )
        dev_kfree_skb_any(g_ptm_priv_data.itf[ndev].tx_skb[desc_base]);
    g_ptm_priv_data.itf[ndev].tx_skb[desc_base] = skb;

    /* dataptr is a 32-bit word address; byteoff keeps the sub-burst
     * alignment offset of skb->data; pad short frames to ETH_ZLEN */
    reg_desc.dataptr = (unsigned int)skb->data >> 2;
    reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
    reg_desc.byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
    reg_desc.own = 1;
    reg_desc.c = 1;
    reg_desc.sop = reg_desc.eop = 1;

    /* write descriptor to memory and write back cache */
    g_ptm_priv_data.itf[ndev].tx_desc[desc_base] = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);
    wmb();

    dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 1);

    if ( (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ) {
        skb_swap(skb);
    }

    g_ptm_priv_data.itf[ndev].stats.tx_packets++;
    g_ptm_priv_data.itf[ndev].stats.tx_bytes += reg_desc.datalen;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
    netif_trans_update(dev);
#else
    dev->trans_start = jiffies;
#endif
    /* notify the mailbox that a TX descriptor is ready */
    mailbox_signal(ndev, 1);

    adsl_led_flash();

    return NETDEV_TX_OK;

PTM_HARD_START_XMIT_FAIL:
    dev_kfree_skb_any(skb);
    g_ptm_priv_data.itf[ndev].stats.tx_dropped++;
    return NETDEV_TX_OK;
}
470 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
471 static int ptm_change_mtu(struct net_device *dev, int mtu)
472 {
473 /* Allow up to 1508 bytes, for RFC4638 */
474 if (mtu < 68 || mtu > ETH_DATA_LEN + 8)
475 return -EINVAL;
476 dev->mtu = mtu;
477 return 0;
478 }
479 #endif
480
/*
 * Private ioctl handler: expose the firmware WAN MIB counters and the
 * ethernet/efm-tc CRC configuration registers.
 *
 * NOTE(review): ifr->ifr_data is dereferenced directly without
 * copy_to_user/copy_from_user - verify the callers pass kernel-accessible
 * buffers.
 */
static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
    int ndev;

    /* map the net_device back to its interface index */
    for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
    ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);

    switch ( cmd )
    {
    case IFX_PTM_MIB_CW_GET:
        /* codeword-level RX counters; TX codeword counters are not kept */
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = WAN_MIB_TABLE[ndev].wrx_nonidle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = WAN_MIB_TABLE[ndev].wrx_idle_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = WAN_MIB_TABLE[ndev].wrx_err_cw;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = 0;
        ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = 0;
        break;
    case IFX_PTM_MIB_FRAME_GET:
        /* frame-level counters */
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxCorrect = WAN_MIB_TABLE[ndev].wrx_correct_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TC_CrcError = WAN_MIB_TABLE[ndev].wrx_tccrc_err_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->RxDropped = WAN_MIB_TABLE[ndev].wrx_nodesc_drop_pdu + WAN_MIB_TABLE[ndev].wrx_len_violation_drop_pdu;
        ((PTM_FRAME_MIB_T *)ifr->ifr_data)->TxSend = WAN_MIB_TABLE[ndev].wtx_total_pdu;
        break;
    case IFX_PTM_CFG_GET:
        /* read back the current CRC configuration register */
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = CFG_ETH_EFMTC_CRC->rx_eth_crc_present;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = CFG_ETH_EFMTC_CRC->rx_eth_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = CFG_ETH_EFMTC_CRC->rx_tc_crc_check;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = CFG_ETH_EFMTC_CRC->rx_tc_crc_len;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = CFG_ETH_EFMTC_CRC->tx_eth_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = CFG_ETH_EFMTC_CRC->tx_tc_crc_gen;
        ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = CFG_ETH_EFMTC_CRC->tx_tc_crc_len;
        break;
    case IFX_PTM_CFG_SET:
        /* TC CRC checking/generation is only enabled for valid lengths
         * (16 or 32 bits); anything else disables it entirely */
        CFG_ETH_EFMTC_CRC->rx_eth_crc_present = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent ? 1 : 0;
        CFG_ETH_EFMTC_CRC->rx_eth_crc_check = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck && (((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 1;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->rx_tc_crc_check = 0;
            CFG_ETH_EFMTC_CRC->rx_tc_crc_len = 0;
        }
        CFG_ETH_EFMTC_CRC->tx_eth_crc_gen = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 1 : 0;
        if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen && (((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 16 || ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen == 32) )
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 1;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen;
        }
        else
        {
            CFG_ETH_EFMTC_CRC->tx_tc_crc_gen = 0;
            CFG_ETH_EFMTC_CRC->tx_tc_crc_len = 0;
        }
        break;
    default:
        return -EOPNOTSUPP;
    }

    return 0;
}
543
544 static void ptm_tx_timeout(struct net_device *dev)
545 {
546 int ndev;
547
548 for ( ndev = 0; ndev < ARRAY_SIZE(g_net_dev) && g_net_dev[ndev] != dev; ndev++ );
549 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
550
551 /* disable TX irq, release skb when sending new packet */
552 IFX_REG_W32_MASK(1 << (ndev + 16), 0, MBOX_IGU1_IER);
553
554 /* wake up TX queue */
555 netif_wake_queue(dev);
556
557 return;
558 }
559
/* Intentionally empty: DSL data LED flashing is a no-op in this build. */
static INLINE void adsl_led_flash(void)
{
}
563
/*
 * Allocate an RX buffer aligned for DMA bursts.  A pointer to the skb
 * itself is stashed in the word just before skb->data so that the RX path
 * can recover the skb from the hardware descriptor's data pointer
 * (see get_skb_rx_pointer).  Returns NULL on allocation failure.
 */
static INLINE struct sk_buff* alloc_skb_rx(void)
{
    struct sk_buff *skb;

    /* allocate memory including trailer and padding */
    skb = dev_alloc_skb(rx_max_packet_size + RX_HEAD_MAC_ADDR_ALIGNMENT + DATA_BUFFER_ALIGNMENT);
    if ( skb != NULL ) {
        /* must be burst length alignment and reserve two more bytes for MAC address alignment */
        if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
            skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
        /* put skb in reserved area "skb->data - 4" */
        *((struct sk_buff **)skb->data - 1) = skb;
        wmb();
        /* write back and invalidate cache for the stashed pointer */
        dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
        /* invalidate cache over the whole data area before DMA fills it */
        dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
    }

    return skb;
}
585
#if 0   /* currently unused: TX uses the skbs handed in by the stack */
/*
 * Allocate a TX buffer whose data pointer is aligned to the DMA burst
 * length.  Returns NULL on allocation failure.
 */
static INLINE struct sk_buff* alloc_skb_tx(unsigned int size)
{
    struct sk_buff *skb;

    /* allocate memory including padding */
    size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
    skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
    /* must be burst length alignment */
    if ( skb != NULL )
        skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
    return skb;
}
#endif
600
/*
 * Recover the skb pointer stashed one word before the RX data buffer
 * (see alloc_skb_rx).  dataptr is the word address from the RX descriptor;
 * the stashed pointer is read through uncached KSEG1 to bypass the cache.
 */
static INLINE struct sk_buff *get_skb_rx_pointer(unsigned int dataptr)
{
    unsigned int skb_dataptr;
    struct sk_buff *skb;

    /* word before the buffer, as an uncached (KSEG1) byte address */
    skb_dataptr = ((dataptr - 1) << 2) | KSEG1;
    skb = *(struct sk_buff **)skb_dataptr;

    /* sanity: the recovered pointer must be a kernel address and its data
     * pointer must match the descriptor's dataptr */
    ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
    ASSERT(((unsigned int)skb->data | KSEG1) == ((dataptr << 2) | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);

    return skb;
}
614
615 static INLINE int get_tx_desc(unsigned int itf, unsigned int *f_full)
616 {
617 int desc_base = -1;
618 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[itf];
619
620 // assume TX is serial operation
621 // no protection provided
622
623 *f_full = 1;
624
625 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 ) {
626 desc_base = p_itf->tx_desc_pos;
627 if ( ++(p_itf->tx_desc_pos) == dma_tx_descriptor_length )
628 p_itf->tx_desc_pos = 0;
629 if ( p_itf->tx_desc[p_itf->tx_desc_pos].own == 0 )
630 *f_full = 0;
631 }
632
633 return desc_base;
634 }
635
/*
 * Process one completed RX descriptor on channel ch.
 * Return: < 0 - descriptor not available, 0 - received one packet.
 * The received skb is handed to the stack only if a replacement buffer can
 * be allocated; otherwise (or on a descriptor error) the old buffer is
 * recycled and the packet silently dropped.
 */
static INLINE int mailbox_rx_irq_handler(unsigned int ch) // return: < 0 - descriptor not available, 0 - received one packet
{
    unsigned int ndev = ch;
    struct sk_buff *skb;
    struct sk_buff *new_skb;
    volatile struct rx_descriptor *desc;
    struct rx_descriptor reg_desc;
    int netif_rx_ret;

    desc = &g_ptm_priv_data.itf[ndev].rx_desc[g_ptm_priv_data.itf[ndev].rx_desc_pos];
    if ( desc->own || !desc->c ) // if PP32 hold descriptor or descriptor not completed
        return -EAGAIN;
    if ( ++g_ptm_priv_data.itf[ndev].rx_desc_pos == dma_rx_descriptor_length )
        g_ptm_priv_data.itf[ndev].rx_desc_pos = 0;

    /* snapshot the descriptor, then recover the skb stashed in front of
     * its data buffer (see alloc_skb_rx) */
    reg_desc = *desc;
    skb = get_skb_rx_pointer(reg_desc.dataptr);

    if ( !reg_desc.err ) {
        new_skb = alloc_skb_rx();
        if ( new_skb != NULL ) {
            skb_reserve(skb, reg_desc.byteoff);
            skb_put(skb, reg_desc.datalen);

            dump_skb(skb, DUMP_SKB_LEN, (char *)__func__, ndev, ndev, 0);

            // parse protocol header
            skb->dev = g_net_dev[ndev];
            skb->protocol = eth_type_trans(skb, skb->dev);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
            g_net_dev[ndev]->last_rx = jiffies;
#endif

            netif_rx_ret = netif_receive_skb(skb);

            if ( netif_rx_ret != NET_RX_DROP ) {
                g_ptm_priv_data.itf[ndev].stats.rx_packets++;
                g_ptm_priv_data.itf[ndev].stats.rx_bytes += reg_desc.datalen;
            }

            /* hand the replacement buffer to the hardware (word address) */
            reg_desc.dataptr = ((unsigned int)new_skb->data >> 2) & 0x0FFFFFFF;
            reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
        }
    }
    else
        reg_desc.err = 0;

    /* re-arm the descriptor for the next packet */
    reg_desc.datalen = rx_max_packet_size;
    reg_desc.own = 1;
    reg_desc.c = 0;

    // update descriptor
    *desc = reg_desc;
    wmb();

    /* notify the mailbox that the RX descriptor was returned */
    mailbox_signal(ndev, 0);

    adsl_led_flash();

    return 0;
}
698
/*
 * Mailbox interrupt: bits 16+ of the ISR are per-interface TX-done events
 * (mask the source and wake the queue); bits 0..15 are RX events (either
 * drained inline in interrupt mode, or deferred to NAPI).
 */
static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    unsigned int isr;
    int i;

    /* read, acknowledge, then restrict to currently enabled sources */
    isr = IFX_REG_R32(MBOX_IGU1_ISR);
    IFX_REG_W32(isr, MBOX_IGU1_ISRC);
    isr &= IFX_REG_R32(MBOX_IGU1_IER);

    /* NOTE(review): loop termination relies on __fls(0) yielding a
     * negative value - confirm for this architecture. */
    while ( (i = __fls(isr)) >= 0 ) {
        isr ^= 1 << i;

        if ( i >= 16 ) {
            // TX: mask this source and wake the corresponding queue
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            i -= 16;
            if ( i < MAX_ITF_NUMBER )
                netif_wake_queue(g_net_dev[i]);
        }
        else {
            // RX
#ifdef CONFIG_IFX_PTM_RX_INTERRUPT
            /* interrupt mode: drain all completed descriptors here */
            while ( WRX_DMA_CHANNEL_CONFIG(i)->vlddes > 0 )
                mailbox_rx_irq_handler(i);
#else
            /* NAPI mode: mask the source and let the poller do the work */
            IFX_REG_W32_MASK(1 << i, 0, MBOX_IGU1_IER);
            napi_schedule(&g_ptm_priv_data.itf[i].napi);
#endif
        }
    }

    return IRQ_HANDLED;
}
732
733 static INLINE void mailbox_signal(unsigned int itf, int is_tx)
734 {
735 int count = 1000;
736
737 if ( is_tx ) {
738 while ( MBOX_IGU3_ISR_ISR(itf + 16) && count > 0 )
739 count--;
740 IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf + 16), MBOX_IGU3_ISRS);
741 }
742 else {
743 while ( MBOX_IGU3_ISR_ISR(itf) && count > 0 )
744 count--;
745 IFX_REG_W32(MBOX_IGU3_ISRS_SET(itf), MBOX_IGU3_ISRS);
746 }
747
748 ASSERT(count != 0, "MBOX_IGU3_ISR = 0x%08x", IFX_REG_R32(MBOX_IGU3_ISR));
749 }
750
#ifdef CONFIG_IFX_PTM_RX_TASKLET
/*
 * RX tasklet; arg is the interface index.  Mirrors ptm_napi_poll: process
 * a bounded batch of RX descriptors, then either reschedule itself or
 * re-enable the RX interrupt once the channel is drained.
 */
static void do_ptm_tasklet(unsigned long arg)
{
    unsigned int work_to_do = 25;
    unsigned int work_done = 0;

    ASSERT(arg >= 0 && arg < ARRAY_SIZE(g_net_dev), "arg = %lu (wrong value)", arg);

    while ( work_done < work_to_do && WRX_DMA_CHANNEL_CONFIG(arg)->vlddes > 0 ) {
        if ( mailbox_rx_irq_handler(arg) < 0 )
            break;

        work_done++;
    }

    // interface down
    if ( !netif_running(g_net_dev[arg]) )
        return;

    // no more traffic
    if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
        // clear interrupt
        IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_ISRC);
        // double check: a packet may have arrived after the first read
        if ( WRX_DMA_CHANNEL_CONFIG(arg)->vlddes == 0 ) {
            // re-enable the RX interrupt and stop polling
            IFX_REG_W32_MASK(0, 1 << arg, MBOX_IGU1_IER);
            return;
        }
    }

    // next round
    tasklet_schedule(&g_ptm_tasklet[arg]);
}
#endif
785
#if defined(DEBUG_DUMP_SKB) && DEBUG_DUMP_SKB
/*
 * Hex-dump up to "len" bytes of an skb to the kernel log, gated by the
 * TX/RX dump bits in ifx_ptm_dbg_enable.  "port"/"ch" tag the output;
 * a negative ch suppresses the port/channel line.
 */
static void dump_skb(struct sk_buff *skb, u32 len, char *title, int port, int ch, int is_tx)
{
    int i;

    if ( !(ifx_ptm_dbg_enable & (is_tx ? DBG_ENABLE_MASK_DUMP_SKB_TX : DBG_ENABLE_MASK_DUMP_SKB_RX)) )
        return;

    if ( skb->len < len )
        len = skb->len;

    if ( len > rx_max_packet_size ) {
        printk("too big data length: skb = %08x, skb->data = %08x, skb->len = %d\n", (u32)skb, (u32)skb->data, skb->len);
        return;
    }

    if ( ch >= 0 )
        printk("%s (port %d, ch %d)\n", title, port, ch);
    else
        printk("%s\n", title);
    printk(" skb->data = %08X, skb->tail = %08X, skb->len = %d\n", (u32)skb->data, (u32)skb->tail, (int)skb->len);

    /* 16 bytes per row, each row prefixed with its starting offset */
    for ( i = 0; i < len; i++ ) {
        if ( i % 16 == 0 )
            printk(" %4d:", i);
        printk(" %02X", (int)(*((char*)skb->data + i) & 0xFF));
        if ( i % 16 == 15 )
            printk("\n");
    }
    if ( len % 16 != 0 )
        printk("\n");
}
#endif
818
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
/*
 * Debug helper: swap source/destination MAC addresses (and, for IPv4,
 * source/destination IP addresses) in place, then write the modified
 * header back to memory for DMA.  Broadcast/multicast frames are left
 * untouched.
 */
static void skb_swap(struct sk_buff *skb)
{
    unsigned char scratch[8];
    unsigned char *pos = skb->data;

    /* bypass broadcast/multicast frames */
    if ( (pos[0] & 0x01) != 0 )
        return;

    /* swap destination and source MAC */
    memcpy(scratch, pos, 6);
    memcpy(pos, pos + 6, 6);
    memcpy(pos + 6, scratch, 6);
    pos += 12;

    /* skip any number of VLAN tags */
    while ( pos[0] == 0x81 && pos[1] == 0x00 )
        pos += 4;

    /* IPv4: swap source and destination addresses as well */
    if ( pos[0] == 0x08 && pos[1] == 0x00 ) {
        pos += 14;
        memcpy(scratch, pos, 4);
        memcpy(pos, pos + 4, 4);
        memcpy(pos + 4, scratch, 4);
        pos += 8;
    }

    dma_cache_wback((unsigned long)skb->data, (unsigned long)pos - (unsigned long)skb->data);
}
#endif
849
/*
 * Create /proc/driver/ifx_ptm and its entries (version, wanmib, and
 * optionally genconf/regs/dbg).  Uses the legacy
 * create_proc_read_entry()/create_proc_entry() API (removed in kernel
 * 3.10), so this body only builds when ENABLE_DBG_PROC is set on a
 * sufficiently old kernel.
 */
static INLINE void proc_file_create(void)
{
#if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
    struct proc_dir_entry *res;

    g_ptm_dir = proc_mkdir("driver/ifx_ptm", NULL);

    create_proc_read_entry("version",
                           0,
                           g_ptm_dir,
                           proc_read_version,
                           NULL);

    /* wanmib supports both read (dump) and write ("clear") */
    res = create_proc_entry("wanmib",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_wanmib;
        res->write_proc = proc_write_wanmib;
    }

#if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
    create_proc_read_entry("genconf",
                           0,
                           g_ptm_dir,
                           proc_read_genconf,
                           NULL);

#ifdef CONFIG_AR9
    create_proc_read_entry("regs",
                           0,
                           g_ptm_dir,
                           ifx_ptm_proc_read_regs,
                           NULL);
#endif
#endif

    /* dbg controls ifx_ptm_dbg_enable */
    res = create_proc_entry("dbg",
                            0,
                            g_ptm_dir);
    if ( res != NULL ) {
        res->read_proc = proc_read_dbg;
        res->write_proc = proc_write_dbg;
    }
#endif
}
896
897 static INLINE void proc_file_delete(void)
898 {
899 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
900 remove_proc_entry("dbg", g_ptm_dir);
901 #endif
902
903 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
904 #ifdef CONFIG_AR9
905 remove_proc_entry("regs", g_ptm_dir);
906 #endif
907
908 remove_proc_entry("genconf", g_ptm_dir);
909 #endif
910
911 remove_proc_entry("wanmib", g_ptm_dir);
912
913 remove_proc_entry("version", g_ptm_dir);
914
915 remove_proc_entry("driver/ifx_ptm", NULL);
916 }
917
918 static int proc_read_version(char *buf, char **start, off_t offset, int count, int *eof, void *data)
919 {
920 int len = 0;
921
922 len += ifx_ptm_version(buf + len);
923
924 if ( offset >= len ) {
925 *start = buf;
926 *eof = 1;
927 return 0;
928 }
929 *start = buf + offset;
930 if ( (len -= offset) > count )
931 return count;
932 *eof = 1;
933 return len;
934 }
935
936 static int proc_read_wanmib(char *page, char **start, off_t off, int count, int *eof, void *data)
937 {
938 int len = 0;
939 int i;
940 char *title[] = {
941 "dsl0\n",
942 "dslfast0\n"
943 };
944
945 for ( i = 0; i < ARRAY_SIZE(title); i++ ) {
946 len += sprintf(page + off + len, title[i]);
947 len += sprintf(page + off + len, " wrx_correct_pdu = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu);
948 len += sprintf(page + off + len, " wrx_correct_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_correct_pdu_bytes);
949 len += sprintf(page + off + len, " wrx_tccrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu);
950 len += sprintf(page + off + len, " wrx_tccrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_tccrc_err_pdu_bytes);
951 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu);
952 len += sprintf(page + off + len, " wrx_ethcrc_err_pdu_bytes = %d\n", WAN_MIB_TABLE[i].wrx_ethcrc_err_pdu_bytes);
953 len += sprintf(page + off + len, " wrx_nodesc_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_nodesc_drop_pdu);
954 len += sprintf(page + off + len, " wrx_len_violation_drop_pdu = %d\n", WAN_MIB_TABLE[i].wrx_len_violation_drop_pdu);
955 len += sprintf(page + off + len, " wrx_idle_bytes = %d\n", WAN_MIB_TABLE[i].wrx_idle_bytes);
956 len += sprintf(page + off + len, " wrx_nonidle_cw = %d\n", WAN_MIB_TABLE[i].wrx_nonidle_cw);
957 len += sprintf(page + off + len, " wrx_idle_cw = %d\n", WAN_MIB_TABLE[i].wrx_idle_cw);
958 len += sprintf(page + off + len, " wrx_err_cw = %d\n", WAN_MIB_TABLE[i].wrx_err_cw);
959 len += sprintf(page + off + len, " wtx_total_pdu = %d\n", WAN_MIB_TABLE[i].wtx_total_pdu);
960 len += sprintf(page + off + len, " wtx_total_bytes = %d\n", WAN_MIB_TABLE[i].wtx_total_bytes);
961 }
962
963 *eof = 1;
964
965 return len;
966 }
967
968 static int proc_write_wanmib(struct file *file, const char *buf, unsigned long count, void *data)
969 {
970 char str[2048];
971 char *p;
972 int len, rlen;
973
974 int i;
975
976 len = count < sizeof(str) ? count : sizeof(str) - 1;
977 rlen = len - copy_from_user(str, buf, len);
978 while ( rlen && str[rlen - 1] <= ' ' )
979 rlen--;
980 str[rlen] = 0;
981 for ( p = str; *p && *p <= ' '; p++, rlen-- );
982 if ( !*p )
983 return count;
984
985 if ( stricmp(p, "clear") == 0 || stricmp(p, "clean") == 0 ) {
986 for ( i = 0; i < 2; i++ )
987 memset((void*)&WAN_MIB_TABLE[i], 0, sizeof(WAN_MIB_TABLE[i]));
988 }
989
990 return count;
991 }
992
993 #if defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
994
/*
 *  proc read handler dumping the PPE general configuration registers
 *  (descriptor delay, RX/TX DMA channel enables, EFM-TC CRC setup) and the
 *  per-channel RX/TX port and DMA descriptor ring configuration.
 *  The whole report is formatted into a local buffer first, then only the
 *  window [off, off+count) is copied back into 'page' (legacy proc_fs
 *  read-page protocol).  Returns the number of bytes placed at 'page'.
 */
static int proc_read_genconf(char *page, char **start, off_t off, int count, int *eof, void *data)
{
    int len = 0;                // bytes of the report generated so far
    int len_max = off + count;  // first byte that no longer fits this read
    char *pstr;                 // write cursor into the caller's page
    char str[2048];             // staging buffer for one formatted chunk
    int llen = 0;               // length of the current chunk in 'str'
    int i;
    unsigned long bit;

    pstr = *start = page;

    __sync();

    llen += sprintf(str + llen, "CFG_WAN_WRDES_DELAY (0x%08X): %d\n", (unsigned int)CFG_WAN_WRDES_DELAY, IFX_REG_R32(CFG_WAN_WRDES_DELAY));
    llen += sprintf(str + llen, "CFG_WRX_DMACH_ON (0x%08X):", (unsigned int)CFG_WRX_DMACH_ON);
    // one on/off flag bit per RX DMA channel
    for ( i = 0, bit = 1; i < MAX_RX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WRX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WTX_DMACH_ON (0x%08X):", (unsigned int)CFG_WTX_DMACH_ON);
    // one on/off flag bit per TX DMA channel
    for ( i = 0, bit = 1; i < MAX_TX_DMA_CHANNEL_NUMBER; i++, bit <<= 1 )
        llen += sprintf(str + llen, " %d - %s", i, (IFX_REG_R32(CFG_WTX_DMACH_ON) & bit) ? "on " : "off");
    llen += sprintf(str + llen, "\n");
    llen += sprintf(str + llen, "CFG_WRX_LOOK_BITTH (0x%08X): %d\n", (unsigned int)CFG_WRX_LOOK_BITTH, IFX_REG_R32(CFG_WRX_LOOK_BITTH));
    llen += sprintf(str + llen, "CFG_ETH_EFMTC_CRC (0x%08X): rx_tc_crc_len - %2d, rx_tc_crc_check - %s\n", (unsigned int)CFG_ETH_EFMTC_CRC, CFG_ETH_EFMTC_CRC->rx_tc_crc_len, CFG_ETH_EFMTC_CRC->rx_tc_crc_check ? " on" : "off");
    llen += sprintf(str + llen, " rx_eth_crc_check - %s, rx_eth_crc_present - %s\n", CFG_ETH_EFMTC_CRC->rx_eth_crc_check ? " on" : "off", CFG_ETH_EFMTC_CRC->rx_eth_crc_present ? " on" : "off");
    llen += sprintf(str + llen, " tx_tc_crc_len - %2d, tx_tc_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_tc_crc_len, CFG_ETH_EFMTC_CRC->tx_tc_crc_gen ? " on" : "off");
    llen += sprintf(str + llen, " tx_eth_crc_gen - %s\n", CFG_ETH_EFMTC_CRC->tx_eth_crc_gen ? " on" : "off");

    llen += sprintf(str + llen, "RX Port:\n");
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). mfs - %5d, dmach - %d, local_state - %d, partner_state - %d\n", i, (unsigned int)WRX_PORT_CONFIG(i), WRX_PORT_CONFIG(i)->mfs, WRX_PORT_CONFIG(i)->dmach, WRX_PORT_CONFIG(i)->local_state, WRX_PORT_CONFIG(i)->partner_state);
    llen += sprintf(str + llen, "RX DMA Channel:\n");
    // desba is stored as a dword address; << 2 converts back to a byte
    // address and | KSEG1 gives the uncached CPU view of it
    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WRX_DMA_CHANNEL_CONFIG(i), WRX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WRX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WRX_DMA_CHANNEL_CONFIG(i)->deslen, WRX_DMA_CHANNEL_CONFIG(i)->vlddes);

    llen += sprintf(str + llen, "TX Port:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). tx_cwth2 - %d, tx_cwth1 - %d\n", i, (unsigned int)WTX_PORT_CONFIG(i), WTX_PORT_CONFIG(i)->tx_cwth2, WTX_PORT_CONFIG(i)->tx_cwth1);
    llen += sprintf(str + llen, "TX DMA Channel:\n");
    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ )
        llen += sprintf(str + llen, " %d (0x%08X). desba - 0x%08X (0x%08X), deslen - %d, vlddes - %d\n", i, (unsigned int)WTX_DMA_CHANNEL_CONFIG(i), WTX_DMA_CHANNEL_CONFIG(i)->desba, ((unsigned int)WTX_DMA_CHANNEL_CONFIG(i)->desba << 2) | KSEG1, WTX_DMA_CHANNEL_CONFIG(i)->deslen, WTX_DMA_CHANNEL_CONFIG(i)->vlddes);

    // copy only the part of the chunk that falls inside [off, len_max)
    if ( len <= off && len + llen > off )
    {
        // chunk straddles 'off': copy from the offset onwards
        memcpy(pstr, str + off - len, len + llen - off);
        pstr += len + llen - off;
    }
    else if ( len > off )
    {
        // already past 'off': copy the whole chunk
        memcpy(pstr, str, llen);
        pstr += llen;
    }
    len += llen;
    if ( len >= len_max )
        goto PROC_READ_GENCONF_OVERRUN_END;

    *eof = 1;

    return len - off;

PROC_READ_GENCONF_OVERRUN_END:
    // page filled before the report ended: report only the complete chunks
    return len - llen - off;
}
1059
1060 #endif // defined(ENABLE_FW_PROC) && ENABLE_FW_PROC
1061
1062 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1063
1064 static int proc_read_dbg(char *page, char **start, off_t off, int count, int *eof, void *data)
1065 {
1066 int len = 0;
1067
1068 len += sprintf(page + off + len, "error print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ERR) ? "enabled" : "disabled");
1069 len += sprintf(page + off + len, "debug print - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DEBUG_PRINT) ? "enabled" : "disabled");
1070 len += sprintf(page + off + len, "assert - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_ASSERT) ? "enabled" : "disabled");
1071 len += sprintf(page + off + len, "dump rx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_RX) ? "enabled" : "disabled");
1072 len += sprintf(page + off + len, "dump tx skb - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_DUMP_SKB_TX) ? "enabled" : "disabled");
1073 len += sprintf(page + off + len, "mac swap - %s\n", (ifx_ptm_dbg_enable & DBG_ENABLE_MASK_MAC_SWAP) ? "enabled" : "disabled");
1074
1075 *eof = 1;
1076
1077 return len;
1078 }
1079
1080 static int proc_write_dbg(struct file *file, const char *buf, unsigned long count, void *data)
1081 {
1082 static const char *dbg_enable_mask_str[] = {
1083 " error print",
1084 " err",
1085 " debug print",
1086 " dbg",
1087 " assert",
1088 " assert",
1089 " dump rx skb",
1090 " rx",
1091 " dump tx skb",
1092 " tx",
1093 " dump init",
1094 " init",
1095 " dump qos",
1096 " qos",
1097 " mac swap",
1098 " swap",
1099 " all"
1100 };
1101 static const int dbg_enable_mask_str_len[] = {
1102 12, 4,
1103 12, 4,
1104 7, 7,
1105 12, 3,
1106 12, 3,
1107 10, 5,
1108 9, 4,
1109 9, 5,
1110 4
1111 };
1112 unsigned int dbg_enable_mask[] = {
1113 DBG_ENABLE_MASK_ERR,
1114 DBG_ENABLE_MASK_DEBUG_PRINT,
1115 DBG_ENABLE_MASK_ASSERT,
1116 DBG_ENABLE_MASK_DUMP_SKB_RX,
1117 DBG_ENABLE_MASK_DUMP_SKB_TX,
1118 DBG_ENABLE_MASK_DUMP_INIT,
1119 DBG_ENABLE_MASK_DUMP_QOS,
1120 DBG_ENABLE_MASK_MAC_SWAP,
1121 DBG_ENABLE_MASK_ALL
1122 };
1123
1124 char str[2048];
1125 char *p;
1126
1127 int len, rlen;
1128
1129 int f_enable = 0;
1130 int i;
1131
1132 len = count < sizeof(str) ? count : sizeof(str) - 1;
1133 rlen = len - copy_from_user(str, buf, len);
1134 while ( rlen && str[rlen - 1] <= ' ' )
1135 rlen--;
1136 str[rlen] = 0;
1137 for ( p = str; *p && *p <= ' '; p++, rlen-- );
1138 if ( !*p )
1139 return 0;
1140
1141 // debugging feature for enter/leave showtime
1142 if ( strincmp(p, "enter", 5) == 0 && ifx_mei_atm_showtime_enter != NULL )
1143 ifx_mei_atm_showtime_enter(NULL, NULL);
1144 else if ( strincmp(p, "leave", 5) == 0 && ifx_mei_atm_showtime_exit != NULL )
1145 ifx_mei_atm_showtime_exit();
1146
1147 if ( strincmp(p, "enable", 6) == 0 ) {
1148 p += 6;
1149 f_enable = 1;
1150 }
1151 else if ( strincmp(p, "disable", 7) == 0 ) {
1152 p += 7;
1153 f_enable = -1;
1154 }
1155 else if ( strincmp(p, "help", 4) == 0 || *p == '?' ) {
1156 printk("echo <enable/disable> [err/dbg/assert/rx/tx/init/qos/swap/all] > /proc/driver/ifx_ptm/dbg\n");
1157 }
1158
1159 if ( f_enable ) {
1160 if ( *p == 0 ) {
1161 if ( f_enable > 0 )
1162 ifx_ptm_dbg_enable |= DBG_ENABLE_MASK_ALL & ~DBG_ENABLE_MASK_MAC_SWAP;
1163 else
1164 ifx_ptm_dbg_enable &= ~DBG_ENABLE_MASK_ALL | DBG_ENABLE_MASK_MAC_SWAP;
1165 }
1166 else {
1167 do {
1168 for ( i = 0; i < ARRAY_SIZE(dbg_enable_mask_str); i++ )
1169 if ( strincmp(p, dbg_enable_mask_str[i], dbg_enable_mask_str_len[i]) == 0 ) {
1170 if ( f_enable > 0 )
1171 ifx_ptm_dbg_enable |= dbg_enable_mask[i >> 1];
1172 else
1173 ifx_ptm_dbg_enable &= ~dbg_enable_mask[i >> 1];
1174 p += dbg_enable_mask_str_len[i];
1175 break;
1176 }
1177 } while ( i < ARRAY_SIZE(dbg_enable_mask_str) );
1178 }
1179 }
1180
1181 return count;
1182 }
1183
1184 #endif // defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1185
1186 static INLINE int stricmp(const char *p1, const char *p2)
1187 {
1188 int c1, c2;
1189
1190 while ( *p1 && *p2 )
1191 {
1192 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1193 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1194 if ( (c1 -= c2) )
1195 return c1;
1196 p1++;
1197 p2++;
1198 }
1199
1200 return *p1 - *p2;
1201 }
1202
1203 #if defined(ENABLE_DBG_PROC) && ENABLE_DBG_PROC
1204 static INLINE int strincmp(const char *p1, const char *p2, int n)
1205 {
1206 int c1 = 0, c2;
1207
1208 while ( n && *p1 && *p2 )
1209 {
1210 c1 = *p1 >= 'A' && *p1 <= 'Z' ? *p1 + 'a' - 'A' : *p1;
1211 c2 = *p2 >= 'A' && *p2 <= 'Z' ? *p2 + 'a' - 'A' : *p2;
1212 if ( (c1 -= c2) )
1213 return c1;
1214 p1++;
1215 p2++;
1216 n--;
1217 }
1218
1219 return n ? *p1 - *p2 : c1;
1220 }
1221 #endif
1222
1223 static INLINE int ifx_ptm_version(char *buf)
1224 {
1225 int len = 0;
1226 unsigned int major, minor;
1227
1228 ifx_ptm_get_fw_ver(&major, &minor);
1229
1230 len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
1231 len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
1232
1233 return len;
1234 }
1235
1236 static INLINE void check_parameters(void)
1237 {
1238 /* There is a delay between PPE write descriptor and descriptor is */
1239 /* really stored in memory. Host also has this delay when writing */
1240 /* descriptor. So PPE will use this value to determine if the write */
1241 /* operation makes effect. */
1242 if ( write_desc_delay < 0 )
1243 write_desc_delay = 0;
1244
1245 /* Because of the limitation of length field in descriptors, the packet */
1246 /* size could not be larger than 64K minus overhead size. */
1247 if ( rx_max_packet_size < ETH_MIN_FRAME_LENGTH )
1248 rx_max_packet_size = ETH_MIN_FRAME_LENGTH;
1249 else if ( rx_max_packet_size > 65536 - 1 )
1250 rx_max_packet_size = 65536 - 1;
1251
1252 if ( dma_rx_descriptor_length < 2 )
1253 dma_rx_descriptor_length = 2;
1254 if ( dma_tx_descriptor_length < 2 )
1255 dma_tx_descriptor_length = 2;
1256 }
1257
/*
 *  Allocate and initialize the driver's private data: RX/TX descriptor
 *  rings and the TX skb pointer array for every interface, plus one
 *  preallocated RX skb per RX descriptor.
 *  Returns 0 on success, -1 on any allocation failure.  On failure the
 *  caller is expected to invoke clear_priv_data() to release whatever
 *  was already allocated (the base pointers are recorded before use).
 */
static INLINE int init_priv_data(void)
{
    void *p;
    int i;
    struct rx_descriptor rx_desc = {0};
    struct sk_buff *skb;
    volatile struct rx_descriptor *p_rx_desc;
    volatile struct tx_descriptor *p_tx_desc;
    struct sk_buff **ppskb;

    // clear ptm private data structure
    memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));

    // allocate memory for RX descriptors
    // extra DESC_ALIGNMENT bytes allow aligning the ring start below
    p = kzalloc(MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    // invalidate cache lines: the ring is accessed uncached via KSEG1 below
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_rx_descriptor_length * sizeof(struct rx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.rx_desc_base = p;
    //p = (void *)((((unsigned int)p + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);

    // allocate memory for TX descriptors
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct tx_descriptor) + DESC_ALIGNMENT);
    g_ptm_priv_data.tx_desc_base = p;

    // allocate memroy for TX skb pointers
    // +4 bytes give room for the 4-byte alignment applied below
    p = kzalloc(MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4, GFP_KERNEL);
    if ( p == NULL )
        return -1;
    dma_cache_wback_inv((unsigned long)p, MAX_ITF_NUMBER * dma_tx_descriptor_length * sizeof(struct sk_buff *) + 4);
    g_ptm_priv_data.tx_skb_base = p;

    // round each ring base up to DESC_ALIGNMENT and map it through KSEG1
    // (uncached) so CPU and PPE always see the same descriptor contents
    p_rx_desc = (volatile struct rx_descriptor *)((((unsigned int)g_ptm_priv_data.rx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    p_tx_desc = (volatile struct tx_descriptor *)((((unsigned int)g_ptm_priv_data.tx_desc_base + DESC_ALIGNMENT - 1) & ~(DESC_ALIGNMENT - 1)) | KSEG1);
    ppskb = (struct sk_buff **)(((unsigned int)g_ptm_priv_data.tx_skb_base + 3) & ~3);
    // carve the flat allocations into per-interface slices
    for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
        g_ptm_priv_data.itf[i].rx_desc = &p_rx_desc[i * dma_rx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_desc = &p_tx_desc[i * dma_tx_descriptor_length];
        g_ptm_priv_data.itf[i].tx_skb = &ppskb[i * dma_tx_descriptor_length];
    }

    // template for every RX descriptor: owned by the PPE, one full frame
    // per descriptor (sop == eop), sized for the largest accepted packet
    rx_desc.own = 1;
    rx_desc.c = 0;
    rx_desc.sop = 1;
    rx_desc.eop = 1;
    rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
    rx_desc.id = 0;
    rx_desc.err = 0;
    rx_desc.datalen = rx_max_packet_size;
    for ( i = 0; i < MAX_ITF_NUMBER * dma_rx_descriptor_length; i++ ) {
        skb = alloc_skb_rx();
        if ( skb == NULL )
            return -1;
        // dataptr holds the skb data address as a dword address (>> 2)
        rx_desc.dataptr = ((unsigned int)skb->data >> 2) & 0x0FFFFFFF;
        p_rx_desc[i] = rx_desc;
    }

    return 0;
}
1320
1321 static INLINE void clear_priv_data(void)
1322 {
1323 int i, j;
1324 struct sk_buff *skb;
1325
1326 for ( i = 0; i < MAX_ITF_NUMBER; i++ ) {
1327 if ( g_ptm_priv_data.itf[i].tx_skb != NULL ) {
1328 for ( j = 0; j < dma_tx_descriptor_length; j++ )
1329 if ( g_ptm_priv_data.itf[i].tx_skb[j] != NULL )
1330 dev_kfree_skb_any(g_ptm_priv_data.itf[i].tx_skb[j]);
1331 }
1332 if ( g_ptm_priv_data.itf[i].rx_desc != NULL ) {
1333 for ( j = 0; j < dma_rx_descriptor_length; j++ ) {
1334 if ( g_ptm_priv_data.itf[i].rx_desc[j].sop || g_ptm_priv_data.itf[i].rx_desc[j].eop ) { // descriptor initialized
1335 skb = get_skb_rx_pointer(g_ptm_priv_data.itf[i].rx_desc[j].dataptr);
1336 dev_kfree_skb_any(skb);
1337 }
1338 }
1339 }
1340 }
1341
1342 if ( g_ptm_priv_data.rx_desc_base != NULL )
1343 kfree(g_ptm_priv_data.rx_desc_base);
1344
1345 if ( g_ptm_priv_data.tx_desc_base != NULL )
1346 kfree(g_ptm_priv_data.tx_desc_base);
1347
1348 if ( g_ptm_priv_data.tx_skb_base != NULL )
1349 kfree(g_ptm_priv_data.tx_skb_base);
1350 }
1351
/*
 *  Program the PPE firmware tables and general configuration registers:
 *  CDM block 1 memory, global WAN config, and the per-channel RX/TX port
 *  and DMA descriptor ring configuration.  Must run after init_priv_data()
 *  since the ring base addresses come from g_ptm_priv_data.
 */
static INLINE void init_tables(void)
{
    int i;
    volatile unsigned int *p;
    struct wrx_dma_channel_config rx_config = {0};
    struct wtx_dma_channel_config tx_config = {0};
    struct wrx_port_cfg_status rx_port_cfg = { 0 };
    struct wtx_port_cfg tx_port_cfg = { 0 };

    /*
     *  CDM Block 1
     */
    IFX_REG_W32(CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00), CDM_CFG);  // CDM block 1 must be data memory and mapped to 0x5000 (dword addr)
    p = CDM_DATA_MEMORY(0, 0);  // Clear CDM block 1
    for ( i = 0; i < CDM_DATA_MEMORY_DWLEN; i++, p++ )
        IFX_REG_W32(0, p);

    /*
     *  General Registers
     */
    IFX_REG_W32(write_desc_delay, CFG_WAN_WRDES_DELAY);
    // enable all RX and TX DMA channels (one bit per channel)
    IFX_REG_W32((1 << MAX_RX_DMA_CHANNEL_NUMBER) - 1, CFG_WRX_DMACH_ON);
    IFX_REG_W32((1 << MAX_TX_DMA_CHANNEL_NUMBER) - 1, CFG_WTX_DMACH_ON);

    IFX_REG_W32(8, CFG_WRX_LOOK_BITTH); // WAN RX EFM-TC Looking Threshold

    IFX_REG_W32(eth_efmtc_crc_cfg, CFG_ETH_EFMTC_CRC);

    /*
     *  WRX DMA Channel Configuration Table
     */
    rx_config.deslen = dma_rx_descriptor_length;
    rx_port_cfg.mfs = ETH_MAX_FRAME_LENGTH;
    rx_port_cfg.local_state = 0;     // looking for sync
    rx_port_cfg.partner_state = 0;   // parter receiver is out of sync

    for ( i = 0; i < MAX_RX_DMA_CHANNEL_NUMBER; i++ ) {
        // ring base as dword address (>> 2), as the PPE expects
        rx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].rx_desc >> 2) & 0x0FFFFFFF;
        *WRX_DMA_CHANNEL_CONFIG(i) = rx_config;

        // port i drains into DMA channel i
        rx_port_cfg.dmach = i;
        *WRX_PORT_CONFIG(i) = rx_port_cfg;
    }

    /*
     *  WTX DMA Channel Configuration Table
     */
    tx_config.deslen = dma_tx_descriptor_length;
    // TX codeword thresholds for the EFM-TC scheduler
    tx_port_cfg.tx_cwth1 = 5;
    tx_port_cfg.tx_cwth2 = 4;

    for ( i = 0; i < MAX_TX_DMA_CHANNEL_NUMBER; i++ ) {
        tx_config.desba = ((unsigned int)g_ptm_priv_data.itf[i].tx_desc >> 2) & 0x0FFFFFFF;
        *WTX_DMA_CHANNEL_CONFIG(i) = tx_config;

        *WTX_PORT_CONFIG(i) = tx_port_cfg;
    }
}
1410
1411
1412
1413 /*
1414 * ####################################
1415 * Global Function
1416 * ####################################
1417 */
1418
1419 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
1420 {
1421 int i;
1422
1423 g_showtime = 1;
1424
1425 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1426 netif_carrier_on(g_net_dev[i]);
1427
1428 printk("enter showtime\n");
1429
1430 return 0;
1431 }
1432
1433 static int ptm_showtime_exit(void)
1434 {
1435 int i;
1436
1437 if ( !g_showtime )
1438 return -1;
1439
1440 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1441 netif_carrier_off(g_net_dev[i]);
1442
1443 g_showtime = 0;
1444
1445 printk("leave showtime\n");
1446
1447 return 0;
1448 }
1449
1450
1451
1452 /*
1453 * ####################################
1454 * Init/Cleanup API
1455 * ####################################
1456 */
1457
1458 /*
1459 * Description:
1460 * Initialize global variables, PP32, comunication structures, register IRQ
1461 * and register device.
1462 * Input:
1463 * none
1464 * Output:
1465 * 0 --- successful
1466 * else --- failure, usually it is negative value of error code
1467 */
/*
 *  Module init: sanitize parameters, allocate private data, program the
 *  PPE, create and register the net devices, hook the mailbox IRQ, start
 *  the PP32, create proc entries and install the showtime callbacks.
 *  Returns 0 on success or a negative error code; on failure everything
 *  already set up is unwound through the goto-cleanup chain at the end.
 */
static int ifx_ptm_init(void)
{
    int ret;
    struct port_cell_info port_cell = {0};
    void *xdata_addr = NULL;
    int i;
    char ver_str[256];

    check_parameters();

    ret = init_priv_data();
    if ( ret != 0 ) {
        err("INIT_PRIV_DATA_FAIL");
        goto INIT_PRIV_DATA_FAIL;
    }

    ifx_ptm_init_chip();
    init_tables();

    // create one net device per PTM interface
    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
        if ( g_net_dev[i] == NULL )
            goto ALLOC_NETDEV_FAIL;
        ptm_setup(g_net_dev[i], i);
    }

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        ret = register_netdev(g_net_dev[i]);
        if ( ret != 0 )
            goto REGISTER_NETDEV_FAIL;
    }

    /* register interrupt handler */
    // IRQF_DISABLED was removed in kernel 4.1; flags must be 0 there
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
#else
    ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
#endif
    if ( ret ) {
        if ( ret == -EBUSY ) {
            err("IRQ may be occupied by other driver, please reconfig to disable it.");
        }
        else {
            err("request_irq fail");
        }
        goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
    }
    // keep the IRQ masked until the PP32 is running and IGU1 is configured
    disable_irq(PPE_MAILBOX_IGU1_INT);

    ret = ifx_pp32_start(0);
    if ( ret ) {
        err("ifx_pp32_start fail!");
        goto PP32_START_FAIL;
    }
    // mask all mailbox interrupt sources, then acknowledge any pending ones
    IFX_REG_W32(0, MBOX_IGU1_IER);
    IFX_REG_W32(~0, MBOX_IGU1_ISRC);

    enable_irq(PPE_MAILBOX_IGU1_INT);


    proc_file_create();

    // the DSL may already be in showtime when the module loads; query the
    // MEI driver and catch up if so
    port_cell.port_num = 1;
    ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &xdata_addr);
    if ( g_showtime ) {
        ptm_showtime_enter(&port_cell, &xdata_addr);
    }

    // install our showtime callbacks for future DSL state changes
    ifx_mei_atm_showtime_enter = ptm_showtime_enter;
    ifx_mei_atm_showtime_exit = ptm_showtime_exit;

    ifx_ptm_version(ver_str);
    printk(KERN_INFO "%s", ver_str);

    printk("ifxmips_ptm: PTM init succeed\n");

    return 0;

    // unwind in reverse order of setup; 'i' is reused as the undo index:
    // a failed register/alloc loop leaves 'i' at the first not-done entry
PP32_START_FAIL:
    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
    i = ARRAY_SIZE(g_net_dev);
REGISTER_NETDEV_FAIL:
    while ( i-- )
        unregister_netdev(g_net_dev[i]);
    i = ARRAY_SIZE(g_net_dev);
ALLOC_NETDEV_FAIL:
    while ( i-- ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }
INIT_PRIV_DATA_FAIL:
    clear_priv_data();
    printk("ifxmips_ptm: PTM init failed\n");
    return ret;
}
1564
1565 /*
1566 * Description:
1567 * Release memory, free IRQ, and deregister device.
1568 * Input:
1569 * none
1570 * Output:
1571 * none
1572 */
/*
 *  Module exit: tear everything down in the reverse order of ifx_ptm_init:
 *  detach the showtime callbacks first (so the MEI driver stops calling
 *  into us), then remove proc entries, stop the PP32, free the IRQ,
 *  unregister and free the net devices, and release chip and private data.
 */
static void __exit ifx_ptm_exit(void)
{
    int i;

    // detach callbacks before anything else is torn down
    ifx_mei_atm_showtime_enter = NULL;
    ifx_mei_atm_showtime_exit = NULL;

    proc_file_delete();


    ifx_pp32_stop(0);

    free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);

    // unregister all devices before freeing any of them
    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
        unregister_netdev(g_net_dev[i]);

    for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
        free_netdev(g_net_dev[i]);
        g_net_dev[i] = NULL;
    }

    ifx_ptm_uninit_chip();

    clear_priv_data();
}
1599
/* Module entry/exit points: ifx_ptm_init sets up the PTM driver,
 * ifx_ptm_exit tears it down in reverse order. */
module_init(ifx_ptm_init);
module_exit(ifx_ptm_exit);