ltq-atm/ltq-ptm: re-enable/fix reset_ppe() functionality for VR9
[openwrt/openwrt.git] package/kernel/lantiq/ltq-ptm/src/ifxmips_ptm_vdsl.c
1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_vdsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for VR9)
10 ** COPYRIGHT : Copyright (c) 2006
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
18 **
19 ** HISTORY
20 ** $Date $Author $Comment
21 ** 07 JUL 2009 Xu Liang Init Version
22 *******************************************************************************/
23
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/ctype.h>
29 #include <linux/errno.h>
30 #include <linux/proc_fs.h>
31 #include <linux/init.h>
32 #include <linux/ioctl.h>
33 #include <linux/etherdevice.h>
34 #include <linux/interrupt.h>
35 #include <linux/netdevice.h>
36 #include <linux/platform_device.h>
37 #include <linux/of_device.h>
38
39 #include "ifxmips_ptm_vdsl.h"
40 #include <lantiq_soc.h>
41
42 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
43 #define MODULE_PARM(a, b) module_param(a, int, 0)
44
45 static int wanqos_en = 0;
46 static int queue_gamma_map[4] = {0xFE, 0x01, 0x00, 0x00};
47
48 MODULE_PARM(wanqos_en, "i");
49 MODULE_PARM_DESC(wanqos_en, "WAN QoS support, 1 - enabled, 0 - disabled.");
50
51 MODULE_PARM_ARRAY(queue_gamma_map, "4-4i");
52 MODULE_PARM_DESC(queue_gamma_map, "TX QoS queues mapping to 4 TX Gamma interfaces.");
53
54 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
55 extern int (*ifx_mei_atm_showtime_exit)(void);
56 extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
57
58 static int g_showtime = 0;
59 static void *g_xdata_addr = NULL;
60
61
62 #define ENABLE_TMP_DBG 0
63
64 unsigned long cgu_get_pp32_clock(void)
65 {
66 struct clk *c = clk_get_ppe();
67 unsigned long rate = clk_get_rate(c);
68 clk_put(c);
69 return rate;
70 }
71
72 static void ptm_setup(struct net_device *, int);
73 static struct net_device_stats *ptm_get_stats(struct net_device *);
74 static int ptm_open(struct net_device *);
75 static int ptm_stop(struct net_device *);
76 static unsigned int ptm_poll(int, unsigned int);
77 static int ptm_napi_poll(struct napi_struct *, int);
78 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
79 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
80 static int ptm_change_mtu(struct net_device *, int);
81 #endif
82 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
83 static void ptm_tx_timeout(struct net_device *);
84
85 static inline struct sk_buff* alloc_skb_rx(void);
86 static inline struct sk_buff* alloc_skb_tx(unsigned int);
87 static inline struct sk_buff *get_skb_pointer(unsigned int);
88 static inline int get_tx_desc(unsigned int, unsigned int *);
89
90 /*
91 * Mailbox handler and signal function
92 */
93 static irqreturn_t mailbox_irq_handler(int, void *);
94
95 /*
96 * Tasklet to Handle Swap Descriptors
97 */
98 static void do_swap_desc_tasklet(unsigned long);
99
100
101 /*
102 * Init & clean-up functions
103 */
104 static inline int init_priv_data(void);
105 static inline void clear_priv_data(void);
106 static inline int init_tables(void);
107 static inline void clear_tables(void);
108
109 static int g_wanqos_en = 0;
110
111 static int g_queue_gamma_map[4];
112
113 static struct ptm_priv_data g_ptm_priv_data;
114
115 static struct net_device_ops g_ptm_netdev_ops = {
116 .ndo_get_stats = ptm_get_stats,
117 .ndo_open = ptm_open,
118 .ndo_stop = ptm_stop,
119 .ndo_start_xmit = ptm_hard_start_xmit,
120 .ndo_validate_addr = eth_validate_addr,
121 .ndo_set_mac_address = eth_mac_addr,
122 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
123 .ndo_change_mtu = ptm_change_mtu,
124 #endif
125 .ndo_do_ioctl = ptm_ioctl,
126 .ndo_tx_timeout = ptm_tx_timeout,
127 };
128
129 static struct net_device *g_net_dev[1] = {0};
130 static char *g_net_dev_name[1] = {"dsl0"};
131
132 static int g_ptm_prio_queue_map[8];
133
134 static DECLARE_TASKLET(g_swap_desc_tasklet, do_swap_desc_tasklet, 0);
135
136
137 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
138
139 /*
140 * ####################################
141 * Local Function
142 * ####################################
143 */
144
145 static void ptm_setup(struct net_device *dev, int ndev)
146 {
147 netif_carrier_off(dev);
148
149 dev->netdev_ops = &g_ptm_netdev_ops;
150 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
151 /* Allow up to 1508 bytes, for RFC4638 */
152 dev->max_mtu = ETH_DATA_LEN + 8;
153 #endif
154 netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 16);
155 dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
156
157 dev->dev_addr[0] = 0x00;
158 dev->dev_addr[1] = 0x20;
159 dev->dev_addr[2] = 0xda;
160 dev->dev_addr[3] = 0x86;
161 dev->dev_addr[4] = 0x23;
162 dev->dev_addr[5] = 0x75 + ndev;
163 }
164
165 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
166 {
167 struct net_device_stats *s;
168
169 if ( dev != g_net_dev[0] )
170 return NULL;
171 s = &g_ptm_priv_data.itf[0].stats;
172
173 return s;
174 }
175
176 static int ptm_open(struct net_device *dev)
177 {
178 ASSERT(dev == g_net_dev[0], "incorrect device");
179
180 napi_enable(&g_ptm_priv_data.itf[0].napi);
181
182 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
183
184 netif_start_queue(dev);
185
186 return 0;
187 }
188
189 static int ptm_stop(struct net_device *dev)
190 {
191 ASSERT(dev == g_net_dev[0], "incorrect device");
192
193 IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER);
194
195 napi_disable(&g_ptm_priv_data.itf[0].napi);
196
197 netif_stop_queue(dev);
198
199 return 0;
200 }
201
202 static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
203 {
204 unsigned int work_done = 0;
205 volatile struct rx_descriptor *desc;
206 struct rx_descriptor reg_desc;
207 struct sk_buff *skb, *new_skb;
208
209 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
210
211 while ( work_done < work_to_do ) {
212 desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
213 if ( desc->own /* || !desc->c */ ) // if PP32 holds descriptor or descriptor not completed
214 break;
215 if ( ++g_ptm_priv_data.itf[0].rx_desc_pos == WAN_RX_DESC_NUM )
216 g_ptm_priv_data.itf[0].rx_desc_pos = 0;
217
218 reg_desc = *desc;
219 skb = get_skb_pointer(reg_desc.dataptr);
220 ASSERT(skb != NULL, "invalid pointer skb == NULL");
221
222 new_skb = alloc_skb_rx();
223 if ( new_skb != NULL ) {
224 skb_reserve(skb, reg_desc.byteoff);
225 skb_put(skb, reg_desc.datalen);
226
227 // parse protocol header
228 skb->dev = g_net_dev[0];
229 skb->protocol = eth_type_trans(skb, skb->dev);
230
231 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
232 g_net_dev[0]->last_rx = jiffies;
233 #endif
234
235 netif_receive_skb(skb);
236
237 g_ptm_priv_data.itf[0].stats.rx_packets++;
238 g_ptm_priv_data.itf[0].stats.rx_bytes += reg_desc.datalen;
239
240 reg_desc.dataptr = (unsigned int)new_skb->data & 0x0FFFFFFF;
241 reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
242 }
243
244 reg_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
245 reg_desc.own = 1;
246 reg_desc.c = 0;
247
248 /* write descriptor to memory */
249 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
250 wmb();
251 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
252
253 work_done++;
254 }
255
256 return work_done;
257 }
258
259 static int ptm_napi_poll(struct napi_struct *napi, int budget)
260 {
261 int ndev = 0;
262 unsigned int work_done;
263
264 work_done = ptm_poll(ndev, budget);
265
266 // interface down
267 if ( !netif_running(napi->dev) ) {
268 napi_complete(napi);
269 return work_done;
270 }
271
272 // clear interrupt
273 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC);
274 // no more traffic
275 if (work_done < budget) {
276 napi_complete(napi);
277 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
278 return work_done;
279 }
280
281 // next round
282 return work_done;
283 }
284
285 static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
286 {
287 unsigned int f_full;
288 int desc_base;
289 volatile struct tx_descriptor *desc;
290 struct tx_descriptor reg_desc = {0};
291 struct sk_buff *skb_to_free;
292 unsigned int byteoff;
293
294 ASSERT(dev == g_net_dev[0], "incorrect device");
295
296 if ( !g_showtime ) {
297 err("not in showtime");
298 goto PTM_HARD_START_XMIT_FAIL;
299 }
300
301 /* allocate descriptor */
302 desc_base = get_tx_desc(0, &f_full);
303 if ( f_full ) {
304 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
305 netif_trans_update(dev);
306 #else
307 dev->trans_start = jiffies;
308 #endif
309 netif_stop_queue(dev);
310
311 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC);
312 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER);
313 }
314 if ( desc_base < 0 )
315 goto PTM_HARD_START_XMIT_FAIL;
316 desc = &CPU_TO_WAN_TX_DESC_BASE[desc_base];
317
318 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
319 if ( skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff || skb_cloned(skb) ) {
320 struct sk_buff *new_skb;
321
322 ASSERT(skb_headroom(skb) >= sizeof(struct sk_buff *) + byteoff, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
323 ASSERT(!skb_cloned(skb), "skb is cloned");
324
325 new_skb = alloc_skb_tx(skb->len);
326 if ( new_skb == NULL ) {
327 dbg("no memory");
328 goto ALLOC_SKB_TX_FAIL;
329 }
330 skb_put(new_skb, skb->len);
331 memcpy(new_skb->data, skb->data, skb->len);
332 dev_kfree_skb_any(skb);
333 skb = new_skb;
334 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
335 /* write back to physical memory */
336 dma_cache_wback((unsigned long)skb->data, skb->len);
337 }
338
339 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
340 /* write back to physical memory */
341 dma_cache_wback((unsigned long)skb->data - byteoff - sizeof(struct sk_buff *), skb->len + byteoff + sizeof(struct sk_buff *));
342
343 /* free previous skb */
344 skb_to_free = get_skb_pointer(desc->dataptr);
345 if ( skb_to_free != NULL )
346 dev_kfree_skb_any(skb_to_free);
347
348 /* update descriptor */
349 reg_desc.small = 0;
350 reg_desc.dataptr = (unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1));
351 reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
352 reg_desc.qid = g_ptm_prio_queue_map[skb->priority > 7 ? 7 : skb->priority];
353 reg_desc.byteoff = byteoff;
354 reg_desc.own = 1;
355 reg_desc.c = 1;
356 reg_desc.sop = reg_desc.eop = 1;
357
358 /* update MIB */
359 g_ptm_priv_data.itf[0].stats.tx_packets++;
360 g_ptm_priv_data.itf[0].stats.tx_bytes += reg_desc.datalen;
361
362 /* write descriptor to memory */
363 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
364 wmb();
365 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
366
367 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
368 netif_trans_update(dev);
369 #else
370 dev->trans_start = jiffies;
371 #endif
372
373 return 0;
374
375 ALLOC_SKB_TX_FAIL:
376 PTM_HARD_START_XMIT_FAIL:
377 dev_kfree_skb_any(skb);
378 g_ptm_priv_data.itf[0].stats.tx_dropped++;
379 return 0;
380 }
381
382 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
383 static int ptm_change_mtu(struct net_device *dev, int mtu)
384 {
385 /* Allow up to 1508 bytes, for RFC4638 */
386 if (mtu < 68 || mtu > ETH_DATA_LEN + 8)
387 return -EINVAL;
388 dev->mtu = mtu;
389 return 0;
390 }
391 #endif
392
393 static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
394 {
395 ASSERT(dev == g_net_dev[0], "incorrect device");
396
397 switch ( cmd )
398 {
399 case IFX_PTM_MIB_CW_GET:
400 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = IFX_REG_R32(DREG_AR_CELL0) + IFX_REG_R32(DREG_AR_CELL1);
401 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = IFX_REG_R32(DREG_AR_IDLE_CNT0) + IFX_REG_R32(DREG_AR_IDLE_CNT1);
402 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = IFX_REG_R32(DREG_AR_CVN_CNT0) + IFX_REG_R32(DREG_AR_CVN_CNT1) + IFX_REG_R32(DREG_AR_CVNP_CNT0) + IFX_REG_R32(DREG_AR_CVNP_CNT1);
403 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = IFX_REG_R32(DREG_AT_CELL0) + IFX_REG_R32(DREG_AT_CELL1);
404 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = IFX_REG_R32(DREG_AT_IDLE_CNT0) + IFX_REG_R32(DREG_AT_IDLE_CNT1);
405 break;
406 case IFX_PTM_MIB_FRAME_GET:
407 {
408 PTM_FRAME_MIB_T data = {0};
409 int i;
410
411 data.RxCorrect = IFX_REG_R32(DREG_AR_HEC_CNT0) + IFX_REG_R32(DREG_AR_HEC_CNT1) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1);
412 for ( i = 0; i < 4; i++ )
413 data.RxDropped += WAN_RX_MIB_TABLE(i)->wrx_dropdes_pdu;
414 for ( i = 0; i < 8; i++ )
415 data.TxSend += WAN_TX_MIB_TABLE(i)->wtx_total_pdu;
416
417 *((PTM_FRAME_MIB_T *)ifr->ifr_data) = data;
418 }
419 break;
420 case IFX_PTM_CFG_GET:
421 // use bearer channel 0 preemption gamma interface settings
422 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = 1;
423 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis == 0 ? 1 : 0;
424 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis == 0 ? 1 : 0;
425 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size == 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size * 16);
426 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis == 0 ? 1 : 0;
427 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : 1;
428 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size * 16);
429 break;
430 case IFX_PTM_CFG_SET:
431 {
432 int i;
433
434 for ( i = 0; i < 4; i++ ) {
435 RX_GAMMA_ITF_CFG(i)->rx_eth_fcs_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 0 : 1;
436
437 RX_GAMMA_ITF_CFG(i)->rx_tc_crc_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck ? 0 : 1;
438
439 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen ) {
440 case 16: RX_GAMMA_ITF_CFG(i)->rx_tc_crc_size = 1; break;
441 case 32: RX_GAMMA_ITF_CFG(i)->rx_tc_crc_size = 2; break;
442 default: RX_GAMMA_ITF_CFG(i)->rx_tc_crc_size = 0;
443 }
444
445 TX_GAMMA_ITF_CFG(i)->tx_eth_fcs_gen_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 0 : 1;
446
447 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen ) {
448 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen ) {
449 case 16: TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 1; break;
450 case 32: TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 2; break;
451 default: TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 0;
452 }
453 }
454 else
455 TX_GAMMA_ITF_CFG(i)->tx_tc_crc_size = 0;
456 }
457 }
458 break;
459 case IFX_PTM_MAP_PKT_PRIO_TO_Q:
460 {
461 struct ppe_prio_q_map cmd;
462
463 if ( copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)) )
464 return -EFAULT;
465
466 if ( cmd.pkt_prio < 0 || cmd.pkt_prio >= ARRAY_SIZE(g_ptm_prio_queue_map) )
467 return -EINVAL;
468
469 if ( cmd.qid < 0 || cmd.qid >= g_wanqos_en )
470 return -EINVAL;
471
472 g_ptm_prio_queue_map[cmd.pkt_prio] = cmd.qid;
473 }
474 break;
475 default:
476 return -EOPNOTSUPP;
477 }
478
479 return 0;
480 }
481
482 static void ptm_tx_timeout(struct net_device *dev)
483 {
484 ASSERT(dev == g_net_dev[0], "incorrect device");
485
486 /* disable TX irq, release skb when sending new packet */
487 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
488
489 /* wake up TX queue */
490 netif_wake_queue(dev);
491
492 return;
493 }
494
495 static inline struct sk_buff* alloc_skb_rx(void)
496 {
497 struct sk_buff *skb;
498
499 /* allocate memory including trailer and padding */
500 skb = dev_alloc_skb(RX_MAX_BUFFER_SIZE + DATA_BUFFER_ALIGNMENT);
501 if ( skb != NULL ) {
502 /* must be aligned to burst length, and reserve two more bytes for MAC address alignment */
503 if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
504 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
505 /* put skb pointer in reserved area "skb->data - 4" */
506 *((struct sk_buff **)skb->data - 1) = skb;
507 wmb();
508 /* write back and invalidate cache */
509 dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
510 /* invalidate cache */
511 dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
512 }
513
514 return skb;
515 }
516
517 static inline struct sk_buff* alloc_skb_tx(unsigned int size)
518 {
519 struct sk_buff *skb;
520
521 /* allocate memory including padding */
522 size = RX_MAX_BUFFER_SIZE;
523 size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
524 skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
525 /* must be aligned to burst length */
526 if ( skb != NULL )
527 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
528 return skb;
529 }
530
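/*
 * Recover the sk_buff pointer stashed in the 4 bytes right before the DMA
 * buffer (see alloc_skb_rx/ptm_hard_start_xmit), accessed through the
 * uncached KSEG1 view of the descriptor's data pointer.
 */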
531 static inline struct sk_buff *get_skb_pointer(unsigned int dataptr)
532 {
533 unsigned int skb_dataptr;
534 struct sk_buff *skb;
535
536 // usually, CPE memory is less than 256 MB,
537 // so a zero dataptr is treated as an invalid pointer
538 if ( dataptr == 0 ) {
539 dbg("dataptr is 0, it's supposed to be invalid pointer");
540 return NULL;
541 }
542
543 skb_dataptr = (dataptr - 4) | KSEG1;
544 skb = *(struct sk_buff **)skb_dataptr;
545
546 ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
547 ASSERT((((unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1))) | KSEG1) == (dataptr | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);
548
549 return skb;
550 }
551
552 static inline int get_tx_desc(unsigned int itf, unsigned int *f_full)
553 {
554 int desc_base = -1;
555 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[0];
556
557 // assume TX is serial operation
558 // no protection provided
559
560 *f_full = 1;
561
562 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 ) {
563 desc_base = p_itf->tx_desc_pos;
564 if ( ++(p_itf->tx_desc_pos) == CPU_TO_WAN_TX_DESC_NUM )
565 p_itf->tx_desc_pos = 0;
566 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 )
567 *f_full = 0;
568 }
569
570 return desc_base;
571 }
572
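/*
 * MBOX_IGU1 interrupt dispatcher:
 *   bit 0  - RX descriptor ready: mask the RX interrupt and schedule NAPI
 *   bit 16 - swap descriptor ready: schedule the swap descriptor tasklet
 *   bit 17 - TX descriptor released: wake up the TX queue
 */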
573 static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
574 {
575 unsigned int isr;
576 int i;
577
578 isr = IFX_REG_R32(MBOX_IGU1_ISR);
579 IFX_REG_W32(isr, MBOX_IGU1_ISRC);
580 isr &= IFX_REG_R32(MBOX_IGU1_IER);
581
582 if (isr & BIT(0)) {
583 IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER);
584 napi_schedule(&g_ptm_priv_data.itf[0].napi);
585 #if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
586 {
587 volatile struct rx_descriptor *desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
588
589 if ( desc->own ) { // PP32 hold
590 err("invalid interrupt");
591 }
592 }
593 #endif
594 }
595 if (isr & BIT(16)) {
596 IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER);
597 tasklet_hi_schedule(&g_swap_desc_tasklet);
598 }
599 if (isr & BIT(17)) {
600 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
601 netif_wake_queue(g_net_dev[0]);
602 }
603
604 return IRQ_HANDLED;
605 }
606
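/*
 * Replenish the swap descriptor ring: free each skb the PPE firmware has
 * finished with and hand a freshly allocated buffer back to the firmware.
 */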
607 static void do_swap_desc_tasklet(unsigned long arg)
608 {
609 int budget = 32;
610 volatile struct tx_descriptor *desc;
611 struct sk_buff *skb;
612 unsigned int byteoff;
613
614 while ( budget-- > 0 ) {
615 if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) // if PP32 holds descriptor
616 break;
617
618 desc = &WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos];
619 if ( ++g_ptm_priv_data.itf[0].tx_swap_desc_pos == WAN_SWAP_DESC_NUM )
620 g_ptm_priv_data.itf[0].tx_swap_desc_pos = 0;
621
622 skb = get_skb_pointer(desc->dataptr);
623 if ( skb != NULL )
624 dev_kfree_skb_any(skb);
625
626 skb = alloc_skb_tx(RX_MAX_BUFFER_SIZE);
627 if ( skb == NULL )
628 panic("can't allocate swap buffer for PPE firmware use\n");
629 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
630 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
631
632 desc->dataptr = (unsigned int)skb->data & 0x0FFFFFFF;
633 desc->own = 1;
634 }
635
636 // clear interrupt
637 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_ISRC);
638 // no more skb to be replaced
639 if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) { // if PP32 holds descriptor
640 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER);
641 return;
642 }
643
644 tasklet_hi_schedule(&g_swap_desc_tasklet);
645 return;
646 }
647
648
649 static inline int ifx_ptm_version(char *buf)
650 {
651 int len = 0;
652 unsigned int major, minor;
653
654 ifx_ptm_get_fw_ver(&major, &minor);
655
656 len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
657 len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
658
659 return len;
660 }
661
662 static inline int init_priv_data(void)
663 {
664 int i, j;
665
666 g_wanqos_en = wanqos_en ? wanqos_en : 8;
667 if ( g_wanqos_en > 8 )
668 g_wanqos_en = 8;
669
670 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ )
671 {
672 g_queue_gamma_map[i] = queue_gamma_map[i] & ((1 << g_wanqos_en) - 1);
673 for ( j = 0; j < i; j++ )
674 g_queue_gamma_map[i] &= ~g_queue_gamma_map[j];
675 }
676
677 memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));
678
679 {
680 int max_packet_priority = ARRAY_SIZE(g_ptm_prio_queue_map);
681 int tx_num_q;
682 int q_step, q_accum, p_step;
683
684 tx_num_q = __ETH_WAN_TX_QUEUE_NUM;
685 q_step = tx_num_q - 1;
686 p_step = max_packet_priority - 1;
687 for ( j = 0, q_accum = 0; j < max_packet_priority; j++, q_accum += q_step )
688 g_ptm_prio_queue_map[j] = q_step - (q_accum + (p_step >> 1)) / p_step;
689 }
690
691 return 0;
692 }
693
694 static inline void clear_priv_data(void)
695 {
696 }
697
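/*
 * Program the PPE firmware configuration tables (QoS, bearer channel and
 * gamma interface settings) and initialize the RX/TX/swap descriptor rings.
 */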
698 static inline int init_tables(void)
699 {
700 struct sk_buff *skb_pool[WAN_RX_DESC_NUM] = {0};
701 struct cfg_std_data_len cfg_std_data_len = {0};
702 struct tx_qos_cfg tx_qos_cfg = {0};
703 struct psave_cfg psave_cfg = {0};
704 struct eg_bwctrl_cfg eg_bwctrl_cfg = {0};
705 struct test_mode test_mode = {0};
706 struct rx_bc_cfg rx_bc_cfg = {0};
707 struct tx_bc_cfg tx_bc_cfg = {0};
708 struct gpio_mode gpio_mode = {0};
709 struct gpio_wm_cfg gpio_wm_cfg = {0};
710 struct rx_gamma_itf_cfg rx_gamma_itf_cfg = {0};
711 struct tx_gamma_itf_cfg tx_gamma_itf_cfg = {0};
712 struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg = {0};
713 struct rx_descriptor rx_desc = {0};
714 struct tx_descriptor tx_desc = {0};
715 int i;
716
717 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
718 skb_pool[i] = alloc_skb_rx();
719 if ( skb_pool[i] == NULL )
720 goto ALLOC_SKB_RX_FAIL;
721 }
722
723 cfg_std_data_len.byte_off = RX_HEAD_MAC_ADDR_ALIGNMENT; // this field replaces byte_off in rx descriptor of VDSL ingress
724 cfg_std_data_len.data_len = 1600;
725 *CFG_STD_DATA_LEN = cfg_std_data_len;
726
727 tx_qos_cfg.time_tick = cgu_get_pp32_clock() / 62500; // 16 * (cgu_get_pp32_clock() / 1000000)
728 tx_qos_cfg.overhd_bytes = 0;
729 tx_qos_cfg.eth1_eg_qnum = __ETH_WAN_TX_QUEUE_NUM;
730 tx_qos_cfg.eth1_burst_chk = 1;
731 tx_qos_cfg.eth1_qss = 0;
732 tx_qos_cfg.shape_en = 0; // disable
733 tx_qos_cfg.wfq_en = 0; // strict priority
734 *TX_QOS_CFG = tx_qos_cfg;
735
736 psave_cfg.start_state = 0;
737 psave_cfg.sleep_en = 1; // enable sleep mode
738 *PSAVE_CFG = psave_cfg;
739
740 eg_bwctrl_cfg.fdesc_wm = 16;
741 eg_bwctrl_cfg.class_len = 128;
742 *EG_BWCTRL_CFG = eg_bwctrl_cfg;
743
744 //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
745 *GPIO_ADDR = (unsigned int)0x00000000; // disabled by default
746
747 gpio_mode.gpio_bit_bc1 = 2;
748 gpio_mode.gpio_bit_bc0 = 1;
749 gpio_mode.gpio_bc1_en = 0;
750 gpio_mode.gpio_bc0_en = 0;
751 *GPIO_MODE = gpio_mode;
752
753 gpio_wm_cfg.stop_wm_bc1 = 2;
754 gpio_wm_cfg.start_wm_bc1 = 4;
755 gpio_wm_cfg.stop_wm_bc0 = 2;
756 gpio_wm_cfg.start_wm_bc0 = 4;
757 *GPIO_WM_CFG = gpio_wm_cfg;
758
759 test_mode.mib_clear_mode = 0;
760 test_mode.test_mode = 0;
761 *TEST_MODE = test_mode;
762
763 rx_bc_cfg.local_state = 0;
764 rx_bc_cfg.remote_state = 0;
765 rx_bc_cfg.to_false_th = 7;
766 rx_bc_cfg.to_looking_th = 3;
767 *RX_BC_CFG(0) = rx_bc_cfg;
768 *RX_BC_CFG(1) = rx_bc_cfg;
769
770 tx_bc_cfg.fill_wm = 2;
771 tx_bc_cfg.uflw_wm = 2;
772 *TX_BC_CFG(0) = tx_bc_cfg;
773 *TX_BC_CFG(1) = tx_bc_cfg;
774
775 rx_gamma_itf_cfg.receive_state = 0;
776 rx_gamma_itf_cfg.rx_min_len = 60;
777 rx_gamma_itf_cfg.rx_pad_en = 1;
778 rx_gamma_itf_cfg.rx_eth_fcs_ver_dis = 0;
779 rx_gamma_itf_cfg.rx_rm_eth_fcs = 1;
780 rx_gamma_itf_cfg.rx_tc_crc_ver_dis = 0;
781 rx_gamma_itf_cfg.rx_tc_crc_size = 1;
782 rx_gamma_itf_cfg.rx_eth_fcs_result = 0xC704DD7B;
783 rx_gamma_itf_cfg.rx_tc_crc_result = 0x1D0F1D0F;
784 rx_gamma_itf_cfg.rx_crc_cfg = 0x2500;
785 rx_gamma_itf_cfg.rx_eth_fcs_init_value = 0xFFFFFFFF;
786 rx_gamma_itf_cfg.rx_tc_crc_init_value = 0x0000FFFF;
787 rx_gamma_itf_cfg.rx_max_len_sel = 0;
788 rx_gamma_itf_cfg.rx_edit_num2 = 0;
789 rx_gamma_itf_cfg.rx_edit_pos2 = 0;
790 rx_gamma_itf_cfg.rx_edit_type2 = 0;
791 rx_gamma_itf_cfg.rx_edit_en2 = 0;
792 rx_gamma_itf_cfg.rx_edit_num1 = 0;
793 rx_gamma_itf_cfg.rx_edit_pos1 = 0;
794 rx_gamma_itf_cfg.rx_edit_type1 = 0;
795 rx_gamma_itf_cfg.rx_edit_en1 = 0;
796 rx_gamma_itf_cfg.rx_inserted_bytes_1l = 0;
797 rx_gamma_itf_cfg.rx_inserted_bytes_1h = 0;
798 rx_gamma_itf_cfg.rx_inserted_bytes_2l = 0;
799 rx_gamma_itf_cfg.rx_inserted_bytes_2h = 0;
800 rx_gamma_itf_cfg.rx_len_adj = -6;
801 for ( i = 0; i < 4; i++ )
802 *RX_GAMMA_ITF_CFG(i) = rx_gamma_itf_cfg;
803
804 tx_gamma_itf_cfg.tx_len_adj = 6;
805 tx_gamma_itf_cfg.tx_crc_off_adj = 6;
806 tx_gamma_itf_cfg.tx_min_len = 0;
807 tx_gamma_itf_cfg.tx_eth_fcs_gen_dis = 0;
808 tx_gamma_itf_cfg.tx_tc_crc_size = 1;
809 tx_gamma_itf_cfg.tx_crc_cfg = 0x2F00;
810 tx_gamma_itf_cfg.tx_eth_fcs_init_value = 0xFFFFFFFF;
811 tx_gamma_itf_cfg.tx_tc_crc_init_value = 0x0000FFFF;
812 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ ) {
813 tx_gamma_itf_cfg.queue_mapping = g_queue_gamma_map[i];
814 *TX_GAMMA_ITF_CFG(i) = tx_gamma_itf_cfg;
815 }
816
817 for ( i = 0; i < __ETH_WAN_TX_QUEUE_NUM; i++ ) {
818 wtx_qos_q_desc_cfg.length = WAN_TX_DESC_NUM;
819 wtx_qos_q_desc_cfg.addr = __ETH_WAN_TX_DESC_BASE(i);
820 *WTX_QOS_Q_DESC_CFG(i) = wtx_qos_q_desc_cfg;
821 }
822
823 // default TX queue QoS config is all ZERO
824
825 // TX Ctrl K Table
826 IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
827 IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
828 IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
829 IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
830 IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
831 IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
832 IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
833 IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
834 IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
835 IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
836 IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
837 IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
838 IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
839 IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
840 IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
841 IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));
842
843 // init RX descriptor
844 rx_desc.own = 1;
845 rx_desc.c = 0;
846 rx_desc.sop = 1;
847 rx_desc.eop = 1;
848 rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
849 rx_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
850 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
851 rx_desc.dataptr = (unsigned int)skb_pool[i]->data & 0x0FFFFFFF;
852 WAN_RX_DESC_BASE[i] = rx_desc;
853 }
854
855 // init TX descriptor
856 tx_desc.own = 0;
857 tx_desc.c = 0;
858 tx_desc.sop = 1;
859 tx_desc.eop = 1;
860 tx_desc.byteoff = 0;
861 tx_desc.qid = 0;
862 tx_desc.datalen = 0;
863 tx_desc.small = 0;
864 tx_desc.dataptr = 0;
865 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ )
866 CPU_TO_WAN_TX_DESC_BASE[i] = tx_desc;
867 for ( i = 0; i < WAN_TX_DESC_NUM_TOTAL; i++ )
868 WAN_TX_DESC_BASE(0)[i] = tx_desc;
869
870 // init Swap descriptor
871 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ )
872 WAN_SWAP_DESC_BASE[i] = tx_desc;
873
874 // init fastpath TX descriptor
875 tx_desc.own = 1;
876 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ )
877 FASTPATH_TO_WAN_TX_DESC_BASE[i] = tx_desc;
878
879 return 0;
880
881 ALLOC_SKB_RX_FAIL:
882 while ( i-- > 0 )
883 dev_kfree_skb_any(skb_pool[i]);
884 return -1;
885 }
886
887 static inline void clear_tables(void)
888 {
889 struct sk_buff *skb;
890 int i, j;
891
892 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
893 skb = get_skb_pointer(WAN_RX_DESC_BASE[i].dataptr);
894 if ( skb != NULL )
895 dev_kfree_skb_any(skb);
896 }
897
898 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ ) {
899 skb = get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE[i].dataptr);
900 if ( skb != NULL )
901 dev_kfree_skb_any(skb);
902 }
903
904 for ( j = 0; j < 8; j++ )
905 for ( i = 0; i < WAN_TX_DESC_NUM; i++ ) {
906 skb = get_skb_pointer(WAN_TX_DESC_BASE(j)[i].dataptr);
907 if ( skb != NULL )
908 dev_kfree_skb_any(skb);
909 }
910
911 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ ) {
912 skb = get_skb_pointer(WAN_SWAP_DESC_BASE[i].dataptr);
913 if ( skb != NULL )
914 dev_kfree_skb_any(skb);
915 }
916
917 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ ) {
918 skb = get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE[i].dataptr);
919 if ( skb != NULL )
920 dev_kfree_skb_any(skb);
921 }
922 }
923
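/*
 * Showtime callbacks, hooked into the MEI driver in ltq_ptm_probe():
 * toggle carrier and the UTP_CFG register when the DSL line enters or
 * leaves showtime.
 */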
924 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
925 {
926 int i;
927
928 ASSERT(port_cell != NULL, "port_cell is NULL");
929 ASSERT(xdata_addr != NULL, "xdata_addr is NULL");
930
931 // TODO: ReTX set xdata_addr
932 g_xdata_addr = xdata_addr;
933
934 g_showtime = 1;
935
936 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
937 netif_carrier_on(g_net_dev[i]);
938
939 IFX_REG_W32(0x0F, UTP_CFG);
940
941 //#ifdef CONFIG_VR9
942 // IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);
943 //#endif
944
945 printk("enter showtime\n");
946
947 return 0;
948 }
949
950 static int ptm_showtime_exit(void)
951 {
952 int i;
953
954 if ( !g_showtime )
955 return -1;
956
957 //#ifdef CONFIG_VR9
958 // IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);
959 //#endif
960
961 IFX_REG_W32(0x00, UTP_CFG);
962
963 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
964 netif_carrier_off(g_net_dev[i]);
965
966 g_showtime = 0;
967
968 // TODO: ReTX clean state
969 g_xdata_addr = NULL;
970
971 printk("leave showtime\n");
972
973 return 0;
974 }
975
976 static const struct of_device_id ltq_ptm_match[] = {
977 #ifdef CONFIG_DANUBE
978 { .compatible = "lantiq,ppe-danube", .data = NULL },
979 #elif defined CONFIG_AMAZON_SE
980 { .compatible = "lantiq,ppe-ase", .data = NULL },
981 #elif defined CONFIG_AR9
982 { .compatible = "lantiq,ppe-arx100", .data = NULL },
983 #elif defined CONFIG_VR9
984 { .compatible = "lantiq,ppe-xrx200", .data = NULL },
985 #endif
986 {},
987 };
988 MODULE_DEVICE_TABLE(of, ltq_ptm_match);
989
990 static int ltq_ptm_probe(struct platform_device *pdev)
991 {
992 int ret;
993 int i;
994 char ver_str[128];
995 struct port_cell_info port_cell = {0};
996
997 ret = init_priv_data();
998 if ( ret != 0 ) {
999 err("INIT_PRIV_DATA_FAIL");
1000 goto INIT_PRIV_DATA_FAIL;
1001 }
1002
1003 ifx_ptm_init_chip(pdev);
1004 ret = init_tables();
1005 if ( ret != 0 ) {
1006 err("INIT_TABLES_FAIL");
1007 goto INIT_TABLES_FAIL;
1008 }
1009
1010 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1011 g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
1012 if ( g_net_dev[i] == NULL )
1013 goto ALLOC_NETDEV_FAIL;
1014 ptm_setup(g_net_dev[i], i);
1015 }
1016
1017 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1018 ret = register_netdev(g_net_dev[i]);
1019 if ( ret != 0 )
1020 goto REGISTER_NETDEV_FAIL;
1021 }
1022
1023 /* register interrupt handler */
1024 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
1025 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
1026 #else
1027 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
1028 #endif
1029 if ( ret ) {
1030 if ( ret == -EBUSY ) {
1031 err("IRQ may be occupied by other driver, please reconfig to disable it.");
1032 }
1033 else {
1034 err("request_irq fail");
1035 }
1036 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
1037 }
1038 disable_irq(PPE_MAILBOX_IGU1_INT);
1039
1040 ret = ifx_pp32_start(0);
1041 if ( ret ) {
1042 err("ifx_pp32_start fail!");
1043 goto PP32_START_FAIL;
1044 }
1045 IFX_REG_W32(1 << 16, MBOX_IGU1_IER); // enable SWAP interrupt
1046 IFX_REG_W32(~0, MBOX_IGU1_ISRC);
1047
1048 enable_irq(PPE_MAILBOX_IGU1_INT);
1049
1050 ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);
1051 if ( g_showtime ) {
1052 ptm_showtime_enter(&port_cell, &g_xdata_addr);
1053 }
1054
1055 ifx_mei_atm_showtime_enter = ptm_showtime_enter;
1056 ifx_mei_atm_showtime_exit = ptm_showtime_exit;
1057
1058 ifx_ptm_version(ver_str);
1059 printk(KERN_INFO "%s", ver_str);
1060
1061 printk("ifxmips_ptm: PTM init succeed\n");
1062
1063 return 0;
1064
1065 PP32_START_FAIL:
1066 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1067 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
1068 i = ARRAY_SIZE(g_net_dev);
1069 REGISTER_NETDEV_FAIL:
1070 while ( i-- )
1071 unregister_netdev(g_net_dev[i]);
1072 i = ARRAY_SIZE(g_net_dev);
1073 ALLOC_NETDEV_FAIL:
1074 while ( i-- ) {
1075 free_netdev(g_net_dev[i]);
1076 g_net_dev[i] = NULL;
1077 }
1078 INIT_TABLES_FAIL:
1079 INIT_PRIV_DATA_FAIL:
1080 clear_priv_data();
1081 printk("ifxmips_ptm: PTM init failed\n");
1082 return ret;
1083 }
1084
1085 static int ltq_ptm_remove(struct platform_device *pdev)
1086 {
1087 int i;
1088 ifx_mei_atm_showtime_enter = NULL;
1089 ifx_mei_atm_showtime_exit = NULL;
1090
1091
1092 ifx_pp32_stop(0);
1093
1094 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1095
1096 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1097 unregister_netdev(g_net_dev[i]);
1098
1099 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1100 free_netdev(g_net_dev[i]);
1101 g_net_dev[i] = NULL;
1102 }
1103
1104 clear_tables();
1105
1106 ifx_ptm_uninit_chip();
1107
1108 clear_priv_data();
1109
1110 return 0;
1111 }
1112
1113 #ifndef MODULE
1114 static int __init wanqos_en_setup(char *line)
1115 {
1116 wanqos_en = simple_strtoul(line, NULL, 0);
1117
1118 if ( wanqos_en < 1 || wanqos_en > 8 )
1119 wanqos_en = 0;
1120
1121 return 0;
1122 }
1123
1124 static int __init queue_gamma_map_setup(char *line)
1125 {
1126 char *p;
1127 int i;
1128
1129 for ( i = 0, p = line; i < ARRAY_SIZE(queue_gamma_map) && isxdigit(*p); i++ )
1130 {
1131 queue_gamma_map[i] = simple_strtoul(p, &p, 0);
1132 if ( *p == ',' || *p == ';' || *p == ':' )
1133 p++;
1134 }
1135
1136 return 0;
1137 }
1138 #endif
1139 static struct platform_driver ltq_ptm_driver = {
1140 .probe = ltq_ptm_probe,
1141 .remove = ltq_ptm_remove,
1142 .driver = {
1143 .name = "ptm",
1144 .owner = THIS_MODULE,
1145 .of_match_table = ltq_ptm_match,
1146 },
1147 };
1148
1149 module_platform_driver(ltq_ptm_driver);
1150 #ifndef MODULE
1151 __setup("wanqos_en=", wanqos_en_setup);
1152 __setup("queue_gamma_map=", queue_gamma_map_setup);
1153 #endif
1154
1155 MODULE_LICENSE("GPL");