1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_vdsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for VR9)
10 ** COPYRIGHT : Copyright (c) 2006
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
18 **
19 ** HISTORY
20 ** $Date $Author $Comment
21 ** 07 JUL 2009 Xu Liang Init Version
22 *******************************************************************************/
23
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/ctype.h>
29 #include <linux/errno.h>
30 #include <linux/proc_fs.h>
31 #include <linux/init.h>
32 #include <linux/ioctl.h>
33 #include <linux/etherdevice.h>
34 #include <linux/interrupt.h>
35 #include <linux/netdevice.h>
36 #include <linux/platform_device.h>
37 #include <linux/of_device.h>
38
39 #include "ifxmips_ptm_vdsl.h"
40 #include <lantiq_soc.h>
41
42 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
43 #define MODULE_PARM(a, b) module_param(a, int, 0)
44
45 static int wanqos_en = 0;
46 static int queue_gamma_map[4] = {0xFE, 0x01, 0x00, 0x00};
47
48 MODULE_PARM(wanqos_en, "i");
49 MODULE_PARM_DESC(wanqos_en, "WAN QoS support, 1 - enabled, 0 - disabled.");
50
51 MODULE_PARM_ARRAY(queue_gamma_map, "4-4i");
52 MODULE_PARM_DESC(queue_gamma_map, "TX QoS queues mapping to 4 TX Gamma interfaces.");
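/*
 * Illustrative load-time usage (parameter names as above; the exact module
 * file name depends on the build):
 *
 *   insmod ifxmips_ptm_vdsl.ko wanqos_en=8 queue_gamma_map=0xfe,0x01,0x00,0x00
 *
 * Each queue_gamma_map entry is a bitmask selecting which TX QoS queues feed
 * the corresponding Gamma interface; init_priv_data() below masks the values
 * against the number of enabled queues and removes overlapping bits.
 */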
53
54 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
55 extern int (*ifx_mei_atm_showtime_exit)(void);
56 extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
57
58 static int g_showtime = 0;
59 static void *g_xdata_addr = NULL;
60
61
62 #define ENABLE_TMP_DBG 0
63
64 unsigned long cgu_get_pp32_clock(void)
65 {
66 struct clk *c = clk_get_ppe();
67 unsigned long rate = clk_get_rate(c);
68 clk_put(c);
69 return rate;
70 }
71
72 static void ptm_setup(struct net_device *, int);
73 static struct net_device_stats *ptm_get_stats(struct net_device *);
74 static int ptm_open(struct net_device *);
75 static int ptm_stop(struct net_device *);
76 static unsigned int ptm_poll(int, unsigned int);
77 static int ptm_napi_poll(struct napi_struct *, int);
78 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
79 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
80 static void ptm_tx_timeout(struct net_device *);
81
82 static inline struct sk_buff* alloc_skb_rx(void);
83 static inline struct sk_buff* alloc_skb_tx(unsigned int);
84 static inline struct sk_buff *get_skb_pointer(unsigned int);
85 static inline int get_tx_desc(unsigned int, unsigned int *);
86
87 /*
88 * Mailbox handler and signal function
89 */
90 static irqreturn_t mailbox_irq_handler(int, void *);
91
92 /*
93 * Tasklet to Handle Swap Descriptors
94 */
95 static void do_swap_desc_tasklet(unsigned long);
96
97
98 /*
99 * Init & clean-up functions
100 */
101 static inline int init_priv_data(void);
102 static inline void clear_priv_data(void);
103 static inline int init_tables(void);
104 static inline void clear_tables(void);
105
106 static int g_wanqos_en = 0;
107
108 static int g_queue_gamma_map[4];
109
110 static struct ptm_priv_data g_ptm_priv_data;
111
112 static struct net_device_ops g_ptm_netdev_ops = {
113 .ndo_get_stats = ptm_get_stats,
114 .ndo_open = ptm_open,
115 .ndo_stop = ptm_stop,
116 .ndo_start_xmit = ptm_hard_start_xmit,
117 .ndo_validate_addr = eth_validate_addr,
118 .ndo_set_mac_address = eth_mac_addr,
119 .ndo_do_ioctl = ptm_ioctl,
120 .ndo_tx_timeout = ptm_tx_timeout,
121 };
122
123 static struct net_device *g_net_dev[1] = {0};
124 static char *g_net_dev_name[1] = {"dsl0"};
125
126 static int g_ptm_prio_queue_map[8];
127
128 static DECLARE_TASKLET(g_swap_desc_tasklet, do_swap_desc_tasklet, 0);
129
130
131 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
132
133 /*
134 * ####################################
135 * Local Function
136 * ####################################
137 */
138
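/*
 * ptm_setup() initialises the single "dsl0" net_device: it installs the
 * netdev_ops and NAPI handler and assigns a fixed template MAC address
 * (00:20:da:86:23:75) whose last byte is offset by the interface index.
 */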
139 static void ptm_setup(struct net_device *dev, int ndev)
140 {
141 netif_carrier_off(dev);
142
143 dev->netdev_ops = &g_ptm_netdev_ops;
144 /* Allow up to 1508 bytes, per RFC 4638 */
145 dev->max_mtu = ETH_DATA_LEN + 8;
146 netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 16);
147 dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
148
149 dev->dev_addr[0] = 0x00;
150 dev->dev_addr[1] = 0x20;
151 dev->dev_addr[2] = 0xda;
152 dev->dev_addr[3] = 0x86;
153 dev->dev_addr[4] = 0x23;
154 dev->dev_addr[5] = 0x75 + ndev;
155 }
156
157 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
158 {
159 struct net_device_stats *s;
160
161 if ( dev != g_net_dev[0] )
162 return NULL;
163 s = &g_ptm_priv_data.itf[0].stats;
164
165 return s;
166 }
167
168 static int ptm_open(struct net_device *dev)
169 {
170 ASSERT(dev == g_net_dev[0], "incorrect device");
171
172 napi_enable(&g_ptm_priv_data.itf[0].napi);
173
174 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
175
176 netif_start_queue(dev);
177
178 return 0;
179 }
180
181 static int ptm_stop(struct net_device *dev)
182 {
183 ASSERT(dev == g_net_dev[0], "incorrect device");
184
185 IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER);
186
187 napi_disable(&g_ptm_priv_data.itf[0].napi);
188
189 netif_stop_queue(dev);
190
191 return 0;
192 }
193
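/*
 * RX path: ptm_poll() walks the shared WAN RX descriptor ring. A descriptor
 * with the OWN bit set still belongs to the PP32 firmware and stops the walk.
 * For each completed descriptor the attached skb is handed to the stack and,
 * if a replacement buffer could be allocated, the descriptor is refilled with
 * the new buffer. The descriptor is written back as two 32-bit words with the
 * word carrying the OWN bit last (after wmb()) so the firmware never sees a
 * half-updated descriptor.
 */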
194 static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
195 {
196 unsigned int work_done = 0;
197 volatile struct rx_descriptor *desc;
198 struct rx_descriptor reg_desc;
199 struct sk_buff *skb, *new_skb;
200
201 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
202
203 while ( work_done < work_to_do ) {
204 desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
205 if ( desc->own /* || !desc->c */ ) // if PP32 holds the descriptor or the descriptor is not completed
206 break;
207 if ( ++g_ptm_priv_data.itf[0].rx_desc_pos == WAN_RX_DESC_NUM )
208 g_ptm_priv_data.itf[0].rx_desc_pos = 0;
209
210 reg_desc = *desc;
211 skb = get_skb_pointer(reg_desc.dataptr);
212 ASSERT(skb != NULL, "invalid pointer skb == NULL");
213
214 new_skb = alloc_skb_rx();
215 if ( new_skb != NULL ) {
216 skb_reserve(skb, reg_desc.byteoff);
217 skb_put(skb, reg_desc.datalen);
218
219 // parse protocol header
220 skb->dev = g_net_dev[0];
221 skb->protocol = eth_type_trans(skb, skb->dev);
222
223 netif_receive_skb(skb);
224
225 g_ptm_priv_data.itf[0].stats.rx_packets++;
226 g_ptm_priv_data.itf[0].stats.rx_bytes += reg_desc.datalen;
227
228 reg_desc.dataptr = (unsigned int)new_skb->data & 0x0FFFFFFF;
229 reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
230 }
231
232 reg_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
233 reg_desc.own = 1;
234 reg_desc.c = 0;
235
236 /* write descriptor to memory */
237 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
238 wmb();
239 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
240
241 work_done++;
242 }
243
244 return work_done;
245 }
246
247 static int ptm_napi_poll(struct napi_struct *napi, int budget)
248 {
249 int ndev = 0;
250 unsigned int work_done;
251
252 work_done = ptm_poll(ndev, budget);
253
254 // interface down
255 if ( !netif_running(napi->dev) ) {
256 napi_complete(napi);
257 return work_done;
258 }
259
260 // clear interrupt
261 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC);
262 // no more traffic
263 if (work_done < budget) {
264 napi_complete(napi);
265 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
266 return work_done;
267 }
268
269 // next round
270 return work_done;
271 }
272
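/*
 * TX path: a CPU-to-WAN descriptor is claimed with get_tx_desc(). If the skb
 * cannot satisfy the DMA alignment/headroom requirements (or is cloned) it is
 * copied into a freshly allocated aligned buffer. A back-pointer to the skb
 * is stored immediately in front of the data so the buffer owner can later be
 * recovered from the descriptor's dataptr; the buffer is then written back to
 * memory and the descriptor handed to the PP32 firmware (OWN bit written
 * last, after wmb()).
 */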
273 static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
274 {
275 unsigned int f_full;
276 int desc_base;
277 volatile struct tx_descriptor *desc;
278 struct tx_descriptor reg_desc = {0};
279 struct sk_buff *skb_to_free;
280 unsigned int byteoff;
281
282 ASSERT(dev == g_net_dev[0], "incorrect device");
283
284 if ( !g_showtime ) {
285 err("not in showtime");
286 goto PTM_HARD_START_XMIT_FAIL;
287 }
288
289 /* allocate descriptor */
290 desc_base = get_tx_desc(0, &f_full);
291 if ( f_full ) {
292 netif_trans_update(dev);
293 netif_stop_queue(dev);
294
295 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC);
296 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER);
297 }
298 if ( desc_base < 0 )
299 goto PTM_HARD_START_XMIT_FAIL;
300 desc = &CPU_TO_WAN_TX_DESC_BASE[desc_base];
301
302 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
303 if ( skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff || skb_cloned(skb) ) {
304 struct sk_buff *new_skb;
305
306 ASSERT(skb_headroom(skb) >= sizeof(struct sk_buff *) + byteoff, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
307 ASSERT(!skb_cloned(skb), "skb is cloned");
308
309 new_skb = alloc_skb_tx(skb->len);
310 if ( new_skb == NULL ) {
311 dbg("no memory");
312 goto ALLOC_SKB_TX_FAIL;
313 }
314 skb_put(new_skb, skb->len);
315 memcpy(new_skb->data, skb->data, skb->len);
316 dev_kfree_skb_any(skb);
317 skb = new_skb;
318 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
319 /* write back to physical memory */
320 dma_cache_wback((unsigned long)skb->data, skb->len);
321 }
322
323 /* make the skb unowned */
324 skb_orphan(skb);
325
326 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
327 /* write back to physical memory */
328 dma_cache_wback((unsigned long)skb->data - byteoff - sizeof(struct sk_buff *), skb->len + byteoff + sizeof(struct sk_buff *));
329
330 /* free previous skb */
331 skb_to_free = get_skb_pointer(desc->dataptr);
332 if ( skb_to_free != NULL )
333 dev_kfree_skb_any(skb_to_free);
334
335 /* update descriptor */
336 reg_desc.small = 0;
337 reg_desc.dataptr = (unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1));
338 reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
339 reg_desc.qid = g_ptm_prio_queue_map[skb->priority > 7 ? 7 : skb->priority];
340 reg_desc.byteoff = byteoff;
341 reg_desc.own = 1;
342 reg_desc.c = 1;
343 reg_desc.sop = reg_desc.eop = 1;
344
345 /* update MIB */
346 g_ptm_priv_data.itf[0].stats.tx_packets++;
347 g_ptm_priv_data.itf[0].stats.tx_bytes += reg_desc.datalen;
348
349 /* write descriptor to memory */
350 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
351 wmb();
352 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
353
354 netif_trans_update(dev);
355
356 return 0;
357
358 ALLOC_SKB_TX_FAIL:
359 PTM_HARD_START_XMIT_FAIL:
360 dev_kfree_skb_any(skb);
361 g_ptm_priv_data.itf[0].stats.tx_dropped++;
362 return 0;
363 }
364
365 static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
366 {
367 ASSERT(dev == g_net_dev[0], "incorrect device");
368
369 switch ( cmd )
370 {
371 case IFX_PTM_MIB_CW_GET:
372 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = IFX_REG_R32(DREG_AR_CELL0) + IFX_REG_R32(DREG_AR_CELL1);
373 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = IFX_REG_R32(DREG_AR_IDLE_CNT0) + IFX_REG_R32(DREG_AR_IDLE_CNT1);
374 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = IFX_REG_R32(DREG_AR_CVN_CNT0) + IFX_REG_R32(DREG_AR_CVN_CNT1) + IFX_REG_R32(DREG_AR_CVNP_CNT0) + IFX_REG_R32(DREG_AR_CVNP_CNT1);
375 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = IFX_REG_R32(DREG_AT_CELL0) + IFX_REG_R32(DREG_AT_CELL1);
376 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = IFX_REG_R32(DREG_AT_IDLE_CNT0) + IFX_REG_R32(DREG_AT_IDLE_CNT1);
377 break;
378 case IFX_PTM_MIB_FRAME_GET:
379 {
380 PTM_FRAME_MIB_T data = {0};
381 int i;
382
383 data.RxCorrect = IFX_REG_R32(DREG_AR_HEC_CNT0) + IFX_REG_R32(DREG_AR_HEC_CNT1) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1);
384 for ( i = 0; i < 4; i++ )
385 data.RxDropped += WAN_RX_MIB_TABLE(i)->wrx_dropdes_pdu;
386 for ( i = 0; i < 8; i++ )
387 data.TxSend += WAN_TX_MIB_TABLE(i)->wtx_total_pdu;
388
389 *((PTM_FRAME_MIB_T *)ifr->ifr_data) = data;
390 }
391 break;
392 case IFX_PTM_CFG_GET:
393 // use bearer channel 0 preemption gamma interface settings
394 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = 1;
395 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis == 0 ? 1 : 0;
396 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis == 0 ? 1 : 0;
397 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size == 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size * 16);
398 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis == 0 ? 1 : 0;
399 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : 1;
400 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size * 16);
401 break;
402 case IFX_PTM_CFG_SET:
403 {
404 int i;
405
406 for ( i = 0; i < 4; i++ ) {
407 RX_GAMMA_ITF_CFG(i)->rx_eth_fcs_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 0 : 1;
408
409 RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck ? 0 : 1;
410
411 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen ) {
412 case 16: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 1; break;
413 case 32: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 2; break;
414 default: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 0;
415 }
416
417 TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 0 : 1;
418
419 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen ) {
420 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen ) {
421 case 16: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 1; break;
422 case 32: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 2; break;
423 default: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
424 }
425 }
426 else
427 TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
428 }
429 }
430 break;
431 case IFX_PTM_MAP_PKT_PRIO_TO_Q:
432 {
433 struct ppe_prio_q_map cmd;
434
435 if ( copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)) )
436 return -EFAULT;
437
438 if ( cmd.pkt_prio < 0 || cmd.pkt_prio >= ARRAY_SIZE(g_ptm_prio_queue_map) )
439 return -EINVAL;
440
441 if ( cmd.qid < 0 || cmd.qid >= g_wanqos_en )
442 return -EINVAL;
443
444 g_ptm_prio_queue_map[cmd.pkt_prio] = cmd.qid;
445 }
446 break;
447 default:
448 return -EOPNOTSUPP;
449 }
450
451 return 0;
452 }
453
454 static void ptm_tx_timeout(struct net_device *dev)
455 {
456 ASSERT(dev == g_net_dev[0], "incorrect device");
457
458 /* disable TX irq, release skb when sending new packet */
459 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
460
461 /* wake up TX queue */
462 netif_wake_queue(dev);
463
464 return;
465 }
466
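/*
 * RX buffer layout: each receive buffer is DATA_BUFFER_ALIGNMENT aligned and
 * carries a pointer to its owning skb in the 4 bytes just before skb->data.
 * get_skb_pointer() relies on this layout to translate a descriptor dataptr
 * (a 28-bit address, accessed via KSEG1) back into the skb.
 */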
467 static inline struct sk_buff* alloc_skb_rx(void)
468 {
469 struct sk_buff *skb;
470
471 /* allocate memory including trailer and padding */
472 skb = dev_alloc_skb(RX_MAX_BUFFER_SIZE + DATA_BUFFER_ALIGNMENT);
473 if ( skb != NULL ) {
474 /* must be burst-length aligned; reserve two more bytes for MAC address alignment */
475 if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
476 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
477 /* put skb pointer in reserved area "skb->data - 4" */
478 *((struct sk_buff **)skb->data - 1) = skb;
479 wmb();
480 /* write back and invalidate cache */
481 dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
482 /* invalidate cache */
483 dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
484 }
485
486 return skb;
487 }
488
489 static inline struct sk_buff* alloc_skb_tx(unsigned int size)
490 {
491 struct sk_buff *skb;
492
493 /* allocate memory including padding */
494 size = RX_MAX_BUFFER_SIZE;
495 size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
496 skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
497 /* must be burst-length aligned */
498 if ( skb != NULL )
499 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
500 return skb;
501 }
502
503 static inline struct sk_buff *get_skb_pointer(unsigned int dataptr)
504 {
505 unsigned int skb_dataptr;
506 struct sk_buff *skb;
507
508 // usually, CPE memory is less than 256M bytes
509 // so NULL means invalid pointer
510 if ( dataptr == 0 ) {
511 dbg("dataptr is 0, which is treated as an invalid pointer");
512 return NULL;
513 }
514
515 skb_dataptr = (dataptr - 4) | KSEG1;
516 skb = *(struct sk_buff **)skb_dataptr;
517
518 ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
519 ASSERT((((unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1))) | KSEG1) == (dataptr | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);
520
521 return skb;
522 }
523
524 static inline int get_tx_desc(unsigned int itf, unsigned int *f_full)
525 {
526 int desc_base = -1;
527 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[0];
528
529 // assume TX is serial operation
530 // no protection provided
531
532 *f_full = 1;
533
534 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 ) {
535 desc_base = p_itf->tx_desc_pos;
536 if ( ++(p_itf->tx_desc_pos) == CPU_TO_WAN_TX_DESC_NUM )
537 p_itf->tx_desc_pos = 0;
538 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 )
539 *f_full = 0;
540 }
541
542 return desc_base;
543 }
544
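/*
 * Mailbox IGU1 interrupt bits used by this driver:
 *   bit 0  - RX descriptor available: mask the source and schedule NAPI
 *   bit 16 - swap descriptor needs a fresh buffer: schedule the tasklet
 *   bit 17 - TX descriptor freed while the queue was stopped: wake the queue
 */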
545 static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
546 {
547 unsigned int isr;
548 int i;
549
550 isr = IFX_REG_R32(MBOX_IGU1_ISR);
551 IFX_REG_W32(isr, MBOX_IGU1_ISRC);
552 isr &= IFX_REG_R32(MBOX_IGU1_IER);
553
554 if (isr & BIT(0)) {
555 IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER);
556 napi_schedule(&g_ptm_priv_data.itf[0].napi);
557 #if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
558 {
559 volatile struct rx_descriptor *desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
560
561 if ( desc->own ) { // PP32 holds the descriptor
562 err("invalid interrupt");
563 }
564 }
565 #endif
566 }
567 if (isr & BIT(16)) {
568 IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER);
569 tasklet_hi_schedule(&g_swap_desc_tasklet);
570 }
571 if (isr & BIT(17)) {
572 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
573 netif_wake_queue(g_net_dev[0]);
574 }
575
576 return IRQ_HANDLED;
577 }
578
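/*
 * Swap descriptor ring service: for every descriptor the PP32 firmware has
 * released, free the skb it references, attach a freshly allocated aligned
 * buffer and hand the descriptor back (OWN bit set). At most 32 descriptors
 * are processed per tasklet run; if more remain the tasklet reschedules
 * itself, otherwise the swap interrupt is re-enabled.
 */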
579 static void do_swap_desc_tasklet(unsigned long arg)
580 {
581 int budget = 32;
582 volatile struct tx_descriptor *desc;
583 struct sk_buff *skb;
584 unsigned int byteoff;
585
586 while ( budget-- > 0 ) {
587 if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) // if PP32 holds the descriptor
588 break;
589
590 desc = &WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos];
591 if ( ++g_ptm_priv_data.itf[0].tx_swap_desc_pos == WAN_SWAP_DESC_NUM )
592 g_ptm_priv_data.itf[0].tx_swap_desc_pos = 0;
593
594 skb = get_skb_pointer(desc->dataptr);
595 if ( skb != NULL )
596 dev_kfree_skb_any(skb);
597
598 skb = alloc_skb_tx(RX_MAX_BUFFER_SIZE);
599 if ( skb == NULL )
600 panic("can't allocate swap buffer for PPE firmware use\n");
601 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
602 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
603
604 desc->dataptr = (unsigned int)skb->data & 0x0FFFFFFF;
605 desc->own = 1;
606 }
607
608 // clear interrupt
609 IFX_REG_W32_MASK(0, 16, MBOX_IGU1_ISRC);
610 // no more skb to be replaced
611 if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) { // if PP32 holds the descriptor
612 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER);
613 return;
614 }
615
616 tasklet_hi_schedule(&g_swap_desc_tasklet);
617 return;
618 }
619
620
621 static inline int ifx_ptm_version(char *buf)
622 {
623 int len = 0;
624 unsigned int major, mid, minor;
625
626 ifx_ptm_get_fw_ver(&major, &mid, &minor);
627
628 len += ifx_drv_ver(buf + len, "PTM", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
629 if ( mid == ~0 )
630 len += sprintf(buf + len, " PTM (E1) firmware version %u.%u\n", major, minor);
631 else
632 len += sprintf(buf + len, " PTM (E1) firmware version %u.%u.%u\n", major, mid, minor);
633
634 return len;
635 }
636
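/*
 * The packet-priority to QoS-queue map is filled so that priorities spread
 * evenly across the available TX queues. Assuming __ETH_WAN_TX_QUEUE_NUM == 8
 * (the value used by this firmware configuration), the loop below yields:
 *
 *   skb->priority : 0  1  2  3  4  5  6  7
 *   queue (qid)   : 7  6  5  4  3  2  1  0
 */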
637 static inline int init_priv_data(void)
638 {
639 int i, j;
640
641 g_wanqos_en = wanqos_en ? wanqos_en : 8;
642 if ( g_wanqos_en > 8 )
643 g_wanqos_en = 8;
644
645 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ )
646 {
647 g_queue_gamma_map[i] = queue_gamma_map[i] & ((1 << g_wanqos_en) - 1);
648 for ( j = 0; j < i; j++ )
649 g_queue_gamma_map[i] &= ~g_queue_gamma_map[j];
650 }
651
652 memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));
653
654 {
655 int max_packet_priority = ARRAY_SIZE(g_ptm_prio_queue_map);
656 int tx_num_q;
657 int q_step, q_accum, p_step;
658
659 tx_num_q = __ETH_WAN_TX_QUEUE_NUM;
660 q_step = tx_num_q - 1;
661 p_step = max_packet_priority - 1;
662 for ( j = 0, q_accum = 0; j < max_packet_priority; j++, q_accum += q_step )
663 g_ptm_prio_queue_map[j] = q_step - (q_accum + (p_step >> 1)) / p_step;
664 }
665
666 return 0;
667 }
668
669 static inline void clear_priv_data(void)
670 {
671 }
672
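/*
 * init_tables() pre-allocates the WAN RX buffer pool and programs the PPE
 * firmware configuration tables: data-length/QoS/power-save settings, the
 * per-bearer-channel RX/TX configs, the four Gamma interface configs, the
 * QoS queue descriptor layout, the TX Ctrl K table and finally the RX, TX,
 * swap and fastpath descriptor rings.
 */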
673 static inline int init_tables(void)
674 {
675 struct sk_buff *skb_pool[WAN_RX_DESC_NUM] = {0};
676 struct cfg_std_data_len cfg_std_data_len = {0};
677 struct tx_qos_cfg tx_qos_cfg = {0};
678 struct psave_cfg psave_cfg = {0};
679 struct eg_bwctrl_cfg eg_bwctrl_cfg = {0};
680 struct test_mode test_mode = {0};
681 struct rx_bc_cfg rx_bc_cfg = {0};
682 struct tx_bc_cfg tx_bc_cfg = {0};
683 struct gpio_mode gpio_mode = {0};
684 struct gpio_wm_cfg gpio_wm_cfg = {0};
685 struct rx_gamma_itf_cfg rx_gamma_itf_cfg = {0};
686 struct tx_gamma_itf_cfg tx_gamma_itf_cfg = {0};
687 struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg = {0};
688 struct rx_descriptor rx_desc = {0};
689 struct tx_descriptor tx_desc = {0};
690 int i;
691
692 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
693 skb_pool[i] = alloc_skb_rx();
694 if ( skb_pool[i] == NULL )
695 goto ALLOC_SKB_RX_FAIL;
696 }
697
698 cfg_std_data_len.byte_off = RX_HEAD_MAC_ADDR_ALIGNMENT; // this field replaces byte_off in rx descriptor of VDSL ingress
699 cfg_std_data_len.data_len = 1600;
700 *CFG_STD_DATA_LEN = cfg_std_data_len;
701
702 tx_qos_cfg.time_tick = cgu_get_pp32_clock() / 62500; // 16 * (cgu_get_pp32_clock() / 1000000)
703 tx_qos_cfg.overhd_bytes = 0;
704 tx_qos_cfg.eth1_eg_qnum = __ETH_WAN_TX_QUEUE_NUM;
705 tx_qos_cfg.eth1_burst_chk = 1;
706 tx_qos_cfg.eth1_qss = 0;
707 tx_qos_cfg.shape_en = 0; // disable
708 tx_qos_cfg.wfq_en = 0; // strict priority
709 *TX_QOS_CFG = tx_qos_cfg;
710
711 psave_cfg.start_state = 0;
712 psave_cfg.sleep_en = 1; // enable sleep mode
713 *PSAVE_CFG = psave_cfg;
714
715 eg_bwctrl_cfg.fdesc_wm = 16;
716 eg_bwctrl_cfg.class_len = 128;
717 *EG_BWCTRL_CFG = eg_bwctrl_cfg;
718
719 //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
720 *GPIO_ADDR = (unsigned int)0x00000000; // disabled by default
721
722 gpio_mode.gpio_bit_bc1 = 2;
723 gpio_mode.gpio_bit_bc0 = 1;
724 gpio_mode.gpio_bc1_en = 0;
725 gpio_mode.gpio_bc0_en = 0;
726 *GPIO_MODE = gpio_mode;
727
728 gpio_wm_cfg.stop_wm_bc1 = 2;
729 gpio_wm_cfg.start_wm_bc1 = 4;
730 gpio_wm_cfg.stop_wm_bc0 = 2;
731 gpio_wm_cfg.start_wm_bc0 = 4;
732 *GPIO_WM_CFG = gpio_wm_cfg;
733
734 test_mode.mib_clear_mode = 0;
735 test_mode.test_mode = 0;
736 *TEST_MODE = test_mode;
737
738 rx_bc_cfg.local_state = 0;
739 rx_bc_cfg.remote_state = 0;
740 rx_bc_cfg.to_false_th = 7;
741 rx_bc_cfg.to_looking_th = 3;
742 *RX_BC_CFG(0) = rx_bc_cfg;
743 *RX_BC_CFG(1) = rx_bc_cfg;
744
745 tx_bc_cfg.fill_wm = 2;
746 tx_bc_cfg.uflw_wm = 2;
747 *TX_BC_CFG(0) = tx_bc_cfg;
748 *TX_BC_CFG(1) = tx_bc_cfg;
749
750 rx_gamma_itf_cfg.receive_state = 0;
751 rx_gamma_itf_cfg.rx_min_len = 60;
752 rx_gamma_itf_cfg.rx_pad_en = 1;
753 rx_gamma_itf_cfg.rx_eth_fcs_ver_dis = 0;
754 rx_gamma_itf_cfg.rx_rm_eth_fcs = 1;
755 rx_gamma_itf_cfg.rx_tc_crc_ver_dis = 0;
756 rx_gamma_itf_cfg.rx_tc_crc_size = 1;
757 rx_gamma_itf_cfg.rx_eth_fcs_result = 0xC704DD7B;
758 rx_gamma_itf_cfg.rx_tc_crc_result = 0x1D0F1D0F;
759 rx_gamma_itf_cfg.rx_crc_cfg = 0x2500;
760 rx_gamma_itf_cfg.rx_eth_fcs_init_value = 0xFFFFFFFF;
761 rx_gamma_itf_cfg.rx_tc_crc_init_value = 0x0000FFFF;
762 rx_gamma_itf_cfg.rx_max_len_sel = 0;
763 rx_gamma_itf_cfg.rx_edit_num2 = 0;
764 rx_gamma_itf_cfg.rx_edit_pos2 = 0;
765 rx_gamma_itf_cfg.rx_edit_type2 = 0;
766 rx_gamma_itf_cfg.rx_edit_en2 = 0;
767 rx_gamma_itf_cfg.rx_edit_num1 = 0;
768 rx_gamma_itf_cfg.rx_edit_pos1 = 0;
769 rx_gamma_itf_cfg.rx_edit_type1 = 0;
770 rx_gamma_itf_cfg.rx_edit_en1 = 0;
771 rx_gamma_itf_cfg.rx_inserted_bytes_1l = 0;
772 rx_gamma_itf_cfg.rx_inserted_bytes_1h = 0;
773 rx_gamma_itf_cfg.rx_inserted_bytes_2l = 0;
774 rx_gamma_itf_cfg.rx_inserted_bytes_2h = 0;
775 rx_gamma_itf_cfg.rx_len_adj = -6;
776 for ( i = 0; i < 4; i++ )
777 *RX_GAMMA_ITF_CFG(i) = rx_gamma_itf_cfg;
778
779 tx_gamma_itf_cfg.tx_len_adj = 6;
780 tx_gamma_itf_cfg.tx_crc_off_adj = 6;
781 tx_gamma_itf_cfg.tx_min_len = 0;
782 tx_gamma_itf_cfg.tx_eth_fcs_gen_dis = 0;
783 tx_gamma_itf_cfg.tx_tc_crc_size = 1;
784 tx_gamma_itf_cfg.tx_crc_cfg = 0x2F00;
785 tx_gamma_itf_cfg.tx_eth_fcs_init_value = 0xFFFFFFFF;
786 tx_gamma_itf_cfg.tx_tc_crc_init_value = 0x0000FFFF;
787 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ ) {
788 tx_gamma_itf_cfg.queue_mapping = g_queue_gamma_map[i];
789 *TX_GAMMA_ITF_CFG(i) = tx_gamma_itf_cfg;
790 }
791
792 for ( i = 0; i < __ETH_WAN_TX_QUEUE_NUM; i++ ) {
793 wtx_qos_q_desc_cfg.length = WAN_TX_DESC_NUM;
794 wtx_qos_q_desc_cfg.addr = __ETH_WAN_TX_DESC_BASE(i);
795 *WTX_QOS_Q_DESC_CFG(i) = wtx_qos_q_desc_cfg;
796 }
797
798 // default TX queue QoS config is all ZERO
799
800 // TX Ctrl K Table
801 IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
802 IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
803 IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
804 IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
805 IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
806 IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
807 IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
808 IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
809 IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
810 IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
811 IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
812 IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
813 IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
814 IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
815 IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
816 IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));
817
818 // init RX descriptor
819 rx_desc.own = 1;
820 rx_desc.c = 0;
821 rx_desc.sop = 1;
822 rx_desc.eop = 1;
823 rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
824 rx_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
825 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
826 rx_desc.dataptr = (unsigned int)skb_pool[i]->data & 0x0FFFFFFF;
827 WAN_RX_DESC_BASE[i] = rx_desc;
828 }
829
830 // init TX descriptor
831 tx_desc.own = 0;
832 tx_desc.c = 0;
833 tx_desc.sop = 1;
834 tx_desc.eop = 1;
835 tx_desc.byteoff = 0;
836 tx_desc.qid = 0;
837 tx_desc.datalen = 0;
838 tx_desc.small = 0;
839 tx_desc.dataptr = 0;
840 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ )
841 CPU_TO_WAN_TX_DESC_BASE[i] = tx_desc;
842 for ( i = 0; i < WAN_TX_DESC_NUM_TOTAL; i++ )
843 WAN_TX_DESC_BASE(0)[i] = tx_desc;
844
845 // init Swap descriptor
846 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ )
847 WAN_SWAP_DESC_BASE[i] = tx_desc;
848
849 // init fastpath TX descriptor
850 tx_desc.own = 1;
851 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ )
852 FASTPATH_TO_WAN_TX_DESC_BASE[i] = tx_desc;
853
854 return 0;
855
856 ALLOC_SKB_RX_FAIL:
857 while ( i-- > 0 )
858 dev_kfree_skb_any(skb_pool[i]);
859 return -1;
860 }
861
862 static inline void clear_tables(void)
863 {
864 struct sk_buff *skb;
865 int i, j;
866
867 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
868 skb = get_skb_pointer(WAN_RX_DESC_BASE[i].dataptr);
869 if ( skb != NULL )
870 dev_kfree_skb_any(skb);
871 }
872
873 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ ) {
874 skb = get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE[i].dataptr);
875 if ( skb != NULL )
876 dev_kfree_skb_any(skb);
877 }
878
879 for ( j = 0; j < 8; j++ )
880 for ( i = 0; i < WAN_TX_DESC_NUM; i++ ) {
881 skb = get_skb_pointer(WAN_TX_DESC_BASE(j)[i].dataptr);
882 if ( skb != NULL )
883 dev_kfree_skb_any(skb);
884 }
885
886 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ ) {
887 skb = get_skb_pointer(WAN_SWAP_DESC_BASE[i].dataptr);
888 if ( skb != NULL )
889 dev_kfree_skb_any(skb);
890 }
891
892 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ ) {
893 skb = get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE[i].dataptr);
894 if ( skb != NULL )
895 dev_kfree_skb_any(skb);
896 }
897 }
898
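/*
 * Showtime hooks: the MEI/DSL driver calls ptm_showtime_enter() once the line
 * is trained and ptm_showtime_exit() when it drops. They toggle the carrier
 * state of dsl0 and write UTP_CFG (0x0F on enter, 0x00 on exit); transmission
 * is refused in ptm_hard_start_xmit() while g_showtime is 0.
 */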
899 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
900 {
901 int i;
902
903 ASSERT(port_cell != NULL, "port_cell is NULL");
904 ASSERT(xdata_addr != NULL, "xdata_addr is NULL");
905
906 // TODO: ReTX set xdata_addr
907 g_xdata_addr = xdata_addr;
908
909 g_showtime = 1;
910
911 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
912 netif_carrier_on(g_net_dev[i]);
913
914 IFX_REG_W32(0x0F, UTP_CFG);
915
916 //#ifdef CONFIG_VR9
917 // IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);
918 //#endif
919
920 printk("enter showtime\n");
921
922 return 0;
923 }
924
925 static int ptm_showtime_exit(void)
926 {
927 int i;
928
929 if ( !g_showtime )
930 return -1;
931
932 //#ifdef CONFIG_VR9
933 // IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);
934 //#endif
935
936 IFX_REG_W32(0x00, UTP_CFG);
937
938 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
939 netif_carrier_off(g_net_dev[i]);
940
941 g_showtime = 0;
942
943 // TODO: ReTX clean state
944 g_xdata_addr = NULL;
945
946 printk("leave showtime\n");
947
948 return 0;
949 }
950
951 static const struct of_device_id ltq_ptm_match[] = {
952 #ifdef CONFIG_DANUBE
953 { .compatible = "lantiq,ppe-danube", .data = NULL },
954 #elif defined CONFIG_AMAZON_SE
955 { .compatible = "lantiq,ppe-ase", .data = NULL },
956 #elif defined CONFIG_AR9
957 { .compatible = "lantiq,ppe-arx100", .data = NULL },
958 #elif defined CONFIG_VR9
959 { .compatible = "lantiq,ppe-xrx200", .data = NULL },
960 #endif
961 {},
962 };
963 MODULE_DEVICE_TABLE(of, ltq_ptm_match);
964
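/*
 * Probe sequence: initialise driver private data, bring up the PPE block and
 * firmware tables, register the dsl0 net_device, hook the mailbox interrupt,
 * start the PP32 firmware and finally register the showtime callbacks with
 * the MEI driver (entering showtime immediately if the line is already
 * trained).
 */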
965 static int ltq_ptm_probe(struct platform_device *pdev)
966 {
967 int ret;
968 int i;
969 char ver_str[256];
970 struct port_cell_info port_cell = {0};
971
972 ret = init_priv_data();
973 if ( ret != 0 ) {
974 err("INIT_PRIV_DATA_FAIL");
975 goto INIT_PRIV_DATA_FAIL;
976 }
977
978 ifx_ptm_init_chip(pdev);
979 ret = init_tables();
980 if ( ret != 0 ) {
981 err("INIT_TABLES_FAIL");
982 goto INIT_TABLES_FAIL;
983 }
984
985 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
986 g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
987 if ( g_net_dev[i] == NULL )
988 goto ALLOC_NETDEV_FAIL;
989 ptm_setup(g_net_dev[i], i);
990 }
991
992 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
993 ret = register_netdev(g_net_dev[i]);
994 if ( ret != 0 )
995 goto REGISTER_NETDEV_FAIL;
996 }
997
998 /* register interrupt handler */
999 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
1000 if ( ret ) {
1001 if ( ret == -EBUSY ) {
1002 err("IRQ may be occupied by other driver, please reconfig to disable it.");
1003 }
1004 else {
1005 err("request_irq fail");
1006 }
1007 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
1008 }
1009 disable_irq(PPE_MAILBOX_IGU1_INT);
1010
1011 ret = ifx_pp32_start(0);
1012 if ( ret ) {
1013 err("ifx_pp32_start fail!");
1014 goto PP32_START_FAIL;
1015 }
1016 IFX_REG_W32(1 << 16, MBOX_IGU1_IER); // enable SWAP interrupt
1017 IFX_REG_W32(~0, MBOX_IGU1_ISRC);
1018
1019 enable_irq(PPE_MAILBOX_IGU1_INT);
1020
1021 ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);
1022 if ( g_showtime ) {
1023 ptm_showtime_enter(&port_cell, &g_xdata_addr);
1024 }
1025
1026 ifx_mei_atm_showtime_enter = ptm_showtime_enter;
1027 ifx_mei_atm_showtime_exit = ptm_showtime_exit;
1028
1029 ifx_ptm_version(ver_str);
1030 printk(KERN_INFO "%s", ver_str);
1031
1032 printk("ifxmips_ptm: PTM init succeed\n");
1033
1034 return 0;
1035
1036 PP32_START_FAIL:
1037 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1038 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
1039 i = ARRAY_SIZE(g_net_dev);
1040 REGISTER_NETDEV_FAIL:
1041 while ( i-- )
1042 unregister_netdev(g_net_dev[i]);
1043 i = ARRAY_SIZE(g_net_dev);
1044 ALLOC_NETDEV_FAIL:
1045 while ( i-- ) {
1046 free_netdev(g_net_dev[i]);
1047 g_net_dev[i] = NULL;
1048 }
1049 INIT_TABLES_FAIL:
1050 INIT_PRIV_DATA_FAIL:
1051 clear_priv_data();
1052 printk("ifxmips_ptm: PTM init failed\n");
1053 return ret;
1054 }
1055
1056 static int ltq_ptm_remove(struct platform_device *pdev)
1057 {
1058 int i;
1059 ifx_mei_atm_showtime_enter = NULL;
1060 ifx_mei_atm_showtime_exit = NULL;
1061
1062
1063 ifx_pp32_stop(0);
1064
1065 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1066
1067 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1068 unregister_netdev(g_net_dev[i]);
1069
1070 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1071 free_netdev(g_net_dev[i]);
1072 g_net_dev[i] = NULL;
1073 }
1074
1075 clear_tables();
1076
1077 ifx_ptm_uninit_chip();
1078
1079 clear_priv_data();
1080
1081 return 0;
1082 }
1083
1084 #ifndef MODULE
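/*
 * When built in, the same settings can be given on the kernel command line,
 * e.g. (illustrative values):
 *
 *   wanqos_en=8 queue_gamma_map=0xfe,0x01,0x00,0x00
 *
 * The parsers below accept ',', ';' or ':' as separators for queue_gamma_map.
 */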
1085 static int __init wanqos_en_setup(char *line)
1086 {
1087 wanqos_en = simple_strtoul(line, NULL, 0);
1088
1089 if ( wanqos_en < 1 || wanqos_en > 8 )
1090 wanqos_en = 0;
1091
1092 return 0;
1093 }
1094
1095 static int __init queue_gamma_map_setup(char *line)
1096 {
1097 char *p;
1098 int i;
1099
1100 for ( i = 0, p = line; i < ARRAY_SIZE(queue_gamma_map) && isxdigit(*p); i++ )
1101 {
1102 queue_gamma_map[i] = simple_strtoul(p, &p, 0);
1103 if ( *p == ',' || *p == ';' || *p == ':' )
1104 p++;
1105 }
1106
1107 return 0;
1108 }
1109 #endif
1110 static struct platform_driver ltq_ptm_driver = {
1111 .probe = ltq_ptm_probe,
1112 .remove = ltq_ptm_remove,
1113 .driver = {
1114 .name = "ptm",
1115 .owner = THIS_MODULE,
1116 .of_match_table = ltq_ptm_match,
1117 },
1118 };
1119
1120 module_platform_driver(ltq_ptm_driver);
1121 #ifndef MODULE
1122 __setup("wanqos_en=", wanqos_en_setup);
1123 __setup("queue_gamma_map=", queue_gamma_map_setup);
1124 #endif
1125
1126 MODULE_LICENSE("GPL");