1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_vdsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for VR9)
10 ** COPYRIGHT : Copyright (c) 2006
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
18 **
19 ** HISTORY
20 ** $Date $Author $Comment
21 ** 07 JUL 2009 Xu Liang Init Version
22 *******************************************************************************/
23
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/ctype.h>
29 #include <linux/errno.h>
30 #include <linux/proc_fs.h>
31 #include <linux/init.h>
32 #include <linux/ioctl.h>
33 #include <linux/etherdevice.h>
34 #include <linux/interrupt.h>
35 #include <linux/netdevice.h>
36
37 #include "ifxmips_ptm_vdsl.h"
38 #include <lantiq_soc.h>
39
40 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
41 #define MODULE_PARM(a, b) module_param(a, int, 0)
42
43 static int wanqos_en = 0;
44 static int queue_gamma_map[4] = {0xFE, 0x01, 0x00, 0x00};
45
46 MODULE_PARM(wanqos_en, "i");
47 MODULE_PARM_DESC(wanqos_en, "WAN QoS support: number of WAN TX QoS queues (1-8), 0 - use default (8).");
48
49 MODULE_PARM_ARRAY(queue_gamma_map, "4-4i");
50 MODULE_PARM_DESC(queue_gamma_map, "TX QoS queues mapping to 4 TX Gamma interfaces.");
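/*
 * Example usage (a sketch; the module file name below assumes the driver is
 * built as ifxmips_ptm_vdsl.ko and may differ per build):
 *   insmod ifxmips_ptm_vdsl.ko wanqos_en=8 queue_gamma_map=0xFE,0x01,0x00,0x00
 * For a built-in driver the same values can be passed on the kernel command
 * line through the "wanqos_en=" and "queue_gamma_map=" __setup hooks at the
 * end of this file.
 */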
51
52 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
53 extern int (*ifx_mei_atm_showtime_exit)(void);
54 extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
55
56 static int g_showtime = 0;
57 static void *g_xdata_addr = NULL;
58
59
60 #define ENABLE_TMP_DBG 0
61
62 unsigned long cgu_get_pp32_clock(void)
63 {
64 struct clk *c = clk_get_ppe();
65 unsigned long rate = clk_get_rate(c);
66 clk_put(c);
67 return rate;
68 }
69
70 static void ptm_setup(struct net_device *, int);
71 static struct net_device_stats *ptm_get_stats(struct net_device *);
72 static int ptm_open(struct net_device *);
73 static int ptm_stop(struct net_device *);
74 static unsigned int ptm_poll(int, unsigned int);
75 static int ptm_napi_poll(struct napi_struct *, int);
76 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
77 static int ptm_change_mtu(struct net_device *, int);
78 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
79 static void ptm_tx_timeout(struct net_device *);
80
81 static inline struct sk_buff* alloc_skb_rx(void);
82 static inline struct sk_buff* alloc_skb_tx(unsigned int);
83 static inline struct sk_buff *get_skb_pointer(unsigned int);
84 static inline int get_tx_desc(unsigned int, unsigned int *);
85
86 /*
87 * Mailbox handler and signal function
88 */
89 static irqreturn_t mailbox_irq_handler(int, void *);
90
91 /*
92 * Tasklet to Handle Swap Descriptors
93 */
94 static void do_swap_desc_tasklet(unsigned long);
95
96
97 /*
98 * Init & clean-up functions
99 */
100 static inline int init_priv_data(void);
101 static inline void clear_priv_data(void);
102 static inline int init_tables(void);
103 static inline void clear_tables(void);
104
105 static int g_wanqos_en = 0;
106
107 static int g_queue_gamma_map[4];
108
109 static struct ptm_priv_data g_ptm_priv_data;
110
111 static struct net_device_ops g_ptm_netdev_ops = {
112 .ndo_get_stats = ptm_get_stats,
113 .ndo_open = ptm_open,
114 .ndo_stop = ptm_stop,
115 .ndo_start_xmit = ptm_hard_start_xmit,
116 .ndo_validate_addr = eth_validate_addr,
117 .ndo_set_mac_address = eth_mac_addr,
118 .ndo_change_mtu = ptm_change_mtu,
119 .ndo_do_ioctl = ptm_ioctl,
120 .ndo_tx_timeout = ptm_tx_timeout,
121 };
122
123 static struct net_device *g_net_dev[1] = {0};
124 static char *g_net_dev_name[1] = {"ptm0"};
125
126 static int g_ptm_prio_queue_map[8];
127
128 static DECLARE_TASKLET(g_swap_desc_tasklet, do_swap_desc_tasklet, 0);
129
130
131 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
132
133 /*
134 * ####################################
135 * Local Function
136 * ####################################
137 */
138
139 static void ptm_setup(struct net_device *dev, int ndev)
140 {
141 netif_carrier_off(dev);
142
143 dev->netdev_ops = &g_ptm_netdev_ops;
144 netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 16);
145 dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
146
147 dev->dev_addr[0] = 0x00;
148 dev->dev_addr[1] = 0x20;
149 dev->dev_addr[2] = 0xda;
150 dev->dev_addr[3] = 0x86;
151 dev->dev_addr[4] = 0x23;
152 dev->dev_addr[5] = 0x75 + ndev;
153 }
154
155 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
156 {
157 struct net_device_stats *s;
158
159 if ( dev != g_net_dev[0] )
160 return NULL;
161 s = &g_ptm_priv_data.itf[0].stats;
162
163 return s;
164 }
165
166 static int ptm_open(struct net_device *dev)
167 {
168 ASSERT(dev == g_net_dev[0], "incorrect device");
169
170 napi_enable(&g_ptm_priv_data.itf[0].napi);
171
172 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
173
174 netif_start_queue(dev);
175
176 return 0;
177 }
178
179 static int ptm_stop(struct net_device *dev)
180 {
181 ASSERT(dev == g_net_dev[0], "incorrect device");
182
183 IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER);
184
185 napi_disable(&g_ptm_priv_data.itf[0].napi);
186
187 netif_stop_queue(dev);
188
189 return 0;
190 }
191
192 static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
193 {
194 unsigned int work_done = 0;
195 volatile struct rx_descriptor *desc;
196 struct rx_descriptor reg_desc;
197 struct sk_buff *skb, *new_skb;
198
199 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
200
201 while ( work_done < work_to_do ) {
202 desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
203 if ( desc->own /* || !desc->c */ ) // if PP32 holds the descriptor or the descriptor is not completed
204 break;
205 if ( ++g_ptm_priv_data.itf[0].rx_desc_pos == WAN_RX_DESC_NUM )
206 g_ptm_priv_data.itf[0].rx_desc_pos = 0;
207
208 reg_desc = *desc;
209 skb = get_skb_pointer(reg_desc.dataptr);
210 ASSERT(skb != NULL, "invalid pointer skb == NULL");
211
212 new_skb = alloc_skb_rx();
213 if ( new_skb != NULL ) {
214 skb_reserve(skb, reg_desc.byteoff);
215 skb_put(skb, reg_desc.datalen);
216
217 // parse protocol header
218 skb->dev = g_net_dev[0];
219 skb->protocol = eth_type_trans(skb, skb->dev);
220
221 g_net_dev[0]->last_rx = jiffies;
222
223 netif_receive_skb(skb);
224
225 g_ptm_priv_data.itf[0].stats.rx_packets++;
226 g_ptm_priv_data.itf[0].stats.rx_bytes += reg_desc.datalen;
227
228 reg_desc.dataptr = (unsigned int)new_skb->data & 0x0FFFFFFF;
229 reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
230 }
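/* if no replacement buffer could be allocated, the old buffer stays attached
 * to the descriptor and the received frame is dropped when the descriptor is
 * handed back to the PP32 below */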
231
232 reg_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
233 reg_desc.own = 1;
234 reg_desc.c = 0;
235
236 /* write the descriptor back to memory: word 1 first, then word 0 (which holds the OWN bit), so the PP32 never sees a half-updated descriptor */
237 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
238 wmb();
239 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
240
241 work_done++;
242 }
243
244 return work_done;
245 }
246
247 static int ptm_napi_poll(struct napi_struct *napi, int budget)
248 {
249 int ndev = 0;
250 unsigned int work_done;
251
252 work_done = ptm_poll(ndev, budget);
253
254 // interface down
255 if ( !netif_running(napi->dev) ) {
256 napi_complete(napi);
257 return work_done;
258 }
259
260 // clear interrupt
261 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC);
262 // no more traffic
263 if (work_done < budget) {
264 napi_complete(napi);
265 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
266 return work_done;
267 }
268
269 // next round
270 return work_done;
271 }
272
273 static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
274 {
275 unsigned int f_full;
276 int desc_base;
277 volatile struct tx_descriptor *desc;
278 struct tx_descriptor reg_desc = {0};
279 struct sk_buff *skb_to_free;
280 unsigned int byteoff;
281
282 ASSERT(dev == g_net_dev[0], "incorrect device");
283
284 if ( !g_showtime ) {
285 err("not in showtime");
286 goto PTM_HARD_START_XMIT_FAIL;
287 }
288
289 /* allocate descriptor */
290 desc_base = get_tx_desc(0, &f_full);
291 if ( f_full ) {
292 dev->trans_start = jiffies;
293 netif_stop_queue(dev);
294
295 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC);
296 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER);
297 }
298 if ( desc_base < 0 )
299 goto PTM_HARD_START_XMIT_FAIL;
300 desc = &CPU_TO_WAN_TX_DESC_BASE[desc_base];
301
302 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
303 if ( skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff || skb_cloned(skb) ) {
304 struct sk_buff *new_skb;
305
306 ASSERT(skb_headroom(skb) >= sizeof(struct sk_buff *) + byteoff, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
307 ASSERT(!skb_cloned(skb), "skb is cloned");
308
309 new_skb = alloc_skb_tx(skb->len);
310 if ( new_skb == NULL ) {
311 dbg("no memory");
312 goto ALLOC_SKB_TX_FAIL;
313 }
314 skb_put(new_skb, skb->len);
315 memcpy(new_skb->data, skb->data, skb->len);
316 dev_kfree_skb_any(skb);
317 skb = new_skb;
318 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
319 /* write back to physical memory */
320 dma_cache_wback((unsigned long)skb->data, skb->len);
321 }
322
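/* store the skb pointer immediately below the aligned data buffer so that
 * get_skb_pointer() can recover it from the descriptor's dataptr when the
 * buffer is freed or recycled */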
323 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
324 /* write back to physical memory */
325 dma_cache_wback((unsigned long)skb->data - byteoff - sizeof(struct sk_buff *), skb->len + byteoff + sizeof(struct sk_buff *));
326
327 /* free previous skb */
328 skb_to_free = get_skb_pointer(desc->dataptr);
329 if ( skb_to_free != NULL )
330 dev_kfree_skb_any(skb_to_free);
331
332 /* update descriptor */
333 reg_desc.small = 0;
334 reg_desc.dataptr = (unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1));
335 reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
336 reg_desc.qid = g_ptm_prio_queue_map[skb->priority > 7 ? 7 : skb->priority];
337 reg_desc.byteoff = byteoff;
338 reg_desc.own = 1;
339 reg_desc.c = 1;
340 reg_desc.sop = reg_desc.eop = 1;
341
342 /* update MIB */
343 g_ptm_priv_data.itf[0].stats.tx_packets++;
344 g_ptm_priv_data.itf[0].stats.tx_bytes += reg_desc.datalen;
345
346 /* write the descriptor back to memory: word 1 first, then word 0 with the OWN bit, so the PP32 only ever sees a complete descriptor */
347 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
348 wmb();
349 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
350
351 dev->trans_start = jiffies;
352
353 return 0;
354
355 ALLOC_SKB_TX_FAIL:
356 PTM_HARD_START_XMIT_FAIL:
357 dev_kfree_skb_any(skb);
358 g_ptm_priv_data.itf[0].stats.tx_dropped++;
359 return 0;
360 }
361
362 static int ptm_change_mtu(struct net_device *dev, int mtu)
363 {
364 /* Allow up to 1508 bytes, for RFC4638 */
365 if (mtu < 68 || mtu > ETH_DATA_LEN + 8)
366 return -EINVAL;
367 dev->mtu = mtu;
368 return 0;
369 }
370
371 static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
372 {
373 ASSERT(dev == g_net_dev[0], "incorrect device");
374
375 switch ( cmd )
376 {
377 case IFX_PTM_MIB_CW_GET:
378 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = IFX_REG_R32(DREG_AR_CELL0) + IFX_REG_R32(DREG_AR_CELL1);
379 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = IFX_REG_R32(DREG_AR_IDLE_CNT0) + IFX_REG_R32(DREG_AR_IDLE_CNT1);
380 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = IFX_REG_R32(DREG_AR_CVN_CNT0) + IFX_REG_R32(DREG_AR_CVN_CNT1) + IFX_REG_R32(DREG_AR_CVNP_CNT0) + IFX_REG_R32(DREG_AR_CVNP_CNT1);
381 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = IFX_REG_R32(DREG_AT_CELL0) + IFX_REG_R32(DREG_AT_CELL1);
382 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = IFX_REG_R32(DREG_AT_IDLE_CNT0) + IFX_REG_R32(DREG_AT_IDLE_CNT1);
383 break;
384 case IFX_PTM_MIB_FRAME_GET:
385 {
386 PTM_FRAME_MIB_T data = {0};
387 int i;
388
389 data.RxCorrect = IFX_REG_R32(DREG_AR_HEC_CNT0) + IFX_REG_R32(DREG_AR_HEC_CNT1) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1);
390 for ( i = 0; i < 4; i++ )
391 data.RxDropped += WAN_RX_MIB_TABLE(i)->wrx_dropdes_pdu;
392 for ( i = 0; i < 8; i++ )
393 data.TxSend += WAN_TX_MIB_TABLE(i)->wtx_total_pdu;
394
395 *((PTM_FRAME_MIB_T *)ifr->ifr_data) = data;
396 }
397 break;
398 case IFX_PTM_CFG_GET:
399 // use bearer channel 0 / preemption gamma interface settings
400 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = 1;
401 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis == 0 ? 1 : 0;
402 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis == 0 ? 1 : 0;
403 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size == 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size * 16);
404 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis == 0 ? 1 : 0;
405 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : 1;
406 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size * 16);
407 break;
408 case IFX_PTM_CFG_SET:
409 {
410 int i;
411
412 for ( i = 0; i < 4; i++ ) {
413 RX_GAMMA_ITF_CFG(i)->rx_eth_fcs_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 0 : 1;
414
415 RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck ? 0 : 1;
416
417 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen ) {
418 case 16: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 1; break;
419 case 32: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 2; break;
420 default: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 0;
421 }
422
423 TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 0 : 1;
424
425 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen ) {
426 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen ) {
427 case 16: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 1; break;
428 case 32: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 2; break;
429 default: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
430 }
431 }
432 else
433 TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
434 }
435 }
436 break;
437 case IFX_PTM_MAP_PKT_PRIO_TO_Q:
438 {
439 struct ppe_prio_q_map cmd;
440
441 if ( copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)) )
442 return -EFAULT;
443
444 if ( cmd.pkt_prio < 0 || cmd.pkt_prio >= ARRAY_SIZE(g_ptm_prio_queue_map) )
445 return -EINVAL;
446
447 if ( cmd.qid < 0 || cmd.qid >= g_wanqos_en )
448 return -EINVAL;
449
450 g_ptm_prio_queue_map[cmd.pkt_prio] = cmd.qid;
451 }
452 break;
453 default:
454 return -EOPNOTSUPP;
455 }
456
457 return 0;
458 }
459
460 static void ptm_tx_timeout(struct net_device *dev)
461 {
462 ASSERT(dev == g_net_dev[0], "incorrect device");
463
464 /* disable the TX irq; pending skbs are released when the next packet is sent */
465 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
466
467 /* wake up TX queue */
468 netif_wake_queue(dev);
469
470 return;
471 }
472
473 static inline struct sk_buff* alloc_skb_rx(void)
474 {
475 struct sk_buff *skb;
476
477 /* allocate memory including trailer and padding */
478 skb = dev_alloc_skb(RX_MAX_BUFFER_SIZE + DATA_BUFFER_ALIGNMENT);
479 if ( skb != NULL ) {
480 /* the buffer must be burst-length aligned (two more bytes are reserved via the descriptor byteoff for MAC address alignment); the expression below is the padding that rounds skb->data up to the next DATA_BUFFER_ALIGNMENT boundary */
481 if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
482 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
483 /* put the skb pointer in the reserved area just before the data, at "skb->data - 4" */
484 *((struct sk_buff **)skb->data - 1) = skb;
485 wmb();
486 /* write back and invalidate cache */
487 dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
488 /* invalidate cache */
489 dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
490 }
491
492 return skb;
493 }
494
495 static inline struct sk_buff* alloc_skb_tx(unsigned int size)
496 {
497 struct sk_buff *skb;
498
499 /* allocate memory including padding */
500 size = RX_MAX_BUFFER_SIZE; // the requested size is ignored; a full RX_MAX_BUFFER_SIZE buffer is always allocated
501 size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
502 skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
503 /* must be burst-length aligned */
504 if ( skb != NULL )
505 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
506 return skb;
507 }
508
509 static inline struct sk_buff *get_skb_pointer(unsigned int dataptr)
510 {
511 unsigned int skb_dataptr;
512 struct sk_buff *skb;
513
514 // CPE memory is usually less than 256 MB, so a zero dataptr is treated as an invalid pointer;
515 // descriptors hold the physical address (KSEG bits masked off), mapped back to an uncached KSEG1 address below
516 if ( dataptr == 0 ) {
517 dbg("dataptr is 0, it's supposed to be invalid pointer");
518 return NULL;
519 }
520
521 skb_dataptr = (dataptr - 4) | KSEG1;
522 skb = *(struct sk_buff **)skb_dataptr;
523
524 ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
525 ASSERT((((unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1))) | KSEG1) == (dataptr | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);
526
527 return skb;
528 }
529
530 static inline int get_tx_desc(unsigned int itf, unsigned int *f_full)
531 {
532 int desc_base = -1;
533 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[0];
534
535 // TX is assumed to be a serial operation,
536 // so no locking is provided
537
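/* *f_full is cleared only when the next descriptor is also free; otherwise
 * the ring is (or is about to be) full and the caller stops the TX queue
 * until the TX-release interrupt fires */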
538 *f_full = 1;
539
540 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 ) {
541 desc_base = p_itf->tx_desc_pos;
542 if ( ++(p_itf->tx_desc_pos) == CPU_TO_WAN_TX_DESC_NUM )
543 p_itf->tx_desc_pos = 0;
544 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 )
545 *f_full = 0;
546 }
547
548 return desc_base;
549 }
550
551 static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
552 {
553 unsigned int isr;
554 int i;
555
556 isr = IFX_REG_R32(MBOX_IGU1_ISR);
557 IFX_REG_W32(isr, MBOX_IGU1_ISRC);
558 isr &= IFX_REG_R32(MBOX_IGU1_IER);
559
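/* MBOX_IGU1 interrupt bits used by this driver: bit 0 - packet received on
 * the WAN RX ring, bit 16 - swap descriptor needs a replacement buffer,
 * bit 17 - TX descriptor released (used to wake the TX queue) */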
560 if (isr & BIT(0)) {
561 IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER);
562 napi_schedule(&g_ptm_priv_data.itf[0].napi);
563 #if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
564 {
565 volatile struct rx_descriptor *desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
566
567 if ( desc->own ) { // PP32 holds the descriptor
568 err("invalid interrupt");
569 }
570 }
571 #endif
572 }
573 if (isr & BIT(16)) {
574 IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER);
575 tasklet_hi_schedule(&g_swap_desc_tasklet);
576 }
577 if (isr & BIT(17)) {
578 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
579 netif_wake_queue(g_net_dev[0]);
580 }
581
582 return IRQ_HANDLED;
583 }
584
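/* The swap descriptor ring supplies buffers to the PPE firmware: for every
 * descriptor the firmware hands back, free the old skb (if any) and attach a
 * freshly allocated buffer before returning ownership to the firmware. */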
585 static void do_swap_desc_tasklet(unsigned long arg)
586 {
587 int budget = 32;
588 volatile struct tx_descriptor *desc;
589 struct sk_buff *skb;
590 unsigned int byteoff;
591
592 while ( budget-- > 0 ) {
593 if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) // if PP32 holds the descriptor
594 break;
595
596 desc = &WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos];
597 if ( ++g_ptm_priv_data.itf[0].tx_swap_desc_pos == WAN_SWAP_DESC_NUM )
598 g_ptm_priv_data.itf[0].tx_swap_desc_pos = 0;
599
600 skb = get_skb_pointer(desc->dataptr);
601 if ( skb != NULL )
602 dev_kfree_skb_any(skb);
603
604 skb = alloc_skb_tx(RX_MAX_BUFFER_SIZE);
605 if ( skb == NULL )
606 panic("can't allocate swap buffer for PPE firmware use\n");
607 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
608 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
609
610 desc->dataptr = (unsigned int)skb->data & 0x0FFFFFFF;
611 desc->own = 1;
612 }
613
614 // clear the swap-descriptor interrupt (bit 16)
615 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_ISRC);
616 // no more skb to be replaced
617 if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) { // if PP32 holds the descriptor
618 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER);
619 return;
620 }
621
622 tasklet_hi_schedule(&g_swap_desc_tasklet);
623 return;
624 }
625
626
627 static inline int ifx_ptm_version(char *buf)
628 {
629 int len = 0;
630 unsigned int major, minor;
631
632 ifx_ptm_get_fw_ver(&major, &minor);
633
634 len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
635 len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
636
637 return len;
638 }
639
640 static inline int init_priv_data(void)
641 {
642 int i, j;
643
644 g_wanqos_en = wanqos_en ? wanqos_en : 8;
645 if ( g_wanqos_en > 8 )
646 g_wanqos_en = 8;
647
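/* clip each gamma interface's queue bitmap to the enabled queues and make the
 * maps disjoint: queues already claimed by a lower-numbered gamma interface
 * are removed from later ones. With the default queue_gamma_map this puts
 * queues 1-7 on gamma interface 0 and queue 0 on gamma interface 1. */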
648 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ )
649 {
650 g_queue_gamma_map[i] = queue_gamma_map[i] & ((1 << g_wanqos_en) - 1);
651 for ( j = 0; j < i; j++ )
652 g_queue_gamma_map[i] &= ~g_queue_gamma_map[j];
653 }
654
655 memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));
656
657 {
658 int max_packet_priority = ARRAY_SIZE(g_ptm_prio_queue_map);
659 int tx_num_q;
660 int q_step, q_accum, p_step;
661
662 tx_num_q = __ETH_WAN_TX_QUEUE_NUM;
663 q_step = tx_num_q - 1;
664 p_step = max_packet_priority - 1;
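/* spread the 8 skb->priority levels evenly over the available TX queues;
 * e.g. assuming __ETH_WAN_TX_QUEUE_NUM is 8, priority 0..7 maps to
 * queue 7..0 respectively */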
665 for ( j = 0, q_accum = 0; j < max_packet_priority; j++, q_accum += q_step )
666 g_ptm_prio_queue_map[j] = q_step - (q_accum + (p_step >> 1)) / p_step;
667 }
668
669 return 0;
670 }
671
672 static inline void clear_priv_data(void)
673 {
674 }
675
676 static inline int init_tables(void)
677 {
678 struct sk_buff *skb_pool[WAN_RX_DESC_NUM] = {0};
679 struct cfg_std_data_len cfg_std_data_len = {0};
680 struct tx_qos_cfg tx_qos_cfg = {0};
681 struct psave_cfg psave_cfg = {0};
682 struct eg_bwctrl_cfg eg_bwctrl_cfg = {0};
683 struct test_mode test_mode = {0};
684 struct rx_bc_cfg rx_bc_cfg = {0};
685 struct tx_bc_cfg tx_bc_cfg = {0};
686 struct gpio_mode gpio_mode = {0};
687 struct gpio_wm_cfg gpio_wm_cfg = {0};
688 struct rx_gamma_itf_cfg rx_gamma_itf_cfg = {0};
689 struct tx_gamma_itf_cfg tx_gamma_itf_cfg = {0};
690 struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg = {0};
691 struct rx_descriptor rx_desc = {0};
692 struct tx_descriptor tx_desc = {0};
693 int i;
694
695 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
696 skb_pool[i] = alloc_skb_rx();
697 if ( skb_pool[i] == NULL )
698 goto ALLOC_SKB_RX_FAIL;
699 }
700
701 cfg_std_data_len.byte_off = RX_HEAD_MAC_ADDR_ALIGNMENT; // this field replaces the byte_off field of the RX descriptor for VDSL ingress
702 cfg_std_data_len.data_len = 1600;
703 *CFG_STD_DATA_LEN = cfg_std_data_len;
704
705 tx_qos_cfg.time_tick = cgu_get_pp32_clock() / 62500; // 16 * (cgu_get_pp32_clock() / 1000000)
706 tx_qos_cfg.overhd_bytes = 0;
707 tx_qos_cfg.eth1_eg_qnum = __ETH_WAN_TX_QUEUE_NUM;
708 tx_qos_cfg.eth1_burst_chk = 1;
709 tx_qos_cfg.eth1_qss = 0;
710 tx_qos_cfg.shape_en = 0; // disable
711 tx_qos_cfg.wfq_en = 0; // strict priority
712 *TX_QOS_CFG = tx_qos_cfg;
713
714 psave_cfg.start_state = 0;
715 psave_cfg.sleep_en = 1; // enable sleep mode
716 *PSAVE_CFG = psave_cfg;
717
718 eg_bwctrl_cfg.fdesc_wm = 16;
719 eg_bwctrl_cfg.class_len = 128;
720 *EG_BWCTRL_CFG = eg_bwctrl_cfg;
721
722 //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
723 *GPIO_ADDR = (unsigned int)0x00000000; // disabled by default
724
725 gpio_mode.gpio_bit_bc1 = 2;
726 gpio_mode.gpio_bit_bc0 = 1;
727 gpio_mode.gpio_bc1_en = 0;
728 gpio_mode.gpio_bc0_en = 0;
729 *GPIO_MODE = gpio_mode;
730
731 gpio_wm_cfg.stop_wm_bc1 = 2;
732 gpio_wm_cfg.start_wm_bc1 = 4;
733 gpio_wm_cfg.stop_wm_bc0 = 2;
734 gpio_wm_cfg.start_wm_bc0 = 4;
735 *GPIO_WM_CFG = gpio_wm_cfg;
736
737 test_mode.mib_clear_mode = 0;
738 test_mode.test_mode = 0;
739 *TEST_MODE = test_mode;
740
741 rx_bc_cfg.local_state = 0;
742 rx_bc_cfg.remote_state = 0;
743 rx_bc_cfg.to_false_th = 7;
744 rx_bc_cfg.to_looking_th = 3;
745 *RX_BC_CFG(0) = rx_bc_cfg;
746 *RX_BC_CFG(1) = rx_bc_cfg;
747
748 tx_bc_cfg.fill_wm = 2;
749 tx_bc_cfg.uflw_wm = 2;
750 *TX_BC_CFG(0) = tx_bc_cfg;
751 *TX_BC_CFG(1) = tx_bc_cfg;
752
753 rx_gamma_itf_cfg.receive_state = 0;
754 rx_gamma_itf_cfg.rx_min_len = 60;
755 rx_gamma_itf_cfg.rx_pad_en = 1;
756 rx_gamma_itf_cfg.rx_eth_fcs_ver_dis = 0;
757 rx_gamma_itf_cfg.rx_rm_eth_fcs = 1;
758 rx_gamma_itf_cfg.rx_tc_crc_ver_dis = 0;
759 rx_gamma_itf_cfg.rx_tc_crc_size = 1;
760 rx_gamma_itf_cfg.rx_eth_fcs_result = 0xC704DD7B; // Ethernet CRC-32 residue
761 rx_gamma_itf_cfg.rx_tc_crc_result = 0x1D0F1D0F; // CRC-16-CCITT residue in both halves
762 rx_gamma_itf_cfg.rx_crc_cfg = 0x2500;
763 rx_gamma_itf_cfg.rx_eth_fcs_init_value = 0xFFFFFFFF;
764 rx_gamma_itf_cfg.rx_tc_crc_init_value = 0x0000FFFF;
765 rx_gamma_itf_cfg.rx_max_len_sel = 0;
766 rx_gamma_itf_cfg.rx_edit_num2 = 0;
767 rx_gamma_itf_cfg.rx_edit_pos2 = 0;
768 rx_gamma_itf_cfg.rx_edit_type2 = 0;
769 rx_gamma_itf_cfg.rx_edit_en2 = 0;
770 rx_gamma_itf_cfg.rx_edit_num1 = 0;
771 rx_gamma_itf_cfg.rx_edit_pos1 = 0;
772 rx_gamma_itf_cfg.rx_edit_type1 = 0;
773 rx_gamma_itf_cfg.rx_edit_en1 = 0;
774 rx_gamma_itf_cfg.rx_inserted_bytes_1l = 0;
775 rx_gamma_itf_cfg.rx_inserted_bytes_1h = 0;
776 rx_gamma_itf_cfg.rx_inserted_bytes_2l = 0;
777 rx_gamma_itf_cfg.rx_inserted_bytes_2h = 0;
778 rx_gamma_itf_cfg.rx_len_adj = -6;
779 for ( i = 0; i < 4; i++ )
780 *RX_GAMMA_ITF_CFG(i) = rx_gamma_itf_cfg;
781
782 tx_gamma_itf_cfg.tx_len_adj = 6;
783 tx_gamma_itf_cfg.tx_crc_off_adj = 6;
784 tx_gamma_itf_cfg.tx_min_len = 0;
785 tx_gamma_itf_cfg.tx_eth_fcs_gen_dis = 0;
786 tx_gamma_itf_cfg.tx_tc_crc_size = 1;
787 tx_gamma_itf_cfg.tx_crc_cfg = 0x2F00;
788 tx_gamma_itf_cfg.tx_eth_fcs_init_value = 0xFFFFFFFF;
789 tx_gamma_itf_cfg.tx_tc_crc_init_value = 0x0000FFFF;
790 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ ) {
791 tx_gamma_itf_cfg.queue_mapping = g_queue_gamma_map[i];
792 *TX_GAMMA_ITF_CFG(i) = tx_gamma_itf_cfg;
793 }
794
795 for ( i = 0; i < __ETH_WAN_TX_QUEUE_NUM; i++ ) {
796 wtx_qos_q_desc_cfg.length = WAN_TX_DESC_NUM;
797 wtx_qos_q_desc_cfg.addr = __ETH_WAN_TX_DESC_BASE(i);
798 *WTX_QOS_Q_DESC_CFG(i) = wtx_qos_q_desc_cfg;
799 }
800
801 // default TX queue QoS config is all ZERO
802
803 // TX Ctrl K Table
804 IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
805 IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
806 IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
807 IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
808 IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
809 IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
810 IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
811 IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
812 IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
813 IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
814 IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
815 IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
816 IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
817 IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
818 IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
819 IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));
820
821 // init RX descriptor
822 rx_desc.own = 1;
823 rx_desc.c = 0;
824 rx_desc.sop = 1;
825 rx_desc.eop = 1;
826 rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
827 rx_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
828 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
829 rx_desc.dataptr = (unsigned int)skb_pool[i]->data & 0x0FFFFFFF;
830 WAN_RX_DESC_BASE[i] = rx_desc;
831 }
832
833 // init TX descriptor
834 tx_desc.own = 0;
835 tx_desc.c = 0;
836 tx_desc.sop = 1;
837 tx_desc.eop = 1;
838 tx_desc.byteoff = 0;
839 tx_desc.qid = 0;
840 tx_desc.datalen = 0;
841 tx_desc.small = 0;
842 tx_desc.dataptr = 0;
843 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ )
844 CPU_TO_WAN_TX_DESC_BASE[i] = tx_desc;
845 for ( i = 0; i < WAN_TX_DESC_NUM_TOTAL; i++ )
846 WAN_TX_DESC_BASE(0)[i] = tx_desc;
847
848 // init Swap descriptor
849 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ )
850 WAN_SWAP_DESC_BASE[i] = tx_desc;
851
852 // init fastpath TX descriptor
853 tx_desc.own = 1;
854 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ )
855 FASTPATH_TO_WAN_TX_DESC_BASE[i] = tx_desc;
856
857 return 0;
858
859 ALLOC_SKB_RX_FAIL:
860 while ( i-- > 0 )
861 dev_kfree_skb_any(skb_pool[i]);
862 return -1;
863 }
864
865 static inline void clear_tables(void)
866 {
867 struct sk_buff *skb;
868 int i, j;
869
870 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
871 skb = get_skb_pointer(WAN_RX_DESC_BASE[i].dataptr);
872 if ( skb != NULL )
873 dev_kfree_skb_any(skb);
874 }
875
876 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ ) {
877 skb = get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE[i].dataptr);
878 if ( skb != NULL )
879 dev_kfree_skb_any(skb);
880 }
881
882 for ( j = 0; j < 8; j++ )
883 for ( i = 0; i < WAN_TX_DESC_NUM; i++ ) {
884 skb = get_skb_pointer(WAN_TX_DESC_BASE(j)[i].dataptr);
885 if ( skb != NULL )
886 dev_kfree_skb_any(skb);
887 }
888
889 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ ) {
890 skb = get_skb_pointer(WAN_SWAP_DESC_BASE[i].dataptr);
891 if ( skb != NULL )
892 dev_kfree_skb_any(skb);
893 }
894
895 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ ) {
896 skb = get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE[i].dataptr);
897 if ( skb != NULL )
898 dev_kfree_skb_any(skb);
899 }
900 }
901
902 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
903 {
904 int i;
905
906 ASSERT(port_cell != NULL, "port_cell is NULL");
907 ASSERT(xdata_addr != NULL, "xdata_addr is NULL");
908
909 // TODO: ReTX set xdata_addr
910 g_xdata_addr = xdata_addr;
911
912 g_showtime = 1;
913
914 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
915 netif_carrier_on(g_net_dev[i]);
916
917 IFX_REG_W32(0x0F, UTP_CFG);
918
919 //#ifdef CONFIG_VR9
920 // IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);
921 //#endif
922
923 printk("enter showtime\n");
924
925 return 0;
926 }
927
928 static int ptm_showtime_exit(void)
929 {
930 int i;
931
932 if ( !g_showtime )
933 return -1;
934
935 //#ifdef CONFIG_VR9
936 // IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);
937 //#endif
938
939 IFX_REG_W32(0x00, UTP_CFG);
940
941 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
942 netif_carrier_off(g_net_dev[i]);
943
944 g_showtime = 0;
945
946 // TODO: ReTX clean state
947 g_xdata_addr = NULL;
948
949 printk("leave showtime\n");
950
951 return 0;
952 }
953
954
955
956 static int ifx_ptm_init(void)
957 {
958 int ret;
959 int i;
960 char ver_str[128];
961 struct port_cell_info port_cell = {0};
962
963 ret = init_priv_data();
964 if ( ret != 0 ) {
965 err("INIT_PRIV_DATA_FAIL");
966 goto INIT_PRIV_DATA_FAIL;
967 }
968
969 ifx_ptm_init_chip();
970 ret = init_tables();
971 if ( ret != 0 ) {
972 err("INIT_TABLES_FAIL");
973 goto INIT_TABLES_FAIL;
974 }
975
976 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
977 g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
978 if ( g_net_dev[i] == NULL )
979 goto ALLOC_NETDEV_FAIL;
980 ptm_setup(g_net_dev[i], i);
981 }
982
983 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
984 ret = register_netdev(g_net_dev[i]);
985 if ( ret != 0 )
986 goto REGISTER_NETDEV_FAIL;
987 }
988
989 /* register interrupt handler */
990 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
991 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
992 #else
993 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
994 #endif
995 if ( ret ) {
996 if ( ret == -EBUSY ) {
997 err("IRQ may be occupied by other driver, please reconfig to disable it.");
998 }
999 else {
1000 err("request_irq fail");
1001 }
1002 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
1003 }
1004 disable_irq(PPE_MAILBOX_IGU1_INT);
1005
1006 ret = ifx_pp32_start(0);
1007 if ( ret ) {
1008 err("ifx_pp32_start fail!");
1009 goto PP32_START_FAIL;
1010 }
1011 IFX_REG_W32(1 << 16, MBOX_IGU1_IER); // enable SWAP interrupt
1012 IFX_REG_W32(~0, MBOX_IGU1_ISRC);
1013
1014 enable_irq(PPE_MAILBOX_IGU1_INT);
1015
1016 ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);
1017
1018 ifx_mei_atm_showtime_enter = ptm_showtime_enter;
1019 ifx_mei_atm_showtime_exit = ptm_showtime_exit;
1020
1021 ifx_ptm_version(ver_str);
1022 printk(KERN_INFO "%s", ver_str);
1023
1024 printk("ifxmips_ptm: PTM init succeed\n");
1025
1026 return 0;
1027
1028 PP32_START_FAIL:
1029 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1030 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
1031 i = ARRAY_SIZE(g_net_dev);
1032 REGISTER_NETDEV_FAIL:
1033 while ( i-- )
1034 unregister_netdev(g_net_dev[i]);
1035 i = ARRAY_SIZE(g_net_dev);
1036 ALLOC_NETDEV_FAIL:
1037 while ( i-- ) {
1038 free_netdev(g_net_dev[i]);
1039 g_net_dev[i] = NULL;
1040 }
1041 INIT_TABLES_FAIL:
1042 INIT_PRIV_DATA_FAIL:
1043 clear_priv_data();
1044 printk("ifxmips_ptm: PTM init failed\n");
1045 return ret;
1046 }
1047
1048 static void __exit ifx_ptm_exit(void)
1049 {
1050 int i;
1051 ifx_mei_atm_showtime_enter = NULL;
1052 ifx_mei_atm_showtime_exit = NULL;
1053
1054
1055 ifx_pp32_stop(0);
1056
1057 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1058
1059 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1060 unregister_netdev(g_net_dev[i]);
1061
1062 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1063 free_netdev(g_net_dev[i]);
1064 g_net_dev[i] = NULL;
1065 }
1066
1067 clear_tables();
1068
1069 ifx_ptm_uninit_chip();
1070
1071 clear_priv_data();
1072 }
1073
1074 #ifndef MODULE
1075 static int __init wanqos_en_setup(char *line)
1076 {
1077 wanqos_en = simple_strtoul(line, NULL, 0);
1078
1079 if ( wanqos_en < 1 || wanqos_en > 8 )
1080 wanqos_en = 0;
1081
1082 return 0;
1083 }
1084
1085 static int __init queue_gamma_map_setup(char *line)
1086 {
1087 char *p;
1088 int i;
1089
1090 for ( i = 0, p = line; i < ARRAY_SIZE(queue_gamma_map) && isxdigit(*p); i++ )
1091 {
1092 queue_gamma_map[i] = simple_strtoul(p, &p, 0);
1093 if ( *p == ',' || *p == ';' || *p == ':' )
1094 p++;
1095 }
1096
1097 return 0;
1098 }
1099 #endif
1100 module_init(ifx_ptm_init);
1101 module_exit(ifx_ptm_exit);
1102 #ifndef MODULE
1103 __setup("wanqos_en=", wanqos_en_setup);
1104 __setup("queue_gamma_map=", queue_gamma_map_setup);
1105 #endif
1106
1107 MODULE_LICENSE("GPL");