1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_vdsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for VR9)
10 ** COPYRIGHT : Copyright (c) 2006
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
18 **
19 ** HISTORY
20 ** $Date $Author $Comment
21 ** 07 JUL 2009 Xu Liang Init Version
22 *******************************************************************************/
23
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/ctype.h>
29 #include <linux/errno.h>
30 #include <linux/proc_fs.h>
31 #include <linux/init.h>
32 #include <linux/ioctl.h>
33 #include <linux/etherdevice.h>
34 #include <linux/interrupt.h>
35 #include <linux/netdevice.h>
36
37 #include "ifxmips_ptm_vdsl.h"
38 #include <lantiq_soc.h>
39
40 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
41 #define MODULE_PARM(a, b) module_param(a, int, 0)
42
43 static int wanqos_en = 0;
44 static int queue_gamma_map[4] = {0xFE, 0x01, 0x00, 0x00};
45
46 MODULE_PARM(wanqos_en, "i");
47 MODULE_PARM_DESC(wanqos_en, "WAN QoS support, 1 - enabled, 0 - disabled.");
48
49 MODULE_PARM_ARRAY(queue_gamma_map, "4-4i");
50 MODULE_PARM_DESC(queue_gamma_map, "TX QoS queues mapping to 4 TX Gamma interfaces.");
51
52 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
53 extern int (*ifx_mei_atm_showtime_exit)(void);
54 extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
55
56 static int g_showtime = 0;
57 static void *g_xdata_addr = NULL;
58
59
60 #define ENABLE_TMP_DBG 0
61
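/*
 * Helper to read the PP32 (PPE packet engine) clock rate via the common
 * clock framework; init_tables() below uses it to derive the QoS time tick.
 */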
62 unsigned long cgu_get_pp32_clock(void)
63 {
64 struct clk *c = clk_get_ppe();
65 unsigned long rate = clk_get_rate(c);
66 clk_put(c);
67 return rate;
68 }
69
70 static void ptm_setup(struct net_device *, int);
71 static struct net_device_stats *ptm_get_stats(struct net_device *);
72 static int ptm_open(struct net_device *);
73 static int ptm_stop(struct net_device *);
74 static unsigned int ptm_poll(int, unsigned int);
75 static int ptm_napi_poll(struct napi_struct *, int);
76 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
77 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
78 static int ptm_change_mtu(struct net_device *, int);
79 #endif
80 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
81 static void ptm_tx_timeout(struct net_device *);
82
83 static inline struct sk_buff* alloc_skb_rx(void);
84 static inline struct sk_buff* alloc_skb_tx(unsigned int);
85 static inline struct sk_buff *get_skb_pointer(unsigned int);
86 static inline int get_tx_desc(unsigned int, unsigned int *);
87
88 /*
89 * Mailbox handler and signal function
90 */
91 static irqreturn_t mailbox_irq_handler(int, void *);
92
93 /*
94 * Tasklet to Handle Swap Descriptors
95 */
96 static void do_swap_desc_tasklet(unsigned long);
97
98
99 /*
100 * Init & clean-up functions
101 */
102 static inline int init_priv_data(void);
103 static inline void clear_priv_data(void);
104 static inline int init_tables(void);
105 static inline void clear_tables(void);
106
107 static int g_wanqos_en = 0;
108
109 static int g_queue_gamma_map[4];
110
111 static struct ptm_priv_data g_ptm_priv_data;
112
113 static struct net_device_ops g_ptm_netdev_ops = {
114 .ndo_get_stats = ptm_get_stats,
115 .ndo_open = ptm_open,
116 .ndo_stop = ptm_stop,
117 .ndo_start_xmit = ptm_hard_start_xmit,
118 .ndo_validate_addr = eth_validate_addr,
119 .ndo_set_mac_address = eth_mac_addr,
120 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
121 .ndo_change_mtu = ptm_change_mtu,
122 #endif
123 .ndo_do_ioctl = ptm_ioctl,
124 .ndo_tx_timeout = ptm_tx_timeout,
125 };
126
127 static struct net_device *g_net_dev[1] = {0};
128 static char *g_net_dev_name[1] = {"dsl0"};
129
130 static int g_ptm_prio_queue_map[8];
131
132 static DECLARE_TASKLET(g_swap_desc_tasklet, do_swap_desc_tasklet, 0);
133
134
135 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
136
137 /*
138 * ####################################
139 * Local Function
140 * ####################################
141 */
142
143 static void ptm_setup(struct net_device *dev, int ndev)
144 {
145 netif_carrier_off(dev);
146
147 dev->netdev_ops = &g_ptm_netdev_ops;
148 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
149 /* Allow up to 1508 bytes, for RFC4638 */
150 dev->max_mtu = ETH_DATA_LEN + 8;
151 #endif
152 netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 16);
153 dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
154
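    /* Static default MAC address; only the last octet differs per device index. */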
155 dev->dev_addr[0] = 0x00;
156 dev->dev_addr[1] = 0x20;
157 dev->dev_addr[2] = 0xda;
158 dev->dev_addr[3] = 0x86;
159 dev->dev_addr[4] = 0x23;
160 dev->dev_addr[5] = 0x75 + ndev;
161 }
162
163 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
164 {
165 struct net_device_stats *s;
166
167 if ( dev != g_net_dev[0] )
168 return NULL;
169 s = &g_ptm_priv_data.itf[0].stats;
170
171 return s;
172 }
173
174 static int ptm_open(struct net_device *dev)
175 {
176 ASSERT(dev == g_net_dev[0], "incorrect device");
177
178 napi_enable(&g_ptm_priv_data.itf[0].napi);
179
180 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
181
182 netif_start_queue(dev);
183
184 return 0;
185 }
186
187 static int ptm_stop(struct net_device *dev)
188 {
189 ASSERT(dev == g_net_dev[0], "incorrect device");
190
191 IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER);
192
193 napi_disable(&g_ptm_priv_data.itf[0].napi);
194
195 netif_stop_queue(dev);
196
197 return 0;
198 }
199
200 static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
201 {
202 unsigned int work_done = 0;
203 volatile struct rx_descriptor *desc;
204 struct rx_descriptor reg_desc;
205 struct sk_buff *skb, *new_skb;
206
207 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
208
209 while ( work_done < work_to_do ) {
210 desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
211         if ( desc->own /* || !desc->c */ )  // if PP32 holds the descriptor or the descriptor is not completed
212 break;
213 if ( ++g_ptm_priv_data.itf[0].rx_desc_pos == WAN_RX_DESC_NUM )
214 g_ptm_priv_data.itf[0].rx_desc_pos = 0;
215
216 reg_desc = *desc;
217 skb = get_skb_pointer(reg_desc.dataptr);
218 ASSERT(skb != NULL, "invalid pointer skb == NULL");
219
220 new_skb = alloc_skb_rx();
221 if ( new_skb != NULL ) {
222 skb_reserve(skb, reg_desc.byteoff);
223 skb_put(skb, reg_desc.datalen);
224
225 // parse protocol header
226 skb->dev = g_net_dev[0];
227 skb->protocol = eth_type_trans(skb, skb->dev);
228
229 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0))
230 g_net_dev[0]->last_rx = jiffies;
231 #endif
232
233 netif_receive_skb(skb);
234
235 g_ptm_priv_data.itf[0].stats.rx_packets++;
236 g_ptm_priv_data.itf[0].stats.rx_bytes += reg_desc.datalen;
237
238 reg_desc.dataptr = (unsigned int)new_skb->data & 0x0FFFFFFF;
239 reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
240 }
241
242 reg_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
243 reg_desc.own = 1;
244 reg_desc.c = 0;
245
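        /*
         * Hand the descriptor back to the PP32 firmware: the second word is
         * written first and, after a write barrier, the first word, which
         * presumably carries the OWN/C bits, so the firmware never observes
         * a half-updated descriptor.
         */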
246         /* write descriptor to memory */
247 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
248 wmb();
249 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
250
251 work_done++;
252 }
253
254 return work_done;
255 }
256
257 static int ptm_napi_poll(struct napi_struct *napi, int budget)
258 {
259 int ndev = 0;
260 unsigned int work_done;
261
262 work_done = ptm_poll(ndev, budget);
263
264 // interface down
265 if ( !netif_running(napi->dev) ) {
266 napi_complete(napi);
267 return work_done;
268 }
269
270 // clear interrupt
271 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC);
272 // no more traffic
273 if (work_done < budget) {
274 napi_complete(napi);
275 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
276 return work_done;
277 }
278
279 // next round
280 return work_done;
281 }
282
283 static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
284 {
285 unsigned int f_full;
286 int desc_base;
287 volatile struct tx_descriptor *desc;
288 struct tx_descriptor reg_desc = {0};
289 struct sk_buff *skb_to_free;
290 unsigned int byteoff;
291
292 ASSERT(dev == g_net_dev[0], "incorrect device");
293
294 if ( !g_showtime ) {
295 err("not in showtime");
296 goto PTM_HARD_START_XMIT_FAIL;
297 }
298
299 /* allocate descriptor */
300 desc_base = get_tx_desc(0, &f_full);
301 if ( f_full ) {
302 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
303 netif_trans_update(dev);
304 #else
305 dev->trans_start = jiffies;
306 #endif
307 netif_stop_queue(dev);
308
309 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC);
310 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER);
311 }
312 if ( desc_base < 0 )
313 goto PTM_HARD_START_XMIT_FAIL;
314 desc = &CPU_TO_WAN_TX_DESC_BASE[desc_base];
315
316 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
317 if ( skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff || skb_cloned(skb) ) {
318 struct sk_buff *new_skb;
319
320 ASSERT(skb_headroom(skb) >= sizeof(struct sk_buff *) + byteoff, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
321 ASSERT(!skb_cloned(skb), "skb is cloned");
322
323 new_skb = alloc_skb_tx(skb->len);
324 if ( new_skb == NULL ) {
325 dbg("no memory");
326 goto ALLOC_SKB_TX_FAIL;
327 }
328 skb_put(new_skb, skb->len);
329 memcpy(new_skb->data, skb->data, skb->len);
330 dev_kfree_skb_any(skb);
331 skb = new_skb;
332 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
333 /* write back to physical memory */
334 dma_cache_wback((unsigned long)skb->data, skb->len);
335 }
336
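    /*
     * Stash the skb pointer in the headroom just below the DMA-aligned data
     * so that, once the firmware is done with the buffer, get_skb_pointer()
     * can recover and free it from the descriptor's dataptr alone.
     */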
337 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
338 /* write back to physical memory */
339 dma_cache_wback((unsigned long)skb->data - byteoff - sizeof(struct sk_buff *), skb->len + byteoff + sizeof(struct sk_buff *));
340
341 /* free previous skb */
342 skb_to_free = get_skb_pointer(desc->dataptr);
343 if ( skb_to_free != NULL )
344 dev_kfree_skb_any(skb_to_free);
345
346 /* update descriptor */
347 reg_desc.small = 0;
348 reg_desc.dataptr = (unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1));
349 reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
350 reg_desc.qid = g_ptm_prio_queue_map[skb->priority > 7 ? 7 : skb->priority];
351 reg_desc.byteoff = byteoff;
352 reg_desc.own = 1;
353 reg_desc.c = 1;
354 reg_desc.sop = reg_desc.eop = 1;
355
356 /* update MIB */
357 g_ptm_priv_data.itf[0].stats.tx_packets++;
358 g_ptm_priv_data.itf[0].stats.tx_bytes += reg_desc.datalen;
359
360     /* write descriptor to memory */
361 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
362 wmb();
363 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
364
365 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
366 netif_trans_update(dev);
367 #else
368 dev->trans_start = jiffies;
369 #endif
370
371 return 0;
372
373 ALLOC_SKB_TX_FAIL:
374 PTM_HARD_START_XMIT_FAIL:
375 dev_kfree_skb_any(skb);
376 g_ptm_priv_data.itf[0].stats.tx_dropped++;
377 return 0;
378 }
379
380 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
381 static int ptm_change_mtu(struct net_device *dev, int mtu)
382 {
383 /* Allow up to 1508 bytes, for RFC4638 */
384 if (mtu < 68 || mtu > ETH_DATA_LEN + 8)
385 return -EINVAL;
386 dev->mtu = mtu;
387 return 0;
388 }
389 #endif
390
391 static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
392 {
393 ASSERT(dev == g_net_dev[0], "incorrect device");
394
395 switch ( cmd )
396 {
397 case IFX_PTM_MIB_CW_GET:
398 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = IFX_REG_R32(DREG_AR_CELL0) + IFX_REG_R32(DREG_AR_CELL1);
399 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = IFX_REG_R32(DREG_AR_IDLE_CNT0) + IFX_REG_R32(DREG_AR_IDLE_CNT1);
400 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = IFX_REG_R32(DREG_AR_CVN_CNT0) + IFX_REG_R32(DREG_AR_CVN_CNT1) + IFX_REG_R32(DREG_AR_CVNP_CNT0) + IFX_REG_R32(DREG_AR_CVNP_CNT1);
401 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = IFX_REG_R32(DREG_AT_CELL0) + IFX_REG_R32(DREG_AT_CELL1);
402 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = IFX_REG_R32(DREG_AT_IDLE_CNT0) + IFX_REG_R32(DREG_AT_IDLE_CNT1);
403 break;
404 case IFX_PTM_MIB_FRAME_GET:
405 {
406 PTM_FRAME_MIB_T data = {0};
407 int i;
408
409 data.RxCorrect = IFX_REG_R32(DREG_AR_HEC_CNT0) + IFX_REG_R32(DREG_AR_HEC_CNT1) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1);
410 for ( i = 0; i < 4; i++ )
411 data.RxDropped += WAN_RX_MIB_TABLE(i)->wrx_dropdes_pdu;
412 for ( i = 0; i < 8; i++ )
413 data.TxSend += WAN_TX_MIB_TABLE(i)->wtx_total_pdu;
414
415 *((PTM_FRAME_MIB_T *)ifr->ifr_data) = data;
416 }
417 break;
418 case IFX_PTM_CFG_GET:
419         //  use bearer channel 0 preemption gamma interface settings
420 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = 1;
421 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis == 0 ? 1 : 0;
422         ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck    = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis == 0 ? 1 : 0;
423 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size == 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size * 16);
424 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis == 0 ? 1 : 0;
425 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : 1;
426 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size * 16);
427 break;
428 case IFX_PTM_CFG_SET:
429 {
430 int i;
431
432 for ( i = 0; i < 4; i++ ) {
433 RX_GAMMA_ITF_CFG(i)->rx_eth_fcs_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 0 : 1;
434
435 RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck ? 0 : 1;
436
437 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen ) {
438 case 16: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 1; break;
439 case 32: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 2; break;
440 default: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 0;
441 }
442
443 TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 0 : 1;
444
445 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen ) {
446 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen ) {
447 case 16: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 1; break;
448 case 32: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 2; break;
449 default: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
450 }
451 }
452 else
453 TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
454 }
455 }
456 break;
457 case IFX_PTM_MAP_PKT_PRIO_TO_Q:
458 {
459 struct ppe_prio_q_map cmd;
460
461 if ( copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)) )
462 return -EFAULT;
463
464 if ( cmd.pkt_prio < 0 || cmd.pkt_prio >= ARRAY_SIZE(g_ptm_prio_queue_map) )
465 return -EINVAL;
466
467 if ( cmd.qid < 0 || cmd.qid >= g_wanqos_en )
468 return -EINVAL;
469
470 g_ptm_prio_queue_map[cmd.pkt_prio] = cmd.qid;
471 }
472 break;
473 default:
474 return -EOPNOTSUPP;
475 }
476
477 return 0;
478 }
479
480 static void ptm_tx_timeout(struct net_device *dev)
481 {
482 ASSERT(dev == g_net_dev[0], "incorrect device");
483
484 /* disable TX irq, release skb when sending new packet */
485 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
486
487 /* wake up TX queue */
488 netif_wake_queue(dev);
489
490 return;
491 }
492
493 static inline struct sk_buff* alloc_skb_rx(void)
494 {
495 struct sk_buff *skb;
496
497     /* allocate memory including trailer and padding */
498 skb = dev_alloc_skb(RX_MAX_BUFFER_SIZE + DATA_BUFFER_ALIGNMENT);
499 if ( skb != NULL ) {
500         /* data must be aligned to the burst length; two more bytes are reserved for MAC address alignment */
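        /* ~(x + A - 1) & (A - 1) equals (-x) mod A for power-of-two A, i.e. the
           number of bytes needed to round skb->data up to the next boundary. */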
501 if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
502 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
503         /* put skb pointer in reserved area "skb->data - 4" */
504 *((struct sk_buff **)skb->data - 1) = skb;
505 wmb();
506 /* write back and invalidate cache */
507 dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
508 /* invalidate cache */
509 dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
510 }
511
512 return skb;
513 }
514
515 static inline struct sk_buff* alloc_skb_tx(unsigned int size)
516 {
517 struct sk_buff *skb;
518
519 /* allocate memory including padding */
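    /*
     * Note: the requested size is overridden; every TX buffer is allocated at
     * the full RX_MAX_BUFFER_SIZE (rounded up to the DMA alignment), presumably
     * so that buffers can be exchanged freely with the firmware's swap ring.
     */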
520 size = RX_MAX_BUFFER_SIZE;
521 size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
522 skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
523     /* must be aligned to the burst length */
524 if ( skb != NULL )
525 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
526 return skb;
527 }
528
529 static inline struct sk_buff *get_skb_pointer(unsigned int dataptr)
530 {
531 unsigned int skb_dataptr;
532 struct sk_buff *skb;
533
534 // usually, CPE memory is less than 256M bytes
535 // so NULL means invalid pointer
536 if ( dataptr == 0 ) {
537 dbg("dataptr is 0, it's supposed to be invalid pointer");
538 return NULL;
539 }
540
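    /*
     * The skb pointer was stored 4 bytes below the buffer start (see
     * alloc_skb_rx() and the TX path); map the descriptor's pointer back into
     * uncached KSEG1 address space to read it.
     */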
541 skb_dataptr = (dataptr - 4) | KSEG1;
542 skb = *(struct sk_buff **)skb_dataptr;
543
544 ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
545 ASSERT((((unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1))) | KSEG1) == (dataptr | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);
546
547 return skb;
548 }
549
550 static inline int get_tx_desc(unsigned int itf, unsigned int *f_full)
551 {
552 int desc_base = -1;
553 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[0];
554
555 // assume TX is serial operation
556 // no protection provided
557
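    /*
     * Claim the current CPU->WAN TX descriptor if the CPU owns it and advance
     * the ring position; report the ring as full when the following descriptor
     * is still owned by the PP32 firmware.
     */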
558 *f_full = 1;
559
560 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 ) {
561 desc_base = p_itf->tx_desc_pos;
562 if ( ++(p_itf->tx_desc_pos) == CPU_TO_WAN_TX_DESC_NUM )
563 p_itf->tx_desc_pos = 0;
564 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 )
565 *f_full = 0;
566 }
567
568 return desc_base;
569 }
570
571 static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
572 {
573 unsigned int isr;
574 int i;
575
576 isr = IFX_REG_R32(MBOX_IGU1_ISR);
577 IFX_REG_W32(isr, MBOX_IGU1_ISRC);
578 isr &= IFX_REG_R32(MBOX_IGU1_IER);
579
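    /*
     * Bit 0:  RX packets pending - mask the source and let NAPI poll.
     * Bit 16: swap descriptors returned by the firmware - refill via tasklet.
     * Bit 17: TX descriptors freed - wake the transmit queue.
     */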
580 if (isr & BIT(0)) {
581 IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER);
582 napi_schedule(&g_ptm_priv_data.itf[0].napi);
583 #if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
584 {
585 volatile struct rx_descriptor *desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
586
587         if ( desc->own ) {  //  PP32 still holds it
588 err("invalid interrupt");
589 }
590 }
591 #endif
592 }
593 if (isr & BIT(16)) {
594 IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER);
595 tasklet_hi_schedule(&g_swap_desc_tasklet);
596 }
597 if (isr & BIT(17)) {
598 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
599 netif_wake_queue(g_net_dev[0]);
600 }
601
602 return IRQ_HANDLED;
603 }
604
605 static void do_swap_desc_tasklet(unsigned long arg)
606 {
607 int budget = 32;
608 volatile struct tx_descriptor *desc;
609 struct sk_buff *skb;
610 unsigned int byteoff;
611
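    /*
     * The PP32 firmware returns used buffers on the swap ring; free the old
     * skb of each descriptor the CPU owns and immediately re-arm it with a
     * freshly allocated full-size buffer before handing it back.
     */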
612 while ( budget-- > 0 ) {
613         if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own )  //  if the PP32 still holds the descriptor
614 break;
615
616 desc = &WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos];
617 if ( ++g_ptm_priv_data.itf[0].tx_swap_desc_pos == WAN_SWAP_DESC_NUM )
618 g_ptm_priv_data.itf[0].tx_swap_desc_pos = 0;
619
620 skb = get_skb_pointer(desc->dataptr);
621 if ( skb != NULL )
622 dev_kfree_skb_any(skb);
623
624 skb = alloc_skb_tx(RX_MAX_BUFFER_SIZE);
625 if ( skb == NULL )
626 panic("can't allocate swap buffer for PPE firmware use\n");
627 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
628 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
629
630 desc->dataptr = (unsigned int)skb->data & 0x0FFFFFFF;
631 desc->own = 1;
632 }
633
634 // clear interrupt
635     IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_ISRC);
636 // no more skb to be replaced
637     if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) {  //  if the PP32 still holds the descriptor
638 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER);
639 return;
640 }
641
642 tasklet_hi_schedule(&g_swap_desc_tasklet);
643 return;
644 }
645
646
647 static inline int ifx_ptm_version(char *buf)
648 {
649 int len = 0;
650 unsigned int major, minor;
651
652 ifx_ptm_get_fw_ver(&major, &minor);
653
654 len += sprintf(buf + len, "PTM %d.%d.%d", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
655 len += sprintf(buf + len, " PTM (E1) firmware version %d.%d\n", major, minor);
656
657 return len;
658 }
659
660 static inline int init_priv_data(void)
661 {
662 int i, j;
663
664 g_wanqos_en = wanqos_en ? wanqos_en : 8;
665 if ( g_wanqos_en > 8 )
666 g_wanqos_en = 8;
667
668 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ )
669 {
670 g_queue_gamma_map[i] = queue_gamma_map[i] & ((1 << g_wanqos_en) - 1);
671 for ( j = 0; j < i; j++ )
672 g_queue_gamma_map[i] &= ~g_queue_gamma_map[j];
673 }
674
675 memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));
676
677 {
678 int max_packet_priority = ARRAY_SIZE(g_ptm_prio_queue_map);
679 int tx_num_q;
680 int q_step, q_accum, p_step;
681
682 tx_num_q = __ETH_WAN_TX_QUEUE_NUM;
683 q_step = tx_num_q - 1;
684 p_step = max_packet_priority - 1;
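        /* Spread the 8 packet priorities evenly over the available TX queues,
           rounding to the nearest queue: priority 0 gets the highest queue
           index, priority 7 gets queue 0. */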
685 for ( j = 0, q_accum = 0; j < max_packet_priority; j++, q_accum += q_step )
686 g_ptm_prio_queue_map[j] = q_step - (q_accum + (p_step >> 1)) / p_step;
687 }
688
689 return 0;
690 }
691
692 static inline void clear_priv_data(void)
693 {
694 }
695
696 static inline int init_tables(void)
697 {
698 struct sk_buff *skb_pool[WAN_RX_DESC_NUM] = {0};
699 struct cfg_std_data_len cfg_std_data_len = {0};
700 struct tx_qos_cfg tx_qos_cfg = {0};
701 struct psave_cfg psave_cfg = {0};
702 struct eg_bwctrl_cfg eg_bwctrl_cfg = {0};
703 struct test_mode test_mode = {0};
704 struct rx_bc_cfg rx_bc_cfg = {0};
705 struct tx_bc_cfg tx_bc_cfg = {0};
706 struct gpio_mode gpio_mode = {0};
707 struct gpio_wm_cfg gpio_wm_cfg = {0};
708 struct rx_gamma_itf_cfg rx_gamma_itf_cfg = {0};
709 struct tx_gamma_itf_cfg tx_gamma_itf_cfg = {0};
710 struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg = {0};
711 struct rx_descriptor rx_desc = {0};
712 struct tx_descriptor tx_desc = {0};
713 int i;
714
715 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
716 skb_pool[i] = alloc_skb_rx();
717 if ( skb_pool[i] == NULL )
718 goto ALLOC_SKB_RX_FAIL;
719 }
720
721 cfg_std_data_len.byte_off = RX_HEAD_MAC_ADDR_ALIGNMENT; // this field replaces byte_off in rx descriptor of VDSL ingress
722 cfg_std_data_len.data_len = 1600;
723 *CFG_STD_DATA_LEN = cfg_std_data_len;
724
725 tx_qos_cfg.time_tick = cgu_get_pp32_clock() / 62500; // 16 * (cgu_get_pp32_clock() / 1000000)
726 tx_qos_cfg.overhd_bytes = 0;
727 tx_qos_cfg.eth1_eg_qnum = __ETH_WAN_TX_QUEUE_NUM;
728 tx_qos_cfg.eth1_burst_chk = 1;
729 tx_qos_cfg.eth1_qss = 0;
730 tx_qos_cfg.shape_en = 0; // disable
731 tx_qos_cfg.wfq_en = 0; // strict priority
732 *TX_QOS_CFG = tx_qos_cfg;
733
734 psave_cfg.start_state = 0;
735 psave_cfg.sleep_en = 1; // enable sleep mode
736 *PSAVE_CFG = psave_cfg;
737
738 eg_bwctrl_cfg.fdesc_wm = 16;
739 eg_bwctrl_cfg.class_len = 128;
740 *EG_BWCTRL_CFG = eg_bwctrl_cfg;
741
742 //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
743 *GPIO_ADDR = (unsigned int)0x00000000; // disabled by default
744
745 gpio_mode.gpio_bit_bc1 = 2;
746 gpio_mode.gpio_bit_bc0 = 1;
747 gpio_mode.gpio_bc1_en = 0;
748 gpio_mode.gpio_bc0_en = 0;
749 *GPIO_MODE = gpio_mode;
750
751 gpio_wm_cfg.stop_wm_bc1 = 2;
752 gpio_wm_cfg.start_wm_bc1 = 4;
753 gpio_wm_cfg.stop_wm_bc0 = 2;
754 gpio_wm_cfg.start_wm_bc0 = 4;
755 *GPIO_WM_CFG = gpio_wm_cfg;
756
757 test_mode.mib_clear_mode = 0;
758 test_mode.test_mode = 0;
759 *TEST_MODE = test_mode;
760
761 rx_bc_cfg.local_state = 0;
762 rx_bc_cfg.remote_state = 0;
763 rx_bc_cfg.to_false_th = 7;
764 rx_bc_cfg.to_looking_th = 3;
765 *RX_BC_CFG(0) = rx_bc_cfg;
766 *RX_BC_CFG(1) = rx_bc_cfg;
767
768 tx_bc_cfg.fill_wm = 2;
769 tx_bc_cfg.uflw_wm = 2;
770 *TX_BC_CFG(0) = tx_bc_cfg;
771 *TX_BC_CFG(1) = tx_bc_cfg;
772
773 rx_gamma_itf_cfg.receive_state = 0;
774 rx_gamma_itf_cfg.rx_min_len = 60;
775 rx_gamma_itf_cfg.rx_pad_en = 1;
776 rx_gamma_itf_cfg.rx_eth_fcs_ver_dis = 0;
777 rx_gamma_itf_cfg.rx_rm_eth_fcs = 1;
778 rx_gamma_itf_cfg.rx_tc_crc_ver_dis = 0;
779 rx_gamma_itf_cfg.rx_tc_crc_size = 1;
780 rx_gamma_itf_cfg.rx_eth_fcs_result = 0xC704DD7B;
781 rx_gamma_itf_cfg.rx_tc_crc_result = 0x1D0F1D0F;
782 rx_gamma_itf_cfg.rx_crc_cfg = 0x2500;
783 rx_gamma_itf_cfg.rx_eth_fcs_init_value = 0xFFFFFFFF;
784 rx_gamma_itf_cfg.rx_tc_crc_init_value = 0x0000FFFF;
785 rx_gamma_itf_cfg.rx_max_len_sel = 0;
786 rx_gamma_itf_cfg.rx_edit_num2 = 0;
787 rx_gamma_itf_cfg.rx_edit_pos2 = 0;
788 rx_gamma_itf_cfg.rx_edit_type2 = 0;
789 rx_gamma_itf_cfg.rx_edit_en2 = 0;
790 rx_gamma_itf_cfg.rx_edit_num1 = 0;
791 rx_gamma_itf_cfg.rx_edit_pos1 = 0;
792 rx_gamma_itf_cfg.rx_edit_type1 = 0;
793 rx_gamma_itf_cfg.rx_edit_en1 = 0;
794 rx_gamma_itf_cfg.rx_inserted_bytes_1l = 0;
795 rx_gamma_itf_cfg.rx_inserted_bytes_1h = 0;
796 rx_gamma_itf_cfg.rx_inserted_bytes_2l = 0;
797 rx_gamma_itf_cfg.rx_inserted_bytes_2h = 0;
798 rx_gamma_itf_cfg.rx_len_adj = -6;
799 for ( i = 0; i < 4; i++ )
800 *RX_GAMMA_ITF_CFG(i) = rx_gamma_itf_cfg;
801
802 tx_gamma_itf_cfg.tx_len_adj = 6;
803 tx_gamma_itf_cfg.tx_crc_off_adj = 6;
804 tx_gamma_itf_cfg.tx_min_len = 0;
805 tx_gamma_itf_cfg.tx_eth_fcs_gen_dis = 0;
806 tx_gamma_itf_cfg.tx_tc_crc_size = 1;
807 tx_gamma_itf_cfg.tx_crc_cfg = 0x2F00;
808 tx_gamma_itf_cfg.tx_eth_fcs_init_value = 0xFFFFFFFF;
809 tx_gamma_itf_cfg.tx_tc_crc_init_value = 0x0000FFFF;
810 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ ) {
811 tx_gamma_itf_cfg.queue_mapping = g_queue_gamma_map[i];
812 *TX_GAMMA_ITF_CFG(i) = tx_gamma_itf_cfg;
813 }
814
815 for ( i = 0; i < __ETH_WAN_TX_QUEUE_NUM; i++ ) {
816 wtx_qos_q_desc_cfg.length = WAN_TX_DESC_NUM;
817 wtx_qos_q_desc_cfg.addr = __ETH_WAN_TX_DESC_BASE(i);
818 *WTX_QOS_Q_DESC_CFG(i) = wtx_qos_q_desc_cfg;
819 }
820
821 // default TX queue QoS config is all ZERO
822
823 // TX Ctrl K Table
824 IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
825 IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
826 IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
827 IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
828 IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
829 IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
830 IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
831 IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
832 IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
833 IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
834 IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
835 IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
836 IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
837 IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
838 IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
839 IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));
840
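    /*
     * Descriptor ownership conventions used below: RX descriptors start out
     * owned by the PP32 (own = 1) with freshly allocated buffers, CPU TX and
     * swap descriptors start out owned by the CPU (own = 0) and empty, and
     * fastpath TX descriptors are handed to the firmware from the start.
     */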
841 // init RX descriptor
842 rx_desc.own = 1;
843 rx_desc.c = 0;
844 rx_desc.sop = 1;
845 rx_desc.eop = 1;
846 rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
847 rx_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
848 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
849 rx_desc.dataptr = (unsigned int)skb_pool[i]->data & 0x0FFFFFFF;
850 WAN_RX_DESC_BASE[i] = rx_desc;
851 }
852
853 // init TX descriptor
854 tx_desc.own = 0;
855 tx_desc.c = 0;
856 tx_desc.sop = 1;
857 tx_desc.eop = 1;
858 tx_desc.byteoff = 0;
859 tx_desc.qid = 0;
860 tx_desc.datalen = 0;
861 tx_desc.small = 0;
862 tx_desc.dataptr = 0;
863 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ )
864 CPU_TO_WAN_TX_DESC_BASE[i] = tx_desc;
865 for ( i = 0; i < WAN_TX_DESC_NUM_TOTAL; i++ )
866 WAN_TX_DESC_BASE(0)[i] = tx_desc;
867
868 // init Swap descriptor
869 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ )
870 WAN_SWAP_DESC_BASE[i] = tx_desc;
871
872 // init fastpath TX descriptor
873 tx_desc.own = 1;
874 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ )
875 FASTPATH_TO_WAN_TX_DESC_BASE[i] = tx_desc;
876
877 return 0;
878
879 ALLOC_SKB_RX_FAIL:
880 while ( i-- > 0 )
881 dev_kfree_skb_any(skb_pool[i]);
882 return -1;
883 }
884
885 static inline void clear_tables(void)
886 {
887 struct sk_buff *skb;
888 int i, j;
889
890 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
891 skb = get_skb_pointer(WAN_RX_DESC_BASE[i].dataptr);
892 if ( skb != NULL )
893 dev_kfree_skb_any(skb);
894 }
895
896 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ ) {
897 skb = get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE[i].dataptr);
898 if ( skb != NULL )
899 dev_kfree_skb_any(skb);
900 }
901
902 for ( j = 0; j < 8; j++ )
903 for ( i = 0; i < WAN_TX_DESC_NUM; i++ ) {
904 skb = get_skb_pointer(WAN_TX_DESC_BASE(j)[i].dataptr);
905 if ( skb != NULL )
906 dev_kfree_skb_any(skb);
907 }
908
909 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ ) {
910 skb = get_skb_pointer(WAN_SWAP_DESC_BASE[i].dataptr);
911 if ( skb != NULL )
912 dev_kfree_skb_any(skb);
913 }
914
915 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ ) {
916 skb = get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE[i].dataptr);
917 if ( skb != NULL )
918 dev_kfree_skb_any(skb);
919 }
920 }
921
922 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
923 {
924 int i;
925
926 ASSERT(port_cell != NULL, "port_cell is NULL");
927 ASSERT(xdata_addr != NULL, "xdata_addr is NULL");
928
929 // TODO: ReTX set xdata_addr
930 g_xdata_addr = xdata_addr;
931
932 g_showtime = 1;
933
934 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
935 netif_carrier_on(g_net_dev[i]);
936
937 IFX_REG_W32(0x0F, UTP_CFG);
938
939 //#ifdef CONFIG_VR9
940 // IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);
941 //#endif
942
943 printk("enter showtime\n");
944
945 return 0;
946 }
947
948 static int ptm_showtime_exit(void)
949 {
950 int i;
951
952 if ( !g_showtime )
953 return -1;
954
955 //#ifdef CONFIG_VR9
956 // IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);
957 //#endif
958
959 IFX_REG_W32(0x00, UTP_CFG);
960
961 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
962 netif_carrier_off(g_net_dev[i]);
963
964 g_showtime = 0;
965
966 // TODO: ReTX clean state
967 g_xdata_addr = NULL;
968
969 printk("leave showtime\n");
970
971 return 0;
972 }
973
974
975
976 static int ifx_ptm_init(void)
977 {
978 int ret;
979 int i;
980 char ver_str[128];
981 struct port_cell_info port_cell = {0};
982
983 ret = init_priv_data();
984 if ( ret != 0 ) {
985 err("INIT_PRIV_DATA_FAIL");
986 goto INIT_PRIV_DATA_FAIL;
987 }
988
989 ifx_ptm_init_chip();
990 ret = init_tables();
991 if ( ret != 0 ) {
992 err("INIT_TABLES_FAIL");
993 goto INIT_TABLES_FAIL;
994 }
995
996 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
997 g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
998 if ( g_net_dev[i] == NULL )
999 goto ALLOC_NETDEV_FAIL;
1000 ptm_setup(g_net_dev[i], i);
1001 }
1002
1003 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1004 ret = register_netdev(g_net_dev[i]);
1005 if ( ret != 0 )
1006 goto REGISTER_NETDEV_FAIL;
1007 }
1008
1009 /* register interrupt handler */
1010 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)
1011 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
1012 #else
1013 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, IRQF_DISABLED, "ptm_mailbox_isr", &g_ptm_priv_data);
1014 #endif
1015 if ( ret ) {
1016 if ( ret == -EBUSY ) {
1017 err("IRQ may be occupied by other driver, please reconfig to disable it.");
1018 }
1019 else {
1020 err("request_irq fail");
1021 }
1022 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
1023 }
1024 disable_irq(PPE_MAILBOX_IGU1_INT);
1025
1026 ret = ifx_pp32_start(0);
1027 if ( ret ) {
1028 err("ifx_pp32_start fail!");
1029 goto PP32_START_FAIL;
1030 }
1031 IFX_REG_W32(1 << 16, MBOX_IGU1_IER); // enable SWAP interrupt
1032 IFX_REG_W32(~0, MBOX_IGU1_ISRC);
1033
1034 enable_irq(PPE_MAILBOX_IGU1_INT);
1035
1036 ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);
1037 if ( g_showtime ) {
1038 ptm_showtime_enter(&port_cell, &g_xdata_addr);
1039 }
1040
1041 ifx_mei_atm_showtime_enter = ptm_showtime_enter;
1042 ifx_mei_atm_showtime_exit = ptm_showtime_exit;
1043
1044 ifx_ptm_version(ver_str);
1045 printk(KERN_INFO "%s", ver_str);
1046
1047 printk("ifxmips_ptm: PTM init succeed\n");
1048
1049 return 0;
1050
1051 PP32_START_FAIL:
1052 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1053 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
1054 i = ARRAY_SIZE(g_net_dev);
1055 REGISTER_NETDEV_FAIL:
1056 while ( i-- )
1057 unregister_netdev(g_net_dev[i]);
1058 i = ARRAY_SIZE(g_net_dev);
1059 ALLOC_NETDEV_FAIL:
1060 while ( i-- ) {
1061 free_netdev(g_net_dev[i]);
1062 g_net_dev[i] = NULL;
1063 }
1064 INIT_TABLES_FAIL:
1065 INIT_PRIV_DATA_FAIL:
1066 clear_priv_data();
1067 printk("ifxmips_ptm: PTM init failed\n");
1068 return ret;
1069 }
1070
1071 static void __exit ifx_ptm_exit(void)
1072 {
1073 int i;
1074 ifx_mei_atm_showtime_enter = NULL;
1075 ifx_mei_atm_showtime_exit = NULL;
1076
1077
1078 ifx_pp32_stop(0);
1079
1080 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1081
1082 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1083 unregister_netdev(g_net_dev[i]);
1084
1085 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1086 free_netdev(g_net_dev[i]);
1087 g_net_dev[i] = NULL;
1088 }
1089
1090 clear_tables();
1091
1092 ifx_ptm_uninit_chip();
1093
1094 clear_priv_data();
1095 }
1096
1097 #ifndef MODULE
1098 static int __init wanqos_en_setup(char *line)
1099 {
1100 wanqos_en = simple_strtoul(line, NULL, 0);
1101
1102 if ( wanqos_en < 1 || wanqos_en > 8 )
1103 wanqos_en = 0;
1104
1105 return 0;
1106 }
1107
1108 static int __init queue_gamma_map_setup(char *line)
1109 {
1110 char *p;
1111 int i;
1112
1113 for ( i = 0, p = line; i < ARRAY_SIZE(queue_gamma_map) && isxdigit(*p); i++ )
1114 {
1115 queue_gamma_map[i] = simple_strtoul(p, &p, 0);
1116 if ( *p == ',' || *p == ';' || *p == ':' )
1117 p++;
1118 }
1119
1120 return 0;
1121 }
1122 #endif
1123 module_init(ifx_ptm_init);
1124 module_exit(ifx_ptm_exit);
1125 #ifndef MODULE
1126 __setup("wanqos_en=", wanqos_en_setup);
1127 __setup("queue_gamma_map=", queue_gamma_map_setup);
1128 #endif
1129
1130 MODULE_LICENSE("GPL");