1 /******************************************************************************
2 **
3 ** FILE NAME : ifxmips_ptm_vdsl.c
4 ** PROJECT : UEIP
5 ** MODULES : PTM
6 **
7 ** DATE : 7 Jul 2009
8 ** AUTHOR : Xu Liang
9 ** DESCRIPTION : PTM driver common source file (core functions for VR9)
10 ** COPYRIGHT : Copyright (c) 2006
11 ** Infineon Technologies AG
12 ** Am Campeon 1-12, 85579 Neubiberg, Germany
13 **
14 ** This program is free software; you can redistribute it and/or modify
15 ** it under the terms of the GNU General Public License as published by
16 ** the Free Software Foundation; either version 2 of the License, or
17 ** (at your option) any later version.
18 **
19 ** HISTORY
20 ** $Date $Author $Comment
21 ** 07 JUL 2009 Xu Liang Init Version
22 *******************************************************************************/
23
24 #include <linux/version.h>
25 #include <linux/kernel.h>
26 #include <linux/module.h>
27 #include <linux/types.h>
28 #include <linux/ctype.h>
29 #include <linux/errno.h>
30 #include <linux/proc_fs.h>
31 #include <linux/init.h>
32 #include <linux/ioctl.h>
33 #include <linux/etherdevice.h>
34 #include <linux/interrupt.h>
35 #include <linux/netdevice.h>
36 #include <linux/platform_device.h>
37 #include <linux/of_device.h>
38
39 #include "ifxmips_ptm_vdsl.h"
40 #include <lantiq_soc.h>
41
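/*
 * Descriptive note (not in the original source): these wrappers map the
 * legacy MODULE_PARM()/MODULE_PARM_ARRAY() syntax onto module_param();
 * the old type string argument ("i", "4-4i") is ignored.
 */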
42 #define MODULE_PARM_ARRAY(a, b) module_param_array(a, int, NULL, 0)
43 #define MODULE_PARM(a, b) module_param(a, int, 0)
44
45 static int wanqos_en = 0;
46 static int queue_gamma_map[4] = {0xFE, 0x01, 0x00, 0x00};
47
48 MODULE_PARM(wanqos_en, "i");
49 MODULE_PARM_DESC(wanqos_en, "WAN QoS support, 1 - enabled, 0 - disabled.");
50
51 MODULE_PARM_ARRAY(queue_gamma_map, "4-4i");
52 MODULE_PARM_DESC(queue_gamma_map, "TX QoS queues mapping to 4 TX Gamma interfaces.");
53
54 extern int (*ifx_mei_atm_showtime_enter)(struct port_cell_info *, void *);
55 extern int (*ifx_mei_atm_showtime_exit)(void);
56 extern int ifx_mei_atm_showtime_check(int *is_showtime, struct port_cell_info *port_cell, void **xdata_addr);
57
58 static int g_showtime = 0;
59 static void *g_xdata_addr = NULL;
60
61
62 #define ENABLE_TMP_DBG 0
63
64 unsigned long cgu_get_pp32_clock(void)
65 {
66 struct clk *c = clk_get_ppe();
67 unsigned long rate = clk_get_rate(c);
68 clk_put(c);
69 return rate;
70 }
71
72 static void ptm_setup(struct net_device *, int);
73 static struct net_device_stats *ptm_get_stats(struct net_device *);
74 static int ptm_open(struct net_device *);
75 static int ptm_stop(struct net_device *);
76 static unsigned int ptm_poll(int, unsigned int);
77 static int ptm_napi_poll(struct napi_struct *, int);
78 static int ptm_hard_start_xmit(struct sk_buff *, struct net_device *);
79 static int ptm_ioctl(struct net_device *, struct ifreq *, int);
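/* kernel 5.6 added a txqueue argument to the ndo_tx_timeout() callback, hence the two prototypes below */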
80 #if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
81 static void ptm_tx_timeout(struct net_device *);
82 #else
83 static void ptm_tx_timeout(struct net_device *, unsigned int txqueue);
84 #endif
85
86 static inline struct sk_buff* alloc_skb_rx(void);
87 static inline struct sk_buff* alloc_skb_tx(unsigned int);
88 static inline struct sk_buff *get_skb_pointer(unsigned int);
89 static inline int get_tx_desc(unsigned int, unsigned int *);
90
91 /*
92 * Mailbox handler and signal function
93 */
94 static irqreturn_t mailbox_irq_handler(int, void *);
95
96 /*
97 * Tasklet to Handle Swap Descriptors
98 */
99 static void do_swap_desc_tasklet(unsigned long);
100
101
102 /*
103 * Init & clean-up functions
104 */
105 static inline int init_priv_data(void);
106 static inline void clear_priv_data(void);
107 static inline int init_tables(void);
108 static inline void clear_tables(void);
109
110 static int g_wanqos_en = 0;
111
112 static int g_queue_gamma_map[4];
113
114 static struct ptm_priv_data g_ptm_priv_data;
115
116 static struct net_device_ops g_ptm_netdev_ops = {
117 .ndo_get_stats = ptm_get_stats,
118 .ndo_open = ptm_open,
119 .ndo_stop = ptm_stop,
120 .ndo_start_xmit = ptm_hard_start_xmit,
121 .ndo_validate_addr = eth_validate_addr,
122 .ndo_set_mac_address = eth_mac_addr,
123 .ndo_do_ioctl = ptm_ioctl,
124 .ndo_tx_timeout = ptm_tx_timeout,
125 };
126
127 static struct net_device *g_net_dev[1] = {0};
128 static char *g_net_dev_name[1] = {"dsl0"};
129
130 static int g_ptm_prio_queue_map[8];
131
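/*
 * Kernel 5.9 switched DECLARE_TASKLET() to the new callback prototype
 * (struct tasklet_struct *); DECLARE_TASKLET_OLD() keeps the legacy
 * (unsigned long data) prototype used by do_swap_desc_tasklet().
 */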
132 #if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
133 static DECLARE_TASKLET(g_swap_desc_tasklet, do_swap_desc_tasklet, 0);
134 #else
135 static DECLARE_TASKLET_OLD(g_swap_desc_tasklet, do_swap_desc_tasklet);
136 #endif
137
138
139 unsigned int ifx_ptm_dbg_enable = DBG_ENABLE_MASK_ERR;
140
141 /*
142 * ####################################
143 * Local Function
144 * ####################################
145 */
146
147 static void ptm_setup(struct net_device *dev, int ndev)
148 {
149 u8 addr[ETH_ALEN];
150
151 netif_carrier_off(dev);
152
153 dev->netdev_ops = &g_ptm_netdev_ops;
154 /* Allow up to 1508 bytes, for RFC4638 */
155 dev->max_mtu = ETH_DATA_LEN + 8;
156 netif_napi_add(dev, &g_ptm_priv_data.itf[ndev].napi, ptm_napi_poll, 16);
157 dev->watchdog_timeo = ETH_WATCHDOG_TIMEOUT;
158
159 addr[0] = 0x00;
160 addr[1] = 0x20;
161 addr[2] = 0xda;
162 addr[3] = 0x86;
163 addr[4] = 0x23;
164 addr[5] = 0x75 + ndev;
165 eth_hw_addr_set(dev, addr);
166 }
167
168 static struct net_device_stats *ptm_get_stats(struct net_device *dev)
169 {
170 struct net_device_stats *s;
171
172 if ( dev != g_net_dev[0] )
173 return NULL;
174 s = &g_ptm_priv_data.itf[0].stats;
175
176 return s;
177 }
178
179 static int ptm_open(struct net_device *dev)
180 {
181 ASSERT(dev == g_net_dev[0], "incorrect device");
182
183 napi_enable(&g_ptm_priv_data.itf[0].napi);
184
185 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
186
187 netif_start_queue(dev);
188
189 return 0;
190 }
191
192 static int ptm_stop(struct net_device *dev)
193 {
194 ASSERT(dev == g_net_dev[0], "incorrect device");
195
196 IFX_REG_W32_MASK(1 | (1 << 17), 0, MBOX_IGU1_IER);
197
198 napi_disable(&g_ptm_priv_data.itf[0].napi);
199
200 netif_stop_queue(dev);
201
202 return 0;
203 }
204
205 static unsigned int ptm_poll(int ndev, unsigned int work_to_do)
206 {
207 unsigned int work_done = 0;
208 volatile struct rx_descriptor *desc;
209 struct rx_descriptor reg_desc;
210 struct sk_buff *skb, *new_skb;
211
212 ASSERT(ndev >= 0 && ndev < ARRAY_SIZE(g_net_dev), "ndev = %d (wrong value)", ndev);
213
214 while ( work_done < work_to_do ) {
215 desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
216         if ( desc->own /* || !desc->c */ )   // if PP32 holds the descriptor or the descriptor is not completed
217 break;
218 if ( ++g_ptm_priv_data.itf[0].rx_desc_pos == WAN_RX_DESC_NUM )
219 g_ptm_priv_data.itf[0].rx_desc_pos = 0;
220
221 reg_desc = *desc;
222 skb = get_skb_pointer(reg_desc.dataptr);
223 ASSERT(skb != NULL, "invalid pointer skb == NULL");
224
225 new_skb = alloc_skb_rx();
226 if ( new_skb != NULL ) {
227 skb_reserve(skb, reg_desc.byteoff);
228 skb_put(skb, reg_desc.datalen);
229
230 // parse protocol header
231 skb->dev = g_net_dev[0];
232 skb->protocol = eth_type_trans(skb, skb->dev);
233
234 netif_receive_skb(skb);
235
236 g_ptm_priv_data.itf[0].stats.rx_packets++;
237 g_ptm_priv_data.itf[0].stats.rx_bytes += reg_desc.datalen;
238
239 reg_desc.dataptr = (unsigned int)new_skb->data & 0x0FFFFFFF;
240 reg_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
241 }
242
243 reg_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
244 reg_desc.own = 1;
245 reg_desc.c = 0;
246
247         /* write descriptor back to memory */
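        /*
         * Note (added comment): the second descriptor word (buffer pointer) is
         * written first; the wmb() makes sure it reaches memory before the
         * first word, which carries the OWN bit handing the descriptor back
         * to the PP32 firmware.
         */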
248 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
249 wmb();
250 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
251
252 work_done++;
253 }
254
255 return work_done;
256 }
257
258 static int ptm_napi_poll(struct napi_struct *napi, int budget)
259 {
260 int ndev = 0;
261 unsigned int work_done;
262
263 work_done = ptm_poll(ndev, budget);
264
265 // interface down
266 if ( !netif_running(napi->dev) ) {
267 napi_complete(napi);
268 return work_done;
269 }
270
271 // clear interrupt
272 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_ISRC);
273 // no more traffic
274 if (work_done < budget) {
275 napi_complete(napi);
276 IFX_REG_W32_MASK(0, 1, MBOX_IGU1_IER);
277 return work_done;
278 }
279
280 // next round
281 return work_done;
282 }
283
284 static int ptm_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
285 {
286 unsigned int f_full;
287 int desc_base;
288 volatile struct tx_descriptor *desc;
289 struct tx_descriptor reg_desc = {0};
290 struct sk_buff *skb_to_free;
291 unsigned int byteoff;
292
293 ASSERT(dev == g_net_dev[0], "incorrect device");
294
295 if ( !g_showtime ) {
296 err("not in showtime");
297 goto PTM_HARD_START_XMIT_FAIL;
298 }
299
300 /* allocate descriptor */
301 desc_base = get_tx_desc(0, &f_full);
302 if ( f_full ) {
303 netif_trans_update(dev);
304 netif_stop_queue(dev);
305
306 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_ISRC);
307 IFX_REG_W32_MASK(0, 1 << 17, MBOX_IGU1_IER);
308 }
309 if ( desc_base < 0 )
310 goto PTM_HARD_START_XMIT_FAIL;
311 desc = &CPU_TO_WAN_TX_DESC_BASE[desc_base];
312
313 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
314 if ( skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff || skb_cloned(skb) ) {
315 struct sk_buff *new_skb;
316
317 ASSERT(skb_headroom(skb) >= sizeof(struct sk_buff *) + byteoff, "skb_headroom(skb) < sizeof(struct sk_buff *) + byteoff");
318 ASSERT(!skb_cloned(skb), "skb is cloned");
319
320 new_skb = alloc_skb_tx(skb->len);
321 if ( new_skb == NULL ) {
322 dbg("no memory");
323 goto ALLOC_SKB_TX_FAIL;
324 }
325 skb_put(new_skb, skb->len);
326 memcpy(new_skb->data, skb->data, skb->len);
327 dev_kfree_skb_any(skb);
328 skb = new_skb;
329 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
330 /* write back to physical memory */
331 dma_cache_wback((unsigned long)skb->data, skb->len);
332 }
333
334 /* make the skb unowned */
335 skb_orphan(skb);
336
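    /*
     * Note (added comment): the skb pointer is stored in the word just below
     * the aligned payload so that get_skb_pointer() can recover and free the
     * skb once the PP32 firmware is done with the descriptor.
     */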
337 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
338 /* write back to physical memory */
339 dma_cache_wback((unsigned long)skb->data - byteoff - sizeof(struct sk_buff *), skb->len + byteoff + sizeof(struct sk_buff *));
340
341 /* free previous skb */
342 skb_to_free = get_skb_pointer(desc->dataptr);
343 if ( skb_to_free != NULL )
344 dev_kfree_skb_any(skb_to_free);
345
346 /* update descriptor */
347 reg_desc.small = 0;
348 reg_desc.dataptr = (unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1));
349 reg_desc.datalen = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
350 reg_desc.qid = g_ptm_prio_queue_map[skb->priority > 7 ? 7 : skb->priority];
351 reg_desc.byteoff = byteoff;
352 reg_desc.own = 1;
353 reg_desc.c = 1;
354 reg_desc.sop = reg_desc.eop = 1;
355
356 /* update MIB */
357 g_ptm_priv_data.itf[0].stats.tx_packets++;
358 g_ptm_priv_data.itf[0].stats.tx_bytes += reg_desc.datalen;
359
360     /* write descriptor back to memory */
361 *((volatile unsigned int *)desc + 1) = *((unsigned int *)&reg_desc + 1);
362 wmb();
363 *(volatile unsigned int *)desc = *(unsigned int *)&reg_desc;
364
365 netif_trans_update(dev);
366
367 return 0;
368
369 ALLOC_SKB_TX_FAIL:
370 PTM_HARD_START_XMIT_FAIL:
371 dev_kfree_skb_any(skb);
372 g_ptm_priv_data.itf[0].stats.tx_dropped++;
373 return 0;
374 }
375
376 static int ptm_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
377 {
378 ASSERT(dev == g_net_dev[0], "incorrect device");
379
380 switch ( cmd )
381 {
382 case IFX_PTM_MIB_CW_GET:
383 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxNoIdleCodewords = IFX_REG_R32(DREG_AR_CELL0) + IFX_REG_R32(DREG_AR_CELL1);
384 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxIdleCodewords = IFX_REG_R32(DREG_AR_IDLE_CNT0) + IFX_REG_R32(DREG_AR_IDLE_CNT1);
385 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifRxCodingViolation = IFX_REG_R32(DREG_AR_CVN_CNT0) + IFX_REG_R32(DREG_AR_CVN_CNT1) + IFX_REG_R32(DREG_AR_CVNP_CNT0) + IFX_REG_R32(DREG_AR_CVNP_CNT1);
386 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxNoIdleCodewords = IFX_REG_R32(DREG_AT_CELL0) + IFX_REG_R32(DREG_AT_CELL1);
387 ((PTM_CW_IF_ENTRY_T *)ifr->ifr_data)->ifTxIdleCodewords = IFX_REG_R32(DREG_AT_IDLE_CNT0) + IFX_REG_R32(DREG_AT_IDLE_CNT1);
388 break;
389 case IFX_PTM_MIB_FRAME_GET:
390 {
391 PTM_FRAME_MIB_T data = {0};
392 int i;
393
394 data.RxCorrect = IFX_REG_R32(DREG_AR_HEC_CNT0) + IFX_REG_R32(DREG_AR_HEC_CNT1) + IFX_REG_R32(DREG_AR_AIIDLE_CNT0) + IFX_REG_R32(DREG_AR_AIIDLE_CNT1);
395 for ( i = 0; i < 4; i++ )
396 data.RxDropped += WAN_RX_MIB_TABLE(i)->wrx_dropdes_pdu;
397 for ( i = 0; i < 8; i++ )
398 data.TxSend += WAN_TX_MIB_TABLE(i)->wtx_total_pdu;
399
400 *((PTM_FRAME_MIB_T *)ifr->ifr_data) = data;
401 }
402 break;
403 case IFX_PTM_CFG_GET:
404         // use bearer channel 0 preemption Gamma interface settings
405 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcPresent = 1;
406 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck = RX_GAMMA_ITF_CFG(0)->rx_eth_fcs_ver_dis == 0 ? 1 : 0;
407         ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck       = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis == 0 ? 1 : 0;
408 ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen = RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size == 0 ? 0 : (RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size * 16);
409 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen = TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis == 0 ? 1 : 0;
410 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : 1;
411 ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen = TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size == 0 ? 0 : (TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size * 16);
412 break;
413 case IFX_PTM_CFG_SET:
414 {
415 int i;
416
417 for ( i = 0; i < 4; i++ ) {
418 RX_GAMMA_ITF_CFG(i)->rx_eth_fcs_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxEthCrcCheck ? 0 : 1;
419
420 RX_GAMMA_ITF_CFG(0)->rx_tc_crc_ver_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcCheck ? 0 : 1;
421
422 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->RxTcCrcLen ) {
423 case 16: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 1; break;
424 case 32: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 2; break;
425 default: RX_GAMMA_ITF_CFG(0)->rx_tc_crc_size = 0;
426 }
427
428 TX_GAMMA_ITF_CFG(0)->tx_eth_fcs_gen_dis = ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxEthCrcGen ? 0 : 1;
429
430 if ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcGen ) {
431 switch ( ((IFX_PTM_CFG_T *)ifr->ifr_data)->TxTcCrcLen ) {
432 case 16: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 1; break;
433 case 32: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 2; break;
434 default: TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
435 }
436 }
437 else
438 TX_GAMMA_ITF_CFG(0)->tx_tc_crc_size = 0;
439 }
440 }
441 break;
442 case IFX_PTM_MAP_PKT_PRIO_TO_Q:
443 {
444 struct ppe_prio_q_map cmd;
445
446 if ( copy_from_user(&cmd, ifr->ifr_data, sizeof(cmd)) )
447 return -EFAULT;
448
449 if ( cmd.pkt_prio < 0 || cmd.pkt_prio >= ARRAY_SIZE(g_ptm_prio_queue_map) )
450 return -EINVAL;
451
452 if ( cmd.qid < 0 || cmd.qid >= g_wanqos_en )
453 return -EINVAL;
454
455 g_ptm_prio_queue_map[cmd.pkt_prio] = cmd.qid;
456 }
457 break;
458 default:
459 return -EOPNOTSUPP;
460 }
461
462 return 0;
463 }
464
465 #if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
466 static void ptm_tx_timeout(struct net_device *dev)
467 #else
468 static void ptm_tx_timeout(struct net_device *dev, unsigned int txqueue)
469 #endif
470 {
471 ASSERT(dev == g_net_dev[0], "incorrect device");
472
473 /* disable TX irq, release skb when sending new packet */
474 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
475
476 /* wake up TX queue */
477 netif_wake_queue(dev);
478
479 return;
480 }
481
482 static inline struct sk_buff* alloc_skb_rx(void)
483 {
484 struct sk_buff *skb;
485
486     /* allocate memory including trailer and padding */
487 skb = dev_alloc_skb(RX_MAX_BUFFER_SIZE + DATA_BUFFER_ALIGNMENT);
488 if ( skb != NULL ) {
489         /* the buffer must be aligned to the burst length (skb_reserve() rounds skb->data up to the next DATA_BUFFER_ALIGNMENT boundary); two more bytes are reserved for MAC address alignment */
490 if ( ((unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1)) != 0 )
491 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
492         /* put the skb pointer in the reserved area "skb->data - 4" */
493 *((struct sk_buff **)skb->data - 1) = skb;
494 wmb();
495 /* write back and invalidate cache */
496 dma_cache_wback_inv((unsigned long)skb->data - sizeof(skb), sizeof(skb));
497 /* invalidate cache */
498 dma_cache_inv((unsigned long)skb->data, (unsigned int)skb->end - (unsigned int)skb->data);
499 }
500
501 return skb;
502 }
503
504 static inline struct sk_buff* alloc_skb_tx(unsigned int size)
505 {
506 struct sk_buff *skb;
507
508 /* allocate memory including padding */
509 size = RX_MAX_BUFFER_SIZE;
510 size = (size + DATA_BUFFER_ALIGNMENT - 1) & ~(DATA_BUFFER_ALIGNMENT - 1);
511 skb = dev_alloc_skb(size + DATA_BUFFER_ALIGNMENT);
512 /* must be burst length alignment */
513 if ( skb != NULL )
514 skb_reserve(skb, ~((unsigned int)skb->data + (DATA_BUFFER_ALIGNMENT - 1)) & (DATA_BUFFER_ALIGNMENT - 1));
515 return skb;
516 }
517
518 static inline struct sk_buff *get_skb_pointer(unsigned int dataptr)
519 {
520 unsigned int skb_dataptr;
521 struct sk_buff *skb;
522
523     // CPE memory is usually less than 256 MB,
524     // so a zero dataptr can be treated as an invalid pointer
525 if ( dataptr == 0 ) {
526 dbg("dataptr is 0, it's supposed to be invalid pointer");
527 return NULL;
528 }
529
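    /*
     * Note (added comment): the descriptor carries only the low 28 bits of the
     * buffer's physical address; OR-ing in KSEG1 converts it back to an
     * uncached virtual address, and the skb pointer sits in the four bytes
     * just below the buffer (stored by alloc_skb_rx()/ptm_hard_start_xmit()).
     */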
530 skb_dataptr = (dataptr - 4) | KSEG1;
531 skb = *(struct sk_buff **)skb_dataptr;
532
533 ASSERT((unsigned int)skb >= KSEG0, "invalid skb - skb = %#08x, dataptr = %#08x", (unsigned int)skb, dataptr);
534 ASSERT((((unsigned int)skb->data & (0x0FFFFFFF ^ (DATA_BUFFER_ALIGNMENT - 1))) | KSEG1) == (dataptr | KSEG1), "invalid skb - skb = %#08x, skb->data = %#08x, dataptr = %#08x", (unsigned int)skb, (unsigned int)skb->data, dataptr);
535
536 return skb;
537 }
538
539 static inline int get_tx_desc(unsigned int itf, unsigned int *f_full)
540 {
541 int desc_base = -1;
542 struct ptm_itf *p_itf = &g_ptm_priv_data.itf[0];
543
544     // TX is assumed to be serialized,
545     // so no locking is provided
546
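    /*
     * Note (added comment): f_full stays set when no free descriptor is
     * available, or when the descriptor after the one just taken is still
     * owned by the PP32 (the ring will be full after this packet); the caller
     * then stops the netdev queue and arms the TX-done interrupt.
     */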
547 *f_full = 1;
548
549 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 ) {
550 desc_base = p_itf->tx_desc_pos;
551 if ( ++(p_itf->tx_desc_pos) == CPU_TO_WAN_TX_DESC_NUM )
552 p_itf->tx_desc_pos = 0;
553 if ( CPU_TO_WAN_TX_DESC_BASE[p_itf->tx_desc_pos].own == 0 )
554 *f_full = 0;
555 }
556
557 return desc_base;
558 }
559
560 static irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
561 {
562 unsigned int isr;
563
564 isr = IFX_REG_R32(MBOX_IGU1_ISR);
565 IFX_REG_W32(isr, MBOX_IGU1_ISRC);
566 isr &= IFX_REG_R32(MBOX_IGU1_IER);
567
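    /*
     * Note (added comment): MBOX_IGU1 bits used here: bit 0 signals received
     * frames (handled via NAPI), bit 16 signals swap descriptors to refill,
     * and bit 17 signals free TX descriptors so the netdev queue can be woken.
     */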
568 if (isr & BIT(0)) {
569 IFX_REG_W32_MASK(1, 0, MBOX_IGU1_IER);
570 napi_schedule(&g_ptm_priv_data.itf[0].napi);
571 #if defined(ENABLE_TMP_DBG) && ENABLE_TMP_DBG
572 {
573 volatile struct rx_descriptor *desc = &WAN_RX_DESC_BASE[g_ptm_priv_data.itf[0].rx_desc_pos];
574
575             if ( desc->own ) { // PP32 holds the descriptor
576 err("invalid interrupt");
577 }
578 }
579 #endif
580 }
581 if (isr & BIT(16)) {
582 IFX_REG_W32_MASK(1 << 16, 0, MBOX_IGU1_IER);
583 tasklet_hi_schedule(&g_swap_desc_tasklet);
584 }
585 if (isr & BIT(17)) {
586 IFX_REG_W32_MASK(1 << 17, 0, MBOX_IGU1_IER);
587 netif_wake_queue(g_net_dev[0]);
588 }
589
590 return IRQ_HANDLED;
591 }
592
593 static void do_swap_desc_tasklet(unsigned long arg)
594 {
595 int budget = 32;
596 volatile struct tx_descriptor *desc;
597 struct sk_buff *skb;
598 unsigned int byteoff;
599
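    /*
     * Note (added comment): the swap descriptor ring is how the PP32 firmware
     * hands buffers it has finished with back to the CPU; each returned skb is
     * freed and the slot is immediately refilled with a fresh buffer (own = 1)
     * so the firmware never runs out of swap buffers.
     */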
600 while ( budget-- > 0 ) {
601         if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own )  // if PP32 holds the descriptor
602 break;
603
604 desc = &WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos];
605 if ( ++g_ptm_priv_data.itf[0].tx_swap_desc_pos == WAN_SWAP_DESC_NUM )
606 g_ptm_priv_data.itf[0].tx_swap_desc_pos = 0;
607
608 skb = get_skb_pointer(desc->dataptr);
609 if ( skb != NULL )
610 dev_kfree_skb_any(skb);
611
612 skb = alloc_skb_tx(RX_MAX_BUFFER_SIZE);
613 if ( skb == NULL )
614 panic("can't allocate swap buffer for PPE firmware use\n");
615 byteoff = (unsigned int)skb->data & (DATA_BUFFER_ALIGNMENT - 1);
616 *(struct sk_buff **)((unsigned int)skb->data - byteoff - sizeof(struct sk_buff *)) = skb;
617
618 desc->dataptr = (unsigned int)skb->data & 0x0FFFFFFF;
619 desc->own = 1;
620 }
621
622 // clear interrupt
623     IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_ISRC);
624 // no more skb to be replaced
625 if ( WAN_SWAP_DESC_BASE[g_ptm_priv_data.itf[0].tx_swap_desc_pos].own ) { // if PP32 hold descriptor
626 IFX_REG_W32_MASK(0, 1 << 16, MBOX_IGU1_IER);
627 return;
628 }
629
630 tasklet_hi_schedule(&g_swap_desc_tasklet);
631 return;
632 }
633
634
635 static inline int ifx_ptm_version(char *buf)
636 {
637 int len = 0;
638 unsigned int major, mid, minor;
639
640 ifx_ptm_get_fw_ver(&major, &mid, &minor);
641
642 len += ifx_drv_ver(buf + len, "PTM", IFX_PTM_VER_MAJOR, IFX_PTM_VER_MID, IFX_PTM_VER_MINOR);
643 if ( mid == ~0 )
644 len += sprintf(buf + len, " PTM (E1) firmware version %u.%u\n", major, minor);
645 else
646 len += sprintf(buf + len, " PTM (E1) firmware version %u.%u.%u\n", major, mid, minor);
647
648 return len;
649 }
650
651 static inline int init_priv_data(void)
652 {
653 int i, j;
654
655 g_wanqos_en = wanqos_en ? wanqos_en : 8;
656 if ( g_wanqos_en > 8 )
657 g_wanqos_en = 8;
658
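    /*
     * Note (added comment): queue_gamma_map[] holds one bitmask of TX QoS
     * queues per Gamma interface; queues already claimed by an earlier
     * interface are masked out, so each queue belongs to exactly one interface.
     */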
659 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ )
660 {
661 g_queue_gamma_map[i] = queue_gamma_map[i] & ((1 << g_wanqos_en) - 1);
662 for ( j = 0; j < i; j++ )
663 g_queue_gamma_map[i] &= ~g_queue_gamma_map[j];
664 }
665
666 memset(&g_ptm_priv_data, 0, sizeof(g_ptm_priv_data));
667
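    /*
     * Note (added comment): the block below builds the skb->priority -> TX QoS
     * queue map by spreading the priorities linearly over the queues in
     * reverse order; e.g. assuming __ETH_WAN_TX_QUEUE_NUM is 8, priorities
     * 0..7 map to queue IDs 7..0.
     */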
668 {
669 int max_packet_priority = ARRAY_SIZE(g_ptm_prio_queue_map);
670 int tx_num_q;
671 int q_step, q_accum, p_step;
672
673 tx_num_q = __ETH_WAN_TX_QUEUE_NUM;
674 q_step = tx_num_q - 1;
675 p_step = max_packet_priority - 1;
676 for ( j = 0, q_accum = 0; j < max_packet_priority; j++, q_accum += q_step )
677 g_ptm_prio_queue_map[j] = q_step - (q_accum + (p_step >> 1)) / p_step;
678 }
679
680 return 0;
681 }
682
683 static inline void clear_priv_data(void)
684 {
685 }
686
687 static inline int init_tables(void)
688 {
689 struct sk_buff *skb_pool[WAN_RX_DESC_NUM] = {0};
690 struct cfg_std_data_len cfg_std_data_len = {0};
691 struct tx_qos_cfg tx_qos_cfg = {0};
692 struct psave_cfg psave_cfg = {0};
693 struct eg_bwctrl_cfg eg_bwctrl_cfg = {0};
694 struct test_mode test_mode = {0};
695 struct rx_bc_cfg rx_bc_cfg = {0};
696 struct tx_bc_cfg tx_bc_cfg = {0};
697 struct gpio_mode gpio_mode = {0};
698 struct gpio_wm_cfg gpio_wm_cfg = {0};
699 struct rx_gamma_itf_cfg rx_gamma_itf_cfg = {0};
700 struct tx_gamma_itf_cfg tx_gamma_itf_cfg = {0};
701 struct wtx_qos_q_desc_cfg wtx_qos_q_desc_cfg = {0};
702 struct rx_descriptor rx_desc = {0};
703 struct tx_descriptor tx_desc = {0};
704 int i;
705
706 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
707 skb_pool[i] = alloc_skb_rx();
708 if ( skb_pool[i] == NULL )
709 goto ALLOC_SKB_RX_FAIL;
710 }
711
712 cfg_std_data_len.byte_off = RX_HEAD_MAC_ADDR_ALIGNMENT; // this field replaces byte_off in rx descriptor of VDSL ingress
713 cfg_std_data_len.data_len = 1600;
714 *CFG_STD_DATA_LEN = cfg_std_data_len;
715
716 tx_qos_cfg.time_tick = cgu_get_pp32_clock() / 62500; // 16 * (cgu_get_pp32_clock() / 1000000)
717 tx_qos_cfg.overhd_bytes = 0;
718 tx_qos_cfg.eth1_eg_qnum = __ETH_WAN_TX_QUEUE_NUM;
719 tx_qos_cfg.eth1_burst_chk = 1;
720 tx_qos_cfg.eth1_qss = 0;
721 tx_qos_cfg.shape_en = 0; // disable
722 tx_qos_cfg.wfq_en = 0; // strict priority
723 *TX_QOS_CFG = tx_qos_cfg;
724
725 psave_cfg.start_state = 0;
726 psave_cfg.sleep_en = 1; // enable sleep mode
727 *PSAVE_CFG = psave_cfg;
728
729 eg_bwctrl_cfg.fdesc_wm = 16;
730 eg_bwctrl_cfg.class_len = 128;
731 *EG_BWCTRL_CFG = eg_bwctrl_cfg;
732
733 //*GPIO_ADDR = (unsigned int)IFX_GPIO_P0_OUT;
734 *GPIO_ADDR = (unsigned int)0x00000000; // disabled by default
735
736 gpio_mode.gpio_bit_bc1 = 2;
737 gpio_mode.gpio_bit_bc0 = 1;
738 gpio_mode.gpio_bc1_en = 0;
739 gpio_mode.gpio_bc0_en = 0;
740 *GPIO_MODE = gpio_mode;
741
742 gpio_wm_cfg.stop_wm_bc1 = 2;
743 gpio_wm_cfg.start_wm_bc1 = 4;
744 gpio_wm_cfg.stop_wm_bc0 = 2;
745 gpio_wm_cfg.start_wm_bc0 = 4;
746 *GPIO_WM_CFG = gpio_wm_cfg;
747
748 test_mode.mib_clear_mode = 0;
749 test_mode.test_mode = 0;
750 *TEST_MODE = test_mode;
751
752 rx_bc_cfg.local_state = 0;
753 rx_bc_cfg.remote_state = 0;
754 rx_bc_cfg.to_false_th = 7;
755 rx_bc_cfg.to_looking_th = 3;
756 *RX_BC_CFG(0) = rx_bc_cfg;
757 *RX_BC_CFG(1) = rx_bc_cfg;
758
759 tx_bc_cfg.fill_wm = 2;
760 tx_bc_cfg.uflw_wm = 2;
761 *TX_BC_CFG(0) = tx_bc_cfg;
762 *TX_BC_CFG(1) = tx_bc_cfg;
763
764 rx_gamma_itf_cfg.receive_state = 0;
765 rx_gamma_itf_cfg.rx_min_len = 60;
766 rx_gamma_itf_cfg.rx_pad_en = 1;
767 rx_gamma_itf_cfg.rx_eth_fcs_ver_dis = 0;
768 rx_gamma_itf_cfg.rx_rm_eth_fcs = 1;
769 rx_gamma_itf_cfg.rx_tc_crc_ver_dis = 0;
770 rx_gamma_itf_cfg.rx_tc_crc_size = 1;
771 rx_gamma_itf_cfg.rx_eth_fcs_result = 0xC704DD7B;
772 rx_gamma_itf_cfg.rx_tc_crc_result = 0x1D0F1D0F;
773 rx_gamma_itf_cfg.rx_crc_cfg = 0x2500;
774 rx_gamma_itf_cfg.rx_eth_fcs_init_value = 0xFFFFFFFF;
775 rx_gamma_itf_cfg.rx_tc_crc_init_value = 0x0000FFFF;
776 rx_gamma_itf_cfg.rx_max_len_sel = 0;
777 rx_gamma_itf_cfg.rx_edit_num2 = 0;
778 rx_gamma_itf_cfg.rx_edit_pos2 = 0;
779 rx_gamma_itf_cfg.rx_edit_type2 = 0;
780 rx_gamma_itf_cfg.rx_edit_en2 = 0;
781 rx_gamma_itf_cfg.rx_edit_num1 = 0;
782 rx_gamma_itf_cfg.rx_edit_pos1 = 0;
783 rx_gamma_itf_cfg.rx_edit_type1 = 0;
784 rx_gamma_itf_cfg.rx_edit_en1 = 0;
785 rx_gamma_itf_cfg.rx_inserted_bytes_1l = 0;
786 rx_gamma_itf_cfg.rx_inserted_bytes_1h = 0;
787 rx_gamma_itf_cfg.rx_inserted_bytes_2l = 0;
788 rx_gamma_itf_cfg.rx_inserted_bytes_2h = 0;
789 rx_gamma_itf_cfg.rx_len_adj = -6;
790 for ( i = 0; i < 4; i++ )
791 *RX_GAMMA_ITF_CFG(i) = rx_gamma_itf_cfg;
792
793 tx_gamma_itf_cfg.tx_len_adj = 6;
794 tx_gamma_itf_cfg.tx_crc_off_adj = 6;
795 tx_gamma_itf_cfg.tx_min_len = 0;
796 tx_gamma_itf_cfg.tx_eth_fcs_gen_dis = 0;
797 tx_gamma_itf_cfg.tx_tc_crc_size = 1;
798 tx_gamma_itf_cfg.tx_crc_cfg = 0x2F00;
799 tx_gamma_itf_cfg.tx_eth_fcs_init_value = 0xFFFFFFFF;
800 tx_gamma_itf_cfg.tx_tc_crc_init_value = 0x0000FFFF;
801 for ( i = 0; i < ARRAY_SIZE(g_queue_gamma_map); i++ ) {
802 tx_gamma_itf_cfg.queue_mapping = g_queue_gamma_map[i];
803 *TX_GAMMA_ITF_CFG(i) = tx_gamma_itf_cfg;
804 }
805
806 for ( i = 0; i < __ETH_WAN_TX_QUEUE_NUM; i++ ) {
807 wtx_qos_q_desc_cfg.length = WAN_TX_DESC_NUM;
808 wtx_qos_q_desc_cfg.addr = __ETH_WAN_TX_DESC_BASE(i);
809 *WTX_QOS_Q_DESC_CFG(i) = wtx_qos_q_desc_cfg;
810 }
811
812 // default TX queue QoS config is all ZERO
813
814 // TX Ctrl K Table
815 IFX_REG_W32(0x90111293, TX_CTRL_K_TABLE(0));
816 IFX_REG_W32(0x14959617, TX_CTRL_K_TABLE(1));
817 IFX_REG_W32(0x18999A1B, TX_CTRL_K_TABLE(2));
818 IFX_REG_W32(0x9C1D1E9F, TX_CTRL_K_TABLE(3));
819 IFX_REG_W32(0xA02122A3, TX_CTRL_K_TABLE(4));
820 IFX_REG_W32(0x24A5A627, TX_CTRL_K_TABLE(5));
821 IFX_REG_W32(0x28A9AA2B, TX_CTRL_K_TABLE(6));
822 IFX_REG_W32(0xAC2D2EAF, TX_CTRL_K_TABLE(7));
823 IFX_REG_W32(0x30B1B233, TX_CTRL_K_TABLE(8));
824 IFX_REG_W32(0xB43536B7, TX_CTRL_K_TABLE(9));
825 IFX_REG_W32(0xB8393ABB, TX_CTRL_K_TABLE(10));
826 IFX_REG_W32(0x3CBDBE3F, TX_CTRL_K_TABLE(11));
827 IFX_REG_W32(0xC04142C3, TX_CTRL_K_TABLE(12));
828 IFX_REG_W32(0x44C5C647, TX_CTRL_K_TABLE(13));
829 IFX_REG_W32(0x48C9CA4B, TX_CTRL_K_TABLE(14));
830 IFX_REG_W32(0xCC4D4ECF, TX_CTRL_K_TABLE(15));
831
832 // init RX descriptor
833 rx_desc.own = 1;
834 rx_desc.c = 0;
835 rx_desc.sop = 1;
836 rx_desc.eop = 1;
837 rx_desc.byteoff = RX_HEAD_MAC_ADDR_ALIGNMENT;
838 rx_desc.datalen = RX_MAX_BUFFER_SIZE - RX_HEAD_MAC_ADDR_ALIGNMENT;
839 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
840 rx_desc.dataptr = (unsigned int)skb_pool[i]->data & 0x0FFFFFFF;
841 WAN_RX_DESC_BASE[i] = rx_desc;
842 }
843
844 // init TX descriptor
845 tx_desc.own = 0;
846 tx_desc.c = 0;
847 tx_desc.sop = 1;
848 tx_desc.eop = 1;
849 tx_desc.byteoff = 0;
850 tx_desc.qid = 0;
851 tx_desc.datalen = 0;
852 tx_desc.small = 0;
853 tx_desc.dataptr = 0;
854 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ )
855 CPU_TO_WAN_TX_DESC_BASE[i] = tx_desc;
856 for ( i = 0; i < WAN_TX_DESC_NUM_TOTAL; i++ )
857 WAN_TX_DESC_BASE(0)[i] = tx_desc;
858
859 // init Swap descriptor
860 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ )
861 WAN_SWAP_DESC_BASE[i] = tx_desc;
862
863 // init fastpath TX descriptor
864 tx_desc.own = 1;
865 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ )
866 FASTPATH_TO_WAN_TX_DESC_BASE[i] = tx_desc;
867
868 return 0;
869
870 ALLOC_SKB_RX_FAIL:
871 while ( i-- > 0 )
872 dev_kfree_skb_any(skb_pool[i]);
873 return -1;
874 }
875
876 static inline void clear_tables(void)
877 {
878 struct sk_buff *skb;
879 int i, j;
880
881 for ( i = 0; i < WAN_RX_DESC_NUM; i++ ) {
882 skb = get_skb_pointer(WAN_RX_DESC_BASE[i].dataptr);
883 if ( skb != NULL )
884 dev_kfree_skb_any(skb);
885 }
886
887 for ( i = 0; i < CPU_TO_WAN_TX_DESC_NUM; i++ ) {
888 skb = get_skb_pointer(CPU_TO_WAN_TX_DESC_BASE[i].dataptr);
889 if ( skb != NULL )
890 dev_kfree_skb_any(skb);
891 }
892
893 for ( j = 0; j < 8; j++ )
894 for ( i = 0; i < WAN_TX_DESC_NUM; i++ ) {
895 skb = get_skb_pointer(WAN_TX_DESC_BASE(j)[i].dataptr);
896 if ( skb != NULL )
897 dev_kfree_skb_any(skb);
898 }
899
900 for ( i = 0; i < WAN_SWAP_DESC_NUM; i++ ) {
901 skb = get_skb_pointer(WAN_SWAP_DESC_BASE[i].dataptr);
902 if ( skb != NULL )
903 dev_kfree_skb_any(skb);
904 }
905
906 for ( i = 0; i < FASTPATH_TO_WAN_TX_DESC_NUM; i++ ) {
907 skb = get_skb_pointer(FASTPATH_TO_WAN_TX_DESC_BASE[i].dataptr);
908 if ( skb != NULL )
909 dev_kfree_skb_any(skb);
910 }
911 }
912
913 static int ptm_showtime_enter(struct port_cell_info *port_cell, void *xdata_addr)
914 {
915 int i;
916
917 ASSERT(port_cell != NULL, "port_cell is NULL");
918 ASSERT(xdata_addr != NULL, "xdata_addr is NULL");
919
920 // TODO: ReTX set xdata_addr
921 g_xdata_addr = xdata_addr;
922
923 g_showtime = 1;
924
925 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
926 netif_carrier_on(g_net_dev[i]);
927
928 IFX_REG_W32(0x0F, UTP_CFG);
929
930 //#ifdef CONFIG_VR9
931 // IFX_REG_W32_MASK(1 << 17, 0, FFSM_CFG0);
932 //#endif
933
934 printk("enter showtime\n");
935
936 return 0;
937 }
938
939 static int ptm_showtime_exit(void)
940 {
941 int i;
942
943 if ( !g_showtime )
944 return -1;
945
946 //#ifdef CONFIG_VR9
947 // IFX_REG_W32_MASK(0, 1 << 17, FFSM_CFG0);
948 //#endif
949
950 IFX_REG_W32(0x00, UTP_CFG);
951
952 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
953 netif_carrier_off(g_net_dev[i]);
954
955 g_showtime = 0;
956
957 // TODO: ReTX clean state
958 g_xdata_addr = NULL;
959
960 printk("leave showtime\n");
961
962 return 0;
963 }
964
965 static const struct of_device_id ltq_ptm_match[] = {
966 #ifdef CONFIG_DANUBE
967 { .compatible = "lantiq,ppe-danube", .data = NULL },
968 #elif defined CONFIG_AMAZON_SE
969 { .compatible = "lantiq,ppe-ase", .data = NULL },
970 #elif defined CONFIG_AR9
971 { .compatible = "lantiq,ppe-arx100", .data = NULL },
972 #elif defined CONFIG_VR9
973 { .compatible = "lantiq,ppe-xrx200", .data = NULL },
974 #endif
975 {},
976 };
977 MODULE_DEVICE_TABLE(of, ltq_ptm_match);
978
979 static int ltq_ptm_probe(struct platform_device *pdev)
980 {
981 int ret;
982 int i;
983 char ver_str[256];
984 struct port_cell_info port_cell = {0};
985
986 ret = init_priv_data();
987 if ( ret != 0 ) {
988 err("INIT_PRIV_DATA_FAIL");
989 goto INIT_PRIV_DATA_FAIL;
990 }
991
992 ifx_ptm_init_chip(pdev);
993 ret = init_tables();
994 if ( ret != 0 ) {
995 err("INIT_TABLES_FAIL");
996 goto INIT_TABLES_FAIL;
997 }
998
999 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1000 g_net_dev[i] = alloc_netdev(0, g_net_dev_name[i], NET_NAME_UNKNOWN, ether_setup);
1001 if ( g_net_dev[i] == NULL )
1002 goto ALLOC_NETDEV_FAIL;
1003 ptm_setup(g_net_dev[i], i);
1004 }
1005
1006 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1007 ret = register_netdev(g_net_dev[i]);
1008 if ( ret != 0 )
1009 goto REGISTER_NETDEV_FAIL;
1010 }
1011
1012 /* register interrupt handler */
1013 ret = request_irq(PPE_MAILBOX_IGU1_INT, mailbox_irq_handler, 0, "ptm_mailbox_isr", &g_ptm_priv_data);
1014 if ( ret ) {
1015 if ( ret == -EBUSY ) {
1016 err("IRQ may be occupied by other driver, please reconfig to disable it.");
1017 }
1018 else {
1019 err("request_irq fail");
1020 }
1021 goto REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL;
1022 }
1023 disable_irq(PPE_MAILBOX_IGU1_INT);
1024
1025 ret = ifx_pp32_start(0);
1026 if ( ret ) {
1027 err("ifx_pp32_start fail!");
1028 goto PP32_START_FAIL;
1029 }
1030 IFX_REG_W32(1 << 16, MBOX_IGU1_IER); // enable SWAP interrupt
1031 IFX_REG_W32(~0, MBOX_IGU1_ISRC);
1032
1033 enable_irq(PPE_MAILBOX_IGU1_INT);
1034
1035 ifx_mei_atm_showtime_check(&g_showtime, &port_cell, &g_xdata_addr);
1036 if ( g_showtime ) {
1037 ptm_showtime_enter(&port_cell, &g_xdata_addr);
1038 }
1039
1040 ifx_mei_atm_showtime_enter = ptm_showtime_enter;
1041 ifx_mei_atm_showtime_exit = ptm_showtime_exit;
1042
1043 ifx_ptm_version(ver_str);
1044 printk(KERN_INFO "%s", ver_str);
1045
1046 printk("ifxmips_ptm: PTM init succeed\n");
1047
1048 return 0;
1049
1050 PP32_START_FAIL:
1051 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1052 REQUEST_IRQ_PPE_MAILBOX_IGU1_INT_FAIL:
1053 i = ARRAY_SIZE(g_net_dev);
1054 REGISTER_NETDEV_FAIL:
1055 while ( i-- )
1056 unregister_netdev(g_net_dev[i]);
1057 i = ARRAY_SIZE(g_net_dev);
1058 ALLOC_NETDEV_FAIL:
1059 while ( i-- ) {
1060 free_netdev(g_net_dev[i]);
1061 g_net_dev[i] = NULL;
1062 }
1063 INIT_TABLES_FAIL:
1064 INIT_PRIV_DATA_FAIL:
1065 clear_priv_data();
1066 printk("ifxmips_ptm: PTM init failed\n");
1067 return ret;
1068 }
1069
1070 static int ltq_ptm_remove(struct platform_device *pdev)
1071 {
1072 int i;
1073 ifx_mei_atm_showtime_enter = NULL;
1074 ifx_mei_atm_showtime_exit = NULL;
1075
1076
1077 ifx_pp32_stop(0);
1078
1079 free_irq(PPE_MAILBOX_IGU1_INT, &g_ptm_priv_data);
1080
1081 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ )
1082 unregister_netdev(g_net_dev[i]);
1083
1084 for ( i = 0; i < ARRAY_SIZE(g_net_dev); i++ ) {
1085 free_netdev(g_net_dev[i]);
1086 g_net_dev[i] = NULL;
1087 }
1088
1089 clear_tables();
1090
1091 ifx_ptm_uninit_chip();
1092
1093 clear_priv_data();
1094
1095 return 0;
1096 }
1097
1098 #ifndef MODULE
1099 static int __init wanqos_en_setup(char *line)
1100 {
1101 wanqos_en = simple_strtoul(line, NULL, 0);
1102
1103 if ( wanqos_en < 1 || wanqos_en > 8 )
1104 wanqos_en = 0;
1105
1106 return 0;
1107 }
1108
1109 static int __init queue_gamma_map_setup(char *line)
1110 {
1111 char *p;
1112 int i;
1113
1114 for ( i = 0, p = line; i < ARRAY_SIZE(queue_gamma_map) && isxdigit(*p); i++ )
1115 {
1116 queue_gamma_map[i] = simple_strtoul(p, &p, 0);
1117 if ( *p == ',' || *p == ';' || *p == ':' )
1118 p++;
1119 }
1120
1121 return 0;
1122 }
1123 #endif
1124 static struct platform_driver ltq_ptm_driver = {
1125 .probe = ltq_ptm_probe,
1126 .remove = ltq_ptm_remove,
1127 .driver = {
1128 .name = "ptm",
1129 .owner = THIS_MODULE,
1130 .of_match_table = ltq_ptm_match,
1131 },
1132 };
1133
1134 module_platform_driver(ltq_ptm_driver);
1135 #ifndef MODULE
1136 __setup("wanqos_en=", wanqos_en_setup);
1137 __setup("queue_gamma_map=", queue_gamma_map_setup);
1138 #endif
1139
1140 MODULE_LICENSE("GPL");