/* [openwrt/svn-archive/archive.git] package/ifxmips-atm/src/irq.c */
#include <linux/kernel.h>   /* printk(), panic() */
#include <linux/skbuff.h>   /* struct sk_buff */
#include <linux/atmdev.h>
#include <linux/irq.h>

#include "common.h"

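/*
 * Signal the PP32 firmware through the IGU3 mailbox that a descriptor has
 * been updated. RX channels use the low ISR bits and TX channels the bits
 * shifted up by 16 (as implied by the "channel + 16" offset below). The
 * busy-wait blocks until the firmware has consumed the previous signal
 * for the same channel.
 */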
void mailbox_signal(unsigned int channel, int is_tx)
{
    if ( is_tx )
    {
        while ( MBOX_IGU3_ISR_ISR(channel + 16) );
        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(channel + 16);
    }
    else
    {
        while ( MBOX_IGU3_ISR_ISR(channel) );
        *MBOX_IGU3_ISRS = MBOX_IGU3_ISRS_SET(channel);
    }
}

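/*
 * Take one completed descriptor from an RX DMA channel. Returns 0 on
 * success and -EAGAIN when no completed descriptor is pending. The skb
 * carried by the descriptor is dispatched either to the OAM handling or
 * to the owning VCC (AAL5 frame or AAL0 cell), the descriptor is then
 * refilled, and on request *len receives the PDU length so the caller
 * can account WFQ weights.
 */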
static int mailbox_rx_irq_handler(unsigned int channel, unsigned int *len)
{
    int conn;
    int skb_base;
    register struct rx_descriptor reg_desc;
    struct rx_descriptor *desc;
    struct sk_buff *skb;
    struct atm_vcc *vcc;
    struct rx_inband_trailer *trailer;

    /* get sk_buff pointer and descriptor */
    skb_base = ppe_dev.dma.rx_descriptor_number * channel + ppe_dev.dma.rx_desc_read_pos[channel];
    desc = &ppe_dev.dma.rx_descriptor_base[skb_base];
    reg_desc = *desc;
    if ( reg_desc.own || !reg_desc.c )
        return -EAGAIN;

    if ( ++ppe_dev.dma.rx_desc_read_pos[channel] == ppe_dev.dma.rx_descriptor_number )
        ppe_dev.dma.rx_desc_read_pos[channel] = 0;

    /* the skb pointer is stored in the word just before the data buffer */
    skb = *(struct sk_buff **)((((u32)reg_desc.dataptr << 2) | KSEG0) - 4);
    if ( (u32)skb <= 0x80000000 )
        panic("mailbox_rx_irq_handler: invalid skb pointer %08X", (u32)skb);

    conn = reg_desc.id;

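    /*
     * Cells on the dedicated OAM queue are classified by hand: F5 flows
     * (VC level) are recognized by their PTI values, F4 flows (VP level)
     * by the reserved VCIs 3 (segment) and 4 (end-to-end); anything else
     * is dropped by setting conn to -1.
     */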
    if ( conn == ppe_dev.oam_rx_queue )
    {
        /* OAM */
        struct uni_cell_header *header = (struct uni_cell_header *)skb->data;

        if ( header->pti == ATM_PTI_SEGF5 || header->pti == ATM_PTI_E2EF5 )
            conn = find_vpivci(header->vpi, header->vci);
        else if ( header->vci == 0x03 || header->vci == 0x04 )
            conn = find_vpi(header->vpi);
        else
            conn = -1;

        if ( conn >= 0 && ppe_dev.connection[conn].vcc != NULL )
        {
            vcc = ppe_dev.connection[conn].vcc;
            ppe_dev.connection[conn].access_time = xtime;
            if ( vcc->push_oam != NULL )
                vcc->push_oam(vcc, skb->data);
        }

        /* no need to resize */
    }
    else
    {
        if ( len )
            *len = 0;

        if ( ppe_dev.connection[conn].vcc != NULL )
        {
            vcc = ppe_dev.connection[conn].vcc;

            if ( !reg_desc.err )
            {
                if ( vcc->qos.aal == ATM_AAL5 )
                {
                    /* AAL5 packet */
                    resize_skb_rx(skb, reg_desc.datalen + reg_desc.byteoff, 0);
                    skb_reserve(skb, reg_desc.byteoff);
                    skb_put(skb, reg_desc.datalen);

                    if ( (u32)ATM_SKB(skb) <= 0x80000000 )
                        panic("mailbox_rx_irq_handler: invalid ATM_SKB(skb) %08X", (u32)ATM_SKB(skb));

                    ATM_SKB(skb)->vcc = vcc;
                    ppe_dev.connection[conn].access_time = xtime;
                    if ( atm_charge(vcc, skb->truesize) )
                    {
                        struct sk_buff *new_skb;

                        new_skb = alloc_skb_rx();
                        if ( new_skb )
                        {
                            UPDATE_VCC_STAT(conn, rx_pdu, 1);

                            ppe_dev.mib.wrx_pdu++;
                            if ( vcc->stats )
                            {
                                /* debug aid: snapshot the counters, push the
                                   skb exactly once, then report which
                                   counters the push path touched */
                                struct k_atm_aal_stats stats;
                                int flag = 0;

                                atomic_inc(&vcc->stats->rx);
                                stats = *vcc->stats;
                                vcc->push(vcc, skb);
                                if ( vcc->stats->rx.counter != stats.rx.counter )
                                {
                                    printk("vcc->stats->rx (diff) = %d\n", vcc->stats->rx.counter - stats.rx.counter);
                                    flag++;
                                }
                                if ( vcc->stats->rx_err.counter != stats.rx_err.counter )
                                {
                                    printk("vcc->stats->rx_err (diff) = %d\n", vcc->stats->rx_err.counter - stats.rx_err.counter);
                                    flag++;
                                }
                                if ( vcc->stats->rx_drop.counter != stats.rx_drop.counter )
                                {
                                    printk("vcc->stats->rx_drop (diff) = %d\n", vcc->stats->rx_drop.counter - stats.rx_drop.counter);
                                    flag++;
                                }
                                if ( vcc->stats->tx.counter != stats.tx.counter )
                                {
                                    printk("vcc->stats->tx (diff) = %d\n", vcc->stats->tx.counter - stats.tx.counter);
                                    flag++;
                                }
                                if ( vcc->stats->tx_err.counter != stats.tx_err.counter )
                                {
                                    printk("vcc->stats->tx_err (diff) = %d\n", vcc->stats->tx_err.counter - stats.tx_err.counter);
                                    flag++;
                                }
                                if ( !flag )
                                    printk("vcc->stats not changed\n");
                            }
                            else
                                vcc->push(vcc, skb);

                            reg_desc.dataptr = (u32)new_skb->data >> 2;

                            if ( len )
                                *len = reg_desc.datalen;
                        }
                        else
                        {
                            /* no free sk_buff */
                            UPDATE_VCC_STAT(conn, rx_sw_drop_pdu, 1);

                            ppe_dev.mib.wrx_drop_pdu++;
                            if ( vcc->stats )
                                atomic_inc(&vcc->stats->rx_drop);

                            resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
                        }
                    }
                    else
                    {
                        /* not enough space */
                        UPDATE_VCC_STAT(conn, rx_sw_drop_pdu, 1);

                        ppe_dev.mib.wrx_drop_pdu++;
                        if ( vcc->stats )
                            atomic_inc(&vcc->stats->rx_drop);

                        resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
                    }
                }
                else
                {
                    /* AAL0 cell */
                    resize_skb_rx(skb, CELL_SIZE, 1);
                    skb_put(skb, CELL_SIZE);

                    ATM_SKB(skb)->vcc = vcc;
                    ppe_dev.connection[conn].access_time = xtime;
                    if ( atm_charge(vcc, skb->truesize) )
                    {
                        struct sk_buff *new_skb;

                        new_skb = alloc_skb_rx();
                        if ( new_skb )
                        {
                            if ( vcc->stats )
                                atomic_inc(&vcc->stats->rx);
                            vcc->push(vcc, skb);
                            reg_desc.dataptr = (u32)new_skb->data >> 2;

                            if ( len )
                                *len = CELL_SIZE;
                        }
                        else
                        {
                            if ( vcc->stats )
                                atomic_inc(&vcc->stats->rx_drop);
                            resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
                        }
                    }
                    else
                    {
                        if ( vcc->stats )
                            atomic_inc(&vcc->stats->rx_drop);
                        resize_skb_rx(skb, ppe_dev.aal5.rx_buffer_size, 0);
                    }
                }
            }
            else
            {
                printk("mailbox_rx_irq_handler: descriptor error (reg_desc.err)\n");

                /* drop the packet/cell */
                if ( vcc->qos.aal == ATM_AAL5 )
                {
                    UPDATE_VCC_STAT(conn, rx_err_pdu, 1);

                    /* the inband trailer follows the DMA-aligned payload */
                    trailer = (struct rx_inband_trailer *)((u32)skb->data + ((reg_desc.byteoff + reg_desc.datalen + DMA_ALIGNMENT - 1) & ~(DMA_ALIGNMENT - 1)));
                    if ( trailer->stw_crc )
                        ppe_dev.connection[conn].aal5_vcc_crc_err++;
                    if ( trailer->stw_ovz )
                        ppe_dev.connection[conn].aal5_vcc_oversize_sdu++;
                }
                if ( vcc->stats )
                    atomic_inc(&vcc->stats->rx_err);
                /* no need to resize */
            }
        }
        else
        {
            printk("ppe_dev.connection[%d].vcc == NULL\n", conn);

            ppe_dev.mib.wrx_drop_pdu++;

            /* no need to resize */
        }
    }

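    /*
     * Hand the descriptor back to the firmware: reset the byte offset,
     * restore the full buffer size, set the own bit and clear the
     * completion bit before writing the shadow copy back to memory.
     */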
    reg_desc.byteoff = 0;
    reg_desc.datalen = ppe_dev.aal5.rx_buffer_size;
    reg_desc.own = 1;
    reg_desc.c = 0;

    /* write descriptor back to memory */
    *desc = reg_desc;

    return 0;
}

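/*
 * Reclaim transmitted skbs of one TX connection. Walks the descriptor
 * ring from the release position until a descriptor still owned by the
 * PP32 firmware is found or the allocation position is reached, freeing
 * one skb per released slot.
 */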
static inline void mailbox_tx_irq_handler(unsigned int conn)
{
    if ( ppe_dev.dma.tx_desc_alloc_flag[conn] )
    {
        int desc_base;
        int *release_pos;
        struct sk_buff *skb;

        release_pos = &ppe_dev.dma.tx_desc_release_pos[conn];
        desc_base = ppe_dev.dma.tx_descriptor_number * (conn - QSB_QUEUE_NUMBER_BASE) + *release_pos;
        while ( !ppe_dev.dma.tx_descriptor_base[desc_base].own )
        {
            skb = ppe_dev.dma.tx_skb_pointers[desc_base];

            /* pretend the PP32 still holds the owner bit so the descriptor
               cannot be released more than once; the allocation path does
               not check this bit */
            ppe_dev.dma.tx_descriptor_base[desc_base].own = 1;

            if ( ++*release_pos == ppe_dev.dma.tx_descriptor_number )
                *release_pos = 0;

            if ( *release_pos == ppe_dev.dma.tx_desc_alloc_pos[conn] )
            {
                ppe_dev.dma.tx_desc_alloc_flag[conn] = 0;

                atm_free_tx_skb_vcc(skb);
                break;
            }

            if ( *release_pos == 0 )
                desc_base = ppe_dev.dma.tx_descriptor_number * (conn - QSB_QUEUE_NUMBER_BASE);
            else
                desc_base++;

            atm_free_tx_skb_vcc(skb);
        }
    }
}

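/*
 * RX QoS: when ENABLE_RX_QOS is set, pending RX descriptors are drained
 * with a simple service discipline. CBR and OAM traffic is served
 * immediately, VBR_RT, VBR_NRT and AVR share the link via weighted fair
 * queueing over the per-channel weights, and UBR is served only when no
 * other channel can make progress.
 */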
#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
static inline int check_desc_valid(unsigned int channel)
{
    int skb_base;
    struct rx_descriptor *desc;

    skb_base = ppe_dev.dma.rx_descriptor_number * channel + ppe_dev.dma.rx_desc_read_pos[channel];
    desc = &ppe_dev.dma.rx_descriptor_base[skb_base];
    return !desc->own && desc->c ? 1 : 0;
}
#endif

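/*
 * Top-level mailbox interrupt handler. IGU1 carries one status bit per
 * DMA channel: the low bits for the RX channels and, offset by 16 plus
 * QSB_QUEUE_NUMBER_BASE, the TX queues. TX completions are reclaimed
 * first; RX work is only counted here (via the vlddes snapshots) and
 * drained afterwards.
 */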
irqreturn_t mailbox_irq_handler(int irq, void *dev_id)
{
    int channel_mask;   /* IRQ bit mask corresponding to the DMA channel */
    int channel;
    unsigned int rx_irq_number[MAX_RX_DMA_CHANNEL_NUMBER] = {0};
    unsigned int total_rx_irq_number = 0;

    if ( !*MBOX_IGU1_ISR )
        return IRQ_RETVAL(1);

    channel_mask = 1;
    channel = 0;
    while ( channel < ppe_dev.dma.rx_total_channel_used )
    {
        if ( (*MBOX_IGU1_ISR & channel_mask) )
        {
            /* RX: clear the IRQ */
            *MBOX_IGU1_ISRC = channel_mask;
            printk(" RX: *MBOX_IGU1_ISR = 0x%08X\n", *MBOX_IGU1_ISR);
            /* wait for the mailbox to be cleared */
            while ( (*MBOX_IGU3_ISR & channel_mask) );

            /* shadow the number of valid descriptors */
            rx_irq_number[channel] = WRX_DMA_CHANNEL_CONFIG(channel)->vlddes;

            total_rx_irq_number += rx_irq_number[channel];

            printk("vlddes = %d, rx_irq_number[%d] = %d, total_rx_irq_number = %d\n", WRX_DMA_CHANNEL_CONFIG(channel)->vlddes, channel, rx_irq_number[channel], total_rx_irq_number);
        }

        channel_mask <<= 1;
        channel++;
    }

    channel_mask = 1 << (16 + QSB_QUEUE_NUMBER_BASE);
    channel = QSB_QUEUE_NUMBER_BASE;
    while ( channel - QSB_QUEUE_NUMBER_BASE < ppe_dev.dma.tx_total_channel_used )
    {
        if ( (*MBOX_IGU1_ISR & channel_mask) )
        {
            /* TX: clear the IRQ */
            *MBOX_IGU1_ISRC = channel_mask;
            printk(" TX: *MBOX_IGU1_ISR = 0x%08X\n", *MBOX_IGU1_ISR);
            mailbox_tx_irq_handler(channel);
        }

        channel_mask <<= 1;
        channel++;
    }

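    /*
     * Drain the shadowed RX work. Channels are polled round-robin: CBR
     * and OAM are drained eagerly, a WFQ channel (VBR_RT, VBR_NRT, AVR)
     * only runs when no peer with an equal or larger weight has a valid
     * descriptor pending, and UBR runs last. After each PDU the consumed
     * length is charged in cells and the weight is replenished from its
     * default once used up.
     */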
#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    channel = 0;
    while ( total_rx_irq_number )
    {
        switch ( channel )
        {
        case RX_DMA_CH_CBR:
        case RX_DMA_CH_OAM:
            /* handle as soon as possible */
            while ( rx_irq_number[channel] != 0 && mailbox_rx_irq_handler(channel, NULL) == 0 )
            {
                rx_irq_number[channel]--;
                total_rx_irq_number--;
                printk("RX_DMA_CH_CBR, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
                /* signal firmware that the descriptor is updated */
                mailbox_signal(channel, 0);
            }
            break;
        case RX_DMA_CH_VBR_RT:
            /* WFQ */
            if ( rx_irq_number[RX_DMA_CH_VBR_RT] != 0
                 && (rx_irq_number[RX_DMA_CH_VBR_NRT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_NRT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT])
                 && (rx_irq_number[RX_DMA_CH_AVR] == 0 || !check_desc_valid(RX_DMA_CH_AVR) || ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT]) )
            {
                unsigned int len;

                if ( mailbox_rx_irq_handler(RX_DMA_CH_VBR_RT, &len) == 0 )
                {
                    rx_irq_number[RX_DMA_CH_VBR_RT]--;
                    total_rx_irq_number--;
                    printk("RX_DMA_CH_VBR_RT, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
                    /* signal firmware that the descriptor is updated */
                    mailbox_signal(channel, 0);

                    /* charge the consumed weight in cells */
                    len = (len + CELL_SIZE - 1) / CELL_SIZE;
                    if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] <= len )
                        ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] + ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] - len;
                }
            }
            break;
        case RX_DMA_CH_VBR_NRT:
            /* WFQ */
            if ( rx_irq_number[RX_DMA_CH_VBR_NRT] != 0
                 && (rx_irq_number[RX_DMA_CH_VBR_RT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_RT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT])
                 && (rx_irq_number[RX_DMA_CH_AVR] == 0 || !check_desc_valid(RX_DMA_CH_AVR) || ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] < ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT]) )
            {
                unsigned int len;

                if ( mailbox_rx_irq_handler(RX_DMA_CH_VBR_NRT, &len) == 0 )
                {
                    rx_irq_number[RX_DMA_CH_VBR_NRT]--;
                    total_rx_irq_number--;
                    printk("RX_DMA_CH_VBR_NRT, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
                    /* signal firmware that the descriptor is updated */
                    mailbox_signal(channel, 0);

                    /* charge the consumed weight in cells */
                    len = (len + CELL_SIZE - 1) / CELL_SIZE;
                    if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] <= len )
                        ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] + ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] - len;
                }
            }
            break;
        case RX_DMA_CH_AVR:
            /* WFQ */
            if ( rx_irq_number[RX_DMA_CH_AVR] != 0
                 && (rx_irq_number[RX_DMA_CH_VBR_RT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_RT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] < ppe_dev.dma.rx_weight[RX_DMA_CH_AVR])
                 && (rx_irq_number[RX_DMA_CH_VBR_NRT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_NRT) || ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] < ppe_dev.dma.rx_weight[RX_DMA_CH_AVR]) )
            {
                unsigned int len;

                if ( mailbox_rx_irq_handler(RX_DMA_CH_AVR, &len) == 0 )
                {
                    rx_irq_number[RX_DMA_CH_AVR]--;
                    total_rx_irq_number--;
                    printk("RX_DMA_CH_AVR, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
                    /* signal firmware that the descriptor is updated */
                    mailbox_signal(channel, 0);

                    /* charge the consumed weight in cells */
                    len = (len + CELL_SIZE - 1) / CELL_SIZE;
                    if ( ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] <= len )
                        ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] + ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] - len;
                }
            }
            break;
        case RX_DMA_CH_UBR:
        default:
            /* handle only when the other channels are drained or unable to proceed */
            if ( rx_irq_number[channel] != 0
                 && (rx_irq_number[RX_DMA_CH_VBR_RT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_RT))
                 && (rx_irq_number[RX_DMA_CH_VBR_NRT] == 0 || !check_desc_valid(RX_DMA_CH_VBR_NRT))
                 && (rx_irq_number[RX_DMA_CH_AVR] == 0 || !check_desc_valid(RX_DMA_CH_AVR)) )
            {
                if ( mailbox_rx_irq_handler(channel, NULL) == 0 )
                {
                    rx_irq_number[channel]--;
                    total_rx_irq_number--;
                    printk("RX_DMA_CH_UBR, total_rx_irq_number = %d, rx_irq_number[%d] = %d\n", total_rx_irq_number, channel, rx_irq_number[channel]);
                    /* signal firmware that the descriptor is updated */
                    mailbox_signal(channel, 0);
                }
            }
            break;
        }

        if ( ++channel == ppe_dev.dma.rx_total_channel_used )
            channel = 0;
    }
#else
    channel = 0;
    while ( total_rx_irq_number )
    {
        while ( rx_irq_number[channel] != 0 && mailbox_rx_irq_handler(channel, NULL) == 0 )
        {
            rx_irq_number[channel]--;
            total_rx_irq_number--;
            /* signal firmware that the descriptor is updated */
            mailbox_signal(channel, 0);
        }

        if ( ++channel == ppe_dev.dma.rx_total_channel_used )
            channel = 0;
    }
#endif // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS

    return IRQ_RETVAL(1);
}