#include <asm/mach-ifxmips/cgu.h>
#include "common.h"

#include "ifx_ppe_fw.h"
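
/*
 * set_qsb - program the QSB shaping parameters of one TX connection from
 * the given ATM QoS descriptor: PCR limiter, weighted fair queueing factor
 * and, for the VBR classes, the SCR leaky bucket (TS/tauS), then write them
 * into the queue parameter and queue VBR parameter tables.
 */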
static void set_qsb(struct atm_vcc *vcc, struct atm_qos *qos, unsigned int connection)
{

    u32 qsb_clk = cgu_get_fpi_bus_clock(2); /* FPI configuration 2 (slow FPI bus) */
    union qsb_queue_parameter_table qsb_queue_parameter_table = {{0}};
    union qsb_queue_vbr_parameter_table qsb_queue_vbr_parameter_table = {{0}};
    u32 tmp;

    /*
     * Peak Cell Rate (PCR) Limiter
     */
    if ( qos->txtp.max_pcr == 0 )
        qsb_queue_parameter_table.bit.tp = 0;   /* disable PCR limiter */
    else
    {
        /* peak cell rate would be slightly lower than requested [maximum_rate / pcr = (qsb_clock / 8) * (time_step / 4) / pcr] */
        tmp = ((qsb_clk * ppe_dev.qsb.tstepc) >> 5) / qos->txtp.max_pcr + 1;
        /* check if overflow takes place */
        qsb_queue_parameter_table.bit.tp = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
    }
    /*
     * Weighted Fair Queueing Factor (WFQF)
     */
    switch ( qos->txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        /* real time queue gets weighted fair queueing bypass */
        qsb_queue_parameter_table.bit.wfqf = 0;
        break;
    case ATM_VBR_NRT:
    case ATM_UBR_PLUS:
        /* WFQF calculation here is based on virtual cell rates, to reduce granularity for high rates */
        /* WFQF is maximum cell rate / guaranteed cell rate */
        /* wfqf = qsb_minimum_cell_rate * QSB_WFQ_NONUBR_MAX / requested_minimum_peak_cell_rate */
        if ( qos->txtp.min_pcr == 0 )
            qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
        else
        {
            tmp = QSB_GCR_MIN * QSB_WFQ_NONUBR_MAX / qos->txtp.min_pcr;
            if ( tmp == 0 )
                qsb_queue_parameter_table.bit.wfqf = 1;
            else if ( tmp > QSB_WFQ_NONUBR_MAX )
                qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_NONUBR_MAX;
            else
                qsb_queue_parameter_table.bit.wfqf = tmp;
        }
        break;
    default:
    case ATM_UBR:
        qsb_queue_parameter_table.bit.wfqf = QSB_WFQ_UBR_BYPASS;
    }
    /*
     * Sustained Cell Rate (SCR) Leaky Bucket Shaper VBR.0/VBR.1
     */
    if ( qos->txtp.traffic_class == ATM_VBR_RT || qos->txtp.traffic_class == ATM_VBR_NRT )
    {
        if ( qos->txtp.scr == 0 )
        {
            /* disable shaper */
            qsb_queue_vbr_parameter_table.bit.taus = 0;
            qsb_queue_vbr_parameter_table.bit.ts = 0;
        }
        else
        {
            /* Cell Loss Priority (CLP) */
            if ( (vcc->atm_options & ATM_ATMOPT_CLP) )
                /* CLP1 */
                qsb_queue_parameter_table.bit.vbr = 1;
            else
                /* CLP0 */
                qsb_queue_parameter_table.bit.vbr = 0;
            /* Rate Shaper Parameter (TS) and Burst Tolerance Parameter for SCR (tauS) */
            tmp = ((qsb_clk * ppe_dev.qsb.tstepc) >> 5) / qos->txtp.scr + 1;
            qsb_queue_vbr_parameter_table.bit.ts = tmp > QSB_TP_TS_MAX ? QSB_TP_TS_MAX : tmp;
            tmp = (qos->txtp.mbs - 1) * (qsb_queue_vbr_parameter_table.bit.ts - qsb_queue_parameter_table.bit.tp) / 64;
            if ( tmp == 0 )
                qsb_queue_vbr_parameter_table.bit.taus = 1;
            else if ( tmp > QSB_TAUS_MAX )
                qsb_queue_vbr_parameter_table.bit.taus = QSB_TAUS_MAX;
            else
                qsb_queue_vbr_parameter_table.bit.taus = tmp;
        }
    }
    else
    {
        qsb_queue_vbr_parameter_table.bit.taus = 0;
        qsb_queue_vbr_parameter_table.bit.ts = 0;
    }

    /* Queue Parameter Table (QPT) */
    *QSB_RTM = QSB_RTM_DM_SET(QSB_QPT_SET_MASK);
    *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_QPT) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(connection);
    /* Queue VBR Parameter Table (QVPT) */
    *QSB_RTM = QSB_RTM_DM_SET(QSB_QVPT_SET_MASK);
    *QSB_RTD = QSB_RTD_TTV_SET(qsb_queue_vbr_parameter_table.dword);
    *QSB_RAMAC = QSB_RAMAC_RW_SET(QSB_RAMAC_RW_WRITE) | QSB_RAMAC_TSEL_SET(QSB_RAMAC_TSEL_VBR) | QSB_RAMAC_LH_SET(QSB_RAMAC_LH_LOW) | QSB_RAMAC_TESEL_SET(connection);

}

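/* u64_add_u32 - add a 32-bit value to a software 64-bit counter (ppe_u64_t), propagating the carry into the high word */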
static inline void u64_add_u32(ppe_u64_t opt1, u32 opt2, ppe_u64_t *ret)
{
    ret->l = opt1.l + opt2;
    if ( ret->l < opt1.l || ret->l < opt2 )
        ret->h++;
}

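/* find_vcc - return the connection (queue) index carrying the given VCC on its ATM port, or -1 if the VCC is not registered */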
int find_vcc(struct atm_vcc *vcc)
{
    int i;
    struct connection *connection = ppe_dev.connection;
    int max_connections = ppe_dev.port[(int)vcc->dev->dev_data].max_connections;
    u32 occupation_table = ppe_dev.port[(int)vcc->dev->dev_data].connection_table;
    int base = ppe_dev.port[(int)vcc->dev->dev_data].connection_base;
    for ( i = 0; i < max_connections; i++, base++ )
        if ( (occupation_table & (1 << i))
            && connection[base].vcc == vcc )
            return base;
    return -1;
}

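/* find_vpi - return the first open connection index whose VCC matches the given VPI (any VCI), or -1 if none is found */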
int find_vpi(unsigned int vpi)
{
    int i, j;
    struct connection *connection = ppe_dev.connection;
    struct port *port;
    int base;

    port = ppe_dev.port;
    for ( i = 0; i < ATM_PORT_NUMBER; i++, port++ )
    {
        base = port->connection_base;
        for ( j = 0; j < port->max_connections; j++, base++ )
            if ( (port->connection_table & (1 << j))
                && connection[base].vcc != NULL
                && vpi == connection[base].vcc->vpi )
                return base;
    }
    return -1;
}

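/* find_vpivci - return the open connection index whose VCC matches both the given VPI and VCI, or -1 if none is found */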
int find_vpivci(unsigned int vpi, unsigned int vci)
{
    int i, j;
    struct connection *connection = ppe_dev.connection;
    struct port *port;
    int base;

    port = ppe_dev.port;
    for ( i = 0; i < ATM_PORT_NUMBER; i++, port++ )
    {
        base = port->connection_base;
        for ( j = 0; j < port->max_connections; j++, base++ )
            if ( (port->connection_table & (1 << j))
                && connection[base].vcc != NULL
                && vpi == connection[base].vcc->vpi
                && vci == connection[base].vcc->vci )
                return base;
    }
    return -1;
}


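/* clear_htu_entry - invalidate the HTU entry of a connection so the hardware stops matching cells against its VPI/VCI */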
static inline void clear_htu_entry(unsigned int connection)
{
    HTU_ENTRY(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER)->vld = 0;
}

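/*
 * set_htu_entry - fill in the HTU entry, mask and result words for a
 * connection so incoming cells with this VPI/VCI are steered to its RX
 * queue; 'aal5' selects AAL5 handling (nonzero) versus raw AAL0 cells.
 */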
static inline void set_htu_entry(unsigned int vpi, unsigned int vci, unsigned int connection, int aal5)
{
    struct htu_entry htu_entry = {  res1:       0x00,
                                    pid:        ppe_dev.connection[connection].port & 0x01,
                                    vpi:        vpi,
                                    vci:        vci,
                                    pti:        0x00,
                                    vld:        0x01};

    struct htu_mask htu_mask = {    set:        0x03,
                                    pid_mask:   0x02,
                                    vpi_mask:   0x00,
                                    vci_mask:   0x0000,
                                    pti_mask:   0x03,   // 0xx, user data
                                    clear:      0x00};

    struct htu_result htu_result = {res1:       0x00,
                                    cellid:     connection,
                                    res2:       0x00,
                                    type:       aal5 ? 0x00 : 0x01,
                                    ven:        0x01,
                                    res3:       0x00,
                                    qid:        connection};

    *HTU_RESULT(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER) = htu_result;
    *HTU_MASK(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER) = htu_mask;
    *HTU_ENTRY(connection - QSB_QUEUE_NUMBER_BASE + OAM_HTU_ENTRY_NUMBER) = htu_entry;
}

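/*
 * alloc_tx_connection - reserve the next free TX descriptor slot of a
 * connection's ring and return its index into the global TX descriptor
 * array, or -1 if the ring is full.
 */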
int alloc_tx_connection(int connection)
{
    unsigned long sys_flag;
    int desc_base;

    if ( ppe_dev.dma.tx_desc_alloc_pos[connection] == ppe_dev.dma.tx_desc_release_pos[connection] && ppe_dev.dma.tx_desc_alloc_flag[connection] )
        return -1;

    /* amend descriptor pointer and allocation number */
    local_irq_save(sys_flag);
    desc_base = ppe_dev.dma.tx_descriptor_number * (connection - QSB_QUEUE_NUMBER_BASE) + ppe_dev.dma.tx_desc_alloc_pos[connection];
    if ( ++ppe_dev.dma.tx_desc_alloc_pos[connection] == ppe_dev.dma.tx_descriptor_number )
        ppe_dev.dma.tx_desc_alloc_pos[connection] = 0;
    ppe_dev.dma.tx_desc_alloc_flag[connection] = 1;
    local_irq_restore(sys_flag);

    return desc_base;
}


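/*
 * ppe_open - atmdev_ops open handler: validate the AAL and the requested
 * TX bandwidth, allocate a free connection on the port, assign the RX DMA
 * channel (when RX QoS is enabled), program QSB shaping, enable the
 * mailbox interrupts and install the HTU entry for the new VCC.
 */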
int ppe_open(struct atm_vcc *vcc)
{
    int ret;
    struct port *port = &ppe_dev.port[(int)vcc->dev->dev_data];
    int conn;
    int f_enable_irq = 0;
    int i;
    printk("%s:%s[%d] removed 2 args from signature\n", __FILE__, __func__, __LINE__);

    printk("ppe_open");

    if ( vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0 )
        return -EPROTONOSUPPORT;

    down(&ppe_dev.sem);

    /* check bandwidth */
    if ( (vcc->qos.txtp.traffic_class == ATM_CBR && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_VBR_RT && vcc->qos.txtp.max_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_VBR_NRT && vcc->qos.txtp.pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate))
        || (vcc->qos.txtp.traffic_class == ATM_UBR_PLUS && vcc->qos.txtp.min_pcr > (port->tx_max_cell_rate - port->tx_current_cell_rate)) )
    {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }

    printk("alloc vpi = %d, vci = %d\n", vcc->vpi, vcc->vci);

    /* check existing vpi,vci */
    conn = find_vpivci(vcc->vpi, vcc->vci);
    if ( conn >= 0 )
    {
        ret = -EADDRINUSE;
        goto PPE_OPEN_EXIT;
    }

    /* check whether the irq needs to be enabled */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( ppe_dev.port[i].max_connections != 0 && ppe_dev.port[i].connection_table != 0 )
            break;
    if ( i == ATM_PORT_NUMBER )
        f_enable_irq = 1;

    /* allocate connection */
    for ( i = 0, conn = port->connection_base; i < port->max_connections; i++, conn++ )
        if ( !(port->connection_table & (1 << i)) )
        {
            port->connection_table |= 1 << i;
            ppe_dev.connection[conn].vcc = vcc;
            break;
        }
    if ( i == port->max_connections )
    {
        ret = -EINVAL;
        goto PPE_OPEN_EXIT;
    }

#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    /* assign DMA channel and setup weight value for RX QoS */
    switch ( vcc->qos.rxtp.traffic_class )
    {
    case ATM_CBR:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_CBR;
        break;
    case ATM_VBR_RT:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_VBR_RT;
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] += vcc->qos.rxtp.max_pcr;
        ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] += vcc->qos.rxtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_VBR_NRT;
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] += vcc->qos.rxtp.pcr;
        ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] += vcc->qos.rxtp.pcr;
        break;
    case ATM_ABR:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_AVR;
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] += vcc->qos.rxtp.min_pcr;
        ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] += vcc->qos.rxtp.min_pcr;
        break;
    case ATM_UBR_PLUS:
    default:
        ppe_dev.connection[conn].rx_dma_channel = RX_DMA_CH_UBR;
        break;
    }

    /* update RX queue configuration table */
    WRX_QUEUE_CONFIG(conn)->dmach = ppe_dev.connection[conn].rx_dma_channel;

    printk("ppe_open: QID %d, DMA %d\n", conn, WRX_QUEUE_CONFIG(conn)->dmach);

    printk("conn = %d, dmach = %d", conn, WRX_QUEUE_CONFIG(conn)->dmach);
#endif  // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS

    /* reserve bandwidth */
    switch ( vcc->qos.txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate += vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate += vcc->qos.txtp.pcr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate += vcc->qos.txtp.min_pcr;
        break;
    }

    /* set qsb */
    set_qsb(vcc, &vcc->qos, conn);

    /* update atm_vcc structure */
    vcc->itf = (int)vcc->dev->dev_data;

    set_bit(ATM_VF_READY, &vcc->flags);

    /* enable irq */
    printk("ppe_open: enable_irq\n");
    if ( f_enable_irq )
        enable_irq(IFXMIPS_PPE_MBOX_INT);

    /* enable mailbox */
    *MBOX_IGU1_ISRC = (1 << conn) | (1 << (conn + 16));
    *MBOX_IGU1_IER |= (1 << conn) | (1 << (conn + 16));
    *MBOX_IGU3_ISRC = (1 << conn) | (1 << (conn + 16));
    *MBOX_IGU3_IER |= (1 << conn) | (1 << (conn + 16));

    /* set htu entry */
    set_htu_entry(vcc->vpi, vcc->vci, conn, vcc->qos.aal == ATM_AAL5 ? 1 : 0);

    ret = 0;

    printk("ppe_open(%d.%d): conn = %d, ppe_dev.dma = %08X\n", vcc->vpi, vcc->vci, conn, (u32)&ppe_dev.dma.rx_descriptor_number);


PPE_OPEN_EXIT:
    up(&ppe_dev.sem);

    printk("open ATM itf = %d, vpi = %d, vci = %d, ret = %d", (int)vcc->dev->dev_data, (int)vcc->vpi, vcc->vci, ret);
    return ret;
}

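/*
 * ppe_close - atmdev_ops close handler: look up the connection of the VCC,
 * clear its HTU entry, free the connection slot, drop the RX QoS weights
 * and the reserved TX bandwidth, and disable the mailbox irq when no
 * connection is left open.
 */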
void ppe_close(struct atm_vcc *vcc)
{
    int conn;
    struct port *port;
    struct connection *connection;
    int i;

    if ( vcc == NULL )
        return;

    down(&ppe_dev.sem);

    /* get connection id */
    conn = find_vcc(vcc);
    if ( conn < 0 )
    {
        printk("can't find vcc\n");
        goto PPE_CLOSE_EXIT;
    }
    if ( !((Atm_Priv *)vcc)->on )
        goto PPE_CLOSE_EXIT;
    connection = &ppe_dev.connection[conn];
    port = &ppe_dev.port[connection->port];

    /* clear htu */
    clear_htu_entry(conn);

    /* release connection */
    port->connection_table &= ~(1 << (conn - port->connection_base));
    connection->vcc = NULL;
    connection->access_time.tv_sec = 0;
    connection->access_time.tv_nsec = 0;
    connection->aal5_vcc_crc_err = 0;
    connection->aal5_vcc_oversize_sdu = 0;

    /* disable irq */
    for ( i = 0; i < ATM_PORT_NUMBER; i++ )
        if ( ppe_dev.port[i].max_connections != 0 && ppe_dev.port[i].connection_table != 0 )
            break;
    if ( i == ATM_PORT_NUMBER )
        disable_irq(IFXMIPS_PPE_MBOX_INT);

    *MBOX_IGU1_ISRC = (1 << conn) | (1 << (conn + 16));

#if defined(ENABLE_RX_QOS) && ENABLE_RX_QOS
    /* remove weight value from RX DMA channel */
    switch ( vcc->qos.rxtp.traffic_class )
    {
    case ATM_VBR_RT:
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] -= vcc->qos.rxtp.max_pcr;
        if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] > ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT] )
            ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_RT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_RT];
        break;
    case ATM_VBR_NRT:
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] -= vcc->qos.rxtp.pcr;
        if ( ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] > ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT] )
            ppe_dev.dma.rx_weight[RX_DMA_CH_VBR_NRT] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_VBR_NRT];
        break;
    case ATM_ABR:
        ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] -= vcc->qos.rxtp.min_pcr;
        if ( ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] > ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR] )
            ppe_dev.dma.rx_weight[RX_DMA_CH_AVR] = ppe_dev.dma.rx_default_weight[RX_DMA_CH_AVR];
        break;
    case ATM_CBR:
    case ATM_UBR_PLUS:
    default:
        break;
    }
#endif  // defined(ENABLE_RX_QOS) && ENABLE_RX_QOS

    /* release bandwidth */
    switch ( vcc->qos.txtp.traffic_class )
    {
    case ATM_CBR:
    case ATM_VBR_RT:
        port->tx_current_cell_rate -= vcc->qos.txtp.max_pcr;
        break;
    case ATM_VBR_NRT:
        port->tx_current_cell_rate -= vcc->qos.txtp.pcr;
        break;
    case ATM_UBR_PLUS:
        port->tx_current_cell_rate -= vcc->qos.txtp.min_pcr;
        break;
    }

    /* idle for a while to let parallel operation finish */
    for ( i = 0; i < IDLE_CYCLE_NUMBER; i++ );
    ((Atm_Priv *)vcc)->on = 0;

PPE_CLOSE_EXIT:
    up(&ppe_dev.sem);
}

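/* ppe_ioctl - no device-specific ioctls are implemented; always returns -ENOTTY */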
int ppe_ioctl(struct atm_dev *dev, unsigned int cmd, void *arg)
{
    return -ENOTTY;
}

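/*
 * ppe_send - atmdev_ops send handler: look up the connection of the VCC,
 * reserve a TX descriptor and hand the skb to the PPE. For AAL5 an in-band
 * header (cell header plus AAL5 trailer fields) is pushed in front of the
 * payload; for AAL0 the skb is sent as a raw cell. The skb is re-allocated
 * if it lacks headroom or is not DMA aligned.
 */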
int ppe_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
    int ret;
    int conn;
    int desc_base;
    register struct tx_descriptor reg_desc;
    struct tx_descriptor *desc;


    printk("ppe_send");
    printk("ppe_send\n");
    printk("skb->users = %d\n", skb->users.counter);

    if ( vcc == NULL || skb == NULL )
        return -EINVAL;

//    down(&ppe_dev.sem);

    ATM_SKB(skb)->vcc = vcc;
    conn = find_vcc(vcc);
//    if ( conn != 1 )
    printk("ppe_send: conn = %d\n", conn);
    if ( conn < 0 )
    {
        ret = -EINVAL;
        goto FIND_VCC_FAIL;
    }

    printk("find_vcc");

    if ( vcc->qos.aal == ATM_AAL5 )
    {
        int byteoff;
        int datalen;
        struct tx_inband_header *header;

        /* allocate descriptor */
        desc_base = alloc_tx_connection(conn);
        if ( desc_base < 0 )
        {
            ret = -EIO;
            goto ALLOC_TX_CONNECTION_FAIL;
        }
        desc = &ppe_dev.dma.tx_descriptor_base[desc_base];

        /* load descriptor from memory */
        reg_desc = *desc;

        datalen = skb->len;
        byteoff = (u32)skb->data & (DMA_ALIGNMENT - 1);
        if ( skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH )
        {
            struct sk_buff *new_skb;

            printk("skb_headroom(skb) < byteoff + TX_INBAND_HEADER_LENGTH");
            printk("skb_headroom(skb 0x%08X, skb->data 0x%08X) (%d) < byteoff (%d) + TX_INBAND_HEADER_LENGTH (%d)\n", (u32)skb, (u32)skb->data, skb_headroom(skb), byteoff, TX_INBAND_HEADER_LENGTH);

            new_skb = alloc_skb_tx(datalen);
            if ( new_skb == NULL )
            {
                printk("alloc_skb_tx: fail\n");
                ret = -ENOMEM;
                goto ALLOC_SKB_TX_FAIL;
            }
            ATM_SKB(new_skb)->vcc = NULL;
            skb_put(new_skb, datalen);
            memcpy(new_skb->data, skb->data, datalen);
            atm_free_tx_skb_vcc(skb);
            skb = new_skb;
            byteoff = (u32)skb->data & (DMA_ALIGNMENT - 1);
        }
        else
        {
            printk("skb_headroom(skb) >= byteoff + TX_INBAND_HEADER_LENGTH");
        }
        printk("before skb_push, skb->data = 0x%08X", (u32)skb->data);
        skb_push(skb, byteoff + TX_INBAND_HEADER_LENGTH);
        printk("after skb_push, skb->data = 0x%08X", (u32)skb->data);

        header = (struct tx_inband_header *)(u32)skb->data;
        printk("header = 0x%08X", (u32)header);

        /* setup inband trailer */
        header->uu = 0;
        header->cpi = 0;
        header->pad = ppe_dev.aal5.padding_byte;
        header->res1 = 0;

        /* setup cell header */
        header->clp = (vcc->atm_options & ATM_ATMOPT_CLP) ? 1 : 0;
        header->pti = ATM_PTI_US0;
        header->vci = vcc->vci;
        header->vpi = vcc->vpi;
        header->gfc = 0;

        /* setup descriptor */
        reg_desc.dataptr = (u32)skb->data >> 2;
        reg_desc.datalen = datalen;
        reg_desc.byteoff = byteoff;
        reg_desc.iscell = 0;

        printk("setup header, datalen = %d, byteoff = %d", reg_desc.datalen, reg_desc.byteoff);

        UPDATE_VCC_STAT(conn, tx_pdu, 1);

        if ( vcc->stats )
            atomic_inc(&vcc->stats->tx);
    }
    else
    {
        /* allocate descriptor */
        desc_base = alloc_tx_connection(conn);
        if ( desc_base < 0 )
        {
            ret = -EIO;
            goto ALLOC_TX_CONNECTION_FAIL;
        }
        desc = &ppe_dev.dma.tx_descriptor_base[desc_base];

        /* load descriptor from memory */
        reg_desc = *desc;

        /* if data pointer is not aligned, allocate new sk_buff */
        if ( ((u32)skb->data & (DMA_ALIGNMENT - 1)) )
        {
            struct sk_buff *new_skb;

            printk("skb->data not aligned\n");

            new_skb = alloc_skb_tx(skb->len);
            if ( new_skb == NULL )
            {
                ret = -ENOMEM;
                goto ALLOC_SKB_TX_FAIL;
            }
            ATM_SKB(new_skb)->vcc = NULL;
            skb_put(new_skb, skb->len);
            memcpy(new_skb->data, skb->data, skb->len);
            atm_free_tx_skb_vcc(skb);
            skb = new_skb;
        }

        reg_desc.dataptr = (u32)skb->data >> 2;
        reg_desc.datalen = skb->len;
        reg_desc.byteoff = 0;
        reg_desc.iscell = 1;

        if ( vcc->stats )
            atomic_inc(&vcc->stats->tx);
    }

    reg_desc.own = 1;
    reg_desc.c = 1;

    printk("update descriptor send pointer, desc = 0x%08X", (u32)desc);

    ppe_dev.dma.tx_skb_pointers[desc_base] = skb;
    *desc = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);

    mailbox_signal(conn, 1);

    printk("ppe_send: success");
//    up(&ppe_dev.sem);

    return 0;

FIND_VCC_FAIL:
    printk("FIND_VCC_FAIL\n");

//    up(&ppe_dev.sem);
    ppe_dev.mib.wtx_err_pdu++;
    atm_free_tx_skb_vcc(skb);

    return ret;

ALLOC_SKB_TX_FAIL:
    printk("ALLOC_SKB_TX_FAIL\n");

//    up(&ppe_dev.sem);
    if ( vcc->qos.aal == ATM_AAL5 )
    {
        UPDATE_VCC_STAT(conn, tx_err_pdu, 1);
        ppe_dev.mib.wtx_err_pdu++;
    }
    if ( vcc->stats )
        atomic_inc(&vcc->stats->tx_err);
    atm_free_tx_skb_vcc(skb);

    return ret;

ALLOC_TX_CONNECTION_FAIL:
    printk("ALLOC_TX_CONNECTION_FAIL\n");

//    up(&ppe_dev.sem);
    if ( vcc->qos.aal == ATM_AAL5 )
    {
        UPDATE_VCC_STAT(conn, tx_sw_drop_pdu, 1);
        ppe_dev.mib.wtx_drop_pdu++;
    }
    if ( vcc->stats )
        atomic_inc(&vcc->stats->tx_err);
    atm_free_tx_skb_vcc(skb);

    return ret;
}

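/*
 * ppe_send_oam - transmit a single OAM cell: validate that the cell's
 * VPI/VCI belongs to an open connection (F5 segment/end-to-end or F4),
 * copy it into a freshly allocated skb and queue it on the port's OAM TX
 * queue (or on the connection's own queue when no dedicated OAM queue is
 * configured).
 */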
int ppe_send_oam(struct atm_vcc *vcc, void *cell, int flags)
{
    int conn;
    struct uni_cell_header *uni_cell_header = (struct uni_cell_header *)cell;
    int desc_base;
    struct sk_buff *skb;
    register struct tx_descriptor reg_desc;
    struct tx_descriptor *desc;

    printk("ppe_send_oam");

    if ( ((uni_cell_header->pti == ATM_PTI_SEGF5 || uni_cell_header->pti == ATM_PTI_E2EF5)
        && find_vpivci(uni_cell_header->vpi, uni_cell_header->vci) < 0)
        || ((uni_cell_header->vci == 0x03 || uni_cell_header->vci == 0x04)
        && find_vpi(uni_cell_header->vpi) < 0) )
        return -EINVAL;

#if OAM_TX_QUEUE_NUMBER_PER_PORT != 0
    /* get queue ID of OAM TX queue, and the TX DMA channel ID is the same as queue ID */
    conn = ppe_dev.port[(int)vcc->dev->dev_data].oam_tx_queue;
#else
    /* find queue ID */
    conn = find_vcc(vcc);
    if ( conn < 0 )
    {
        printk("OAM not find queue\n");
//        up(&ppe_dev.sem);
        return -EINVAL;
    }
#endif  // OAM_TX_QUEUE_NUMBER_PER_PORT != 0

    /* allocate descriptor */
    desc_base = alloc_tx_connection(conn);
    if ( desc_base < 0 )
    {
        printk("OAM not alloc tx connection\n");
//        up(&ppe_dev.sem);
        return -EIO;
    }

    desc = &ppe_dev.dma.tx_descriptor_base[desc_base];

    /* load descriptor from memory */
    reg_desc = *(struct tx_descriptor *)desc;

    /* allocate sk_buff */
    skb = alloc_skb_tx(CELL_SIZE);
    if ( skb == NULL )
    {
//        up(&ppe_dev.sem);
        return -ENOMEM;
    }
#if OAM_TX_QUEUE_NUMBER_PER_PORT != 0
    ATM_SKB(skb)->vcc = NULL;
#else
    ATM_SKB(skb)->vcc = vcc;
#endif  // OAM_TX_QUEUE_NUMBER_PER_PORT != 0

    /* copy data */
    skb_put(skb, CELL_SIZE);
    memcpy(skb->data, cell, CELL_SIZE);

    /* setup descriptor */
    reg_desc.dataptr = (u32)skb->data >> 2;
    reg_desc.datalen = CELL_SIZE;
    reg_desc.byteoff = 0;
    reg_desc.iscell = 1;
    reg_desc.own = 1;
    reg_desc.c = 1;

    /* update descriptor send pointer */
    ppe_dev.dma.tx_skb_pointers[desc_base] = skb;

    /* write descriptor to memory and write back cache */
    *(struct tx_descriptor *)desc = reg_desc;
    dma_cache_wback((unsigned long)skb->data, skb->len);

    /* signal PPE */
    mailbox_signal(conn, 1);

    return 0;
}

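/* ppe_change_qos - atmdev_ops change_qos handler: re-program the QSB shaping parameters of an open VCC with the new QoS descriptor */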
int ppe_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags)
{
    int conn;
    printk("%s:%s[%d]\n", __FILE__, __func__, __LINE__);

    if ( vcc == NULL || qos == NULL )
        return -EINVAL;
    conn = find_vcc(vcc);
    if ( conn < 0 )
        return -EINVAL;
    set_qsb(vcc, qos, conn);

    return 0;
}

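/*
 * init_chip - power up the PPE module in the PMU, configure the EMA
 * command/data buffers, and clear and mask all mailbox interrupt sources
 * before the PP32 firmware is started.
 */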
static inline void init_chip(void)
{
    /* enable PPE module in PMU */
    *(unsigned long *)0xBF10201C &= ~((1 << 15) | (1 << 13) | (1 << 9));

    *EMA_CMDCFG = (EMA_CMD_BUF_LEN << 16) | (EMA_CMD_BASE_ADDR >> 2);
    *EMA_DATACFG = (EMA_DATA_BUF_LEN << 16) | (EMA_DATA_BASE_ADDR >> 2);
    *EMA_IER = 0x000000FF;
    *EMA_CFG = EMA_READ_BURST | (EMA_WRITE_BURST << 2);

    /* initialize mailbox: clear pending interrupts and mask all sources */
    *MBOX_IGU1_ISRC = 0xFFFFFFFF;
    *MBOX_IGU1_IER = 0x00000000;
    *MBOX_IGU3_ISRC = 0xFFFFFFFF;
    *MBOX_IGU3_IER = 0x00000000;
}

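/*
 * pp32_download_code - copy the PP32 firmware code and data images (both
 * word-aligned arrays of 32-bit words) into the PPE code and data memory
 * through the FPI window selected via CDM_CFG.
 */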
int pp32_download_code(u32 *code_src, unsigned int code_dword_len, u32 *data_src, unsigned int data_dword_len)
{
    u32 reg_old_value;
    volatile u32 *dest;

    if ( code_src == 0 || ((unsigned long)code_src & 0x03) != 0
        || data_src == 0 || ((unsigned long)data_src & 0x03) )
        return -EINVAL;

    /* save the old value of CDM_CFG and set PPE code memory to FPI bus access mode */
    reg_old_value = *CDM_CFG;
    if ( code_dword_len <= 4096 )
        *CDM_CFG = CDM_CFG_RAM1_SET(0x00) | CDM_CFG_RAM0_SET(0x00);
    else
        *CDM_CFG = CDM_CFG_RAM1_SET(0x01) | CDM_CFG_RAM0_SET(0x00);

    /* copy code */
    dest = CDM_CODE_MEMORY_RAM0_ADDR(0);
    while ( code_dword_len-- > 0 )
        *dest++ = *code_src++;

    /* copy data */
    dest = PP32_DATA_MEMORY_RAM1_ADDR(0);
    while ( data_dword_len-- > 0 )
        *dest++ = *data_src++;

    return 0;
}

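/*
 * pp32_start - bring up the PP32 packet engine: initialize the chip,
 * download the built-in firmware image, start the core via the debug
 * control register and busy-wait briefly while it initializes itself.
 */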
int pp32_start(void)
{
    int ret;
    register int i;

    init_chip();

    /* download firmware */
    ret = pp32_download_code(firmware_binary_code, sizeof(firmware_binary_code) / sizeof(*firmware_binary_code), firmware_binary_data, sizeof(firmware_binary_data) / sizeof(*firmware_binary_data));
    if ( ret )
        return ret;

    /* run PP32 */
    *PP32_DBG_CTRL = DBG_CTRL_START_SET(1);

    /* idle for a while to let PP32 init itself */
    for ( i = 0; i < IDLE_CYCLE_NUMBER; i++ );

    return 0;
}

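/* pp32_stop - halt the PP32 packet engine via the debug control register */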
void pp32_stop(void)
{
    /* halt PP32 */
    *PP32_DBG_CTRL = DBG_CTRL_STOP_SET(1);
}