/*******************************************************************************
Copyright (C) Marvell International Ltd. and its affiliates

This software file (the "File") is owned and distributed by Marvell
International Ltd. and/or its affiliates ("Marvell") under the following
alternative licensing terms. Once you have made an election to distribute the
File under one of the following license alternatives, please (i) delete this
introductory statement regarding license alternatives, (ii) delete the two
license alternatives that you have not elected to use and (iii) preserve the
Marvell copyright notice above.

********************************************************************************
Marvell Commercial License Option

If you received this File from Marvell and you have entered into a commercial
license agreement (a "Commercial License") with Marvell, the File is licensed
to you under the terms of the applicable Commercial License.

********************************************************************************
Marvell GPL License Option

If you received this File from Marvell, you may opt to use, redistribute and/or
modify this File in accordance with the terms and conditions of the General
Public License Version 2, June 1991 (the "GPL License"), a copy of which is
available along with the File in the license.txt file or by writing to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
on the worldwide web at http://www.gnu.org/licenses/gpl.txt.

THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
DISCLAIMED. The GPL License provides additional details about this warranty
disclaimer.
********************************************************************************
Marvell BSD License Option

If you received this File from Marvell, you may opt to use, redistribute and/or
modify this File under the following licensing terms.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    *   Redistributions of source code must retain the above copyright notice,
        this list of conditions and the following disclaimer.

    *   Redistributions in binary form must reproduce the above copyright
        notice, this list of conditions and the following disclaimer in the
        documentation and/or other materials provided with the distribution.

    *   Neither the name of Marvell nor the names of its contributors may be
        used to endorse or promote products derived from this software without
        specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*******************************************************************************/

/*******************************************************************************
* mvEthGbe.h - Header File for : Marvell Gigabit Ethernet Controller
*
* DESCRIPTION:
*       This header file contains macros, typedefs and function declarations
*       specific to the Marvell Gigabit Ethernet Controller.
*
* DEPENDENCIES:
*       None.
*
*******************************************************************************/

#ifndef __mvEthGbe_h__
#define __mvEthGbe_h__

extern MV_BOOL         ethDescInSram;
extern MV_BOOL         ethDescSwCoher;
extern ETH_PORT_CTRL*  ethPortCtrl[];

static INLINE MV_ULONG ethDescVirtToPhy(ETH_QUEUE_CTRL* pQueueCtrl, MV_U8* pDesc)
{
#if defined (ETH_DESCR_IN_SRAM)
    if( ethDescInSram )
        return mvSramVirtToPhy(pDesc);
    else
#endif /* ETH_DESCR_IN_SRAM */
        return (pQueueCtrl->descBuf.bufPhysAddr + (pDesc - pQueueCtrl->descBuf.bufVirtPtr));
}
/* Return port handler */
#define mvEthPortHndlGet(port)  ethPortCtrl[port]

/* Used as WA for HW/SW race on TX */
static INLINE int mvEthPortTxEnable(void* pPortHndl, int queue, int max_deep)
{
    int             deep = 0;
    MV_U32          txCurrReg, txEnReg;
    ETH_TX_DESC*    pTxLastDesc;
    ETH_QUEUE_CTRL* pQueueCtrl;
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;

    txEnReg = MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo));
    if( (txEnReg & MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK)) == 0)
    {
        MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
        return 0;
    }

    pQueueCtrl = &pPortCtrl->txQueue[queue];
    pTxLastDesc = pQueueCtrl->pCurrentDescr;
    txCurrReg = MV_REG_READ(ETH_TX_CUR_DESC_PTR_REG(pPortCtrl->portNo, queue));
    if(ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxLastDesc) == txCurrReg)
    {
        /* All descriptors are processed, no chance for race */
        return 0;
    }

    /* Check distance between HW and SW location:                            */
    /* If the distance between HW and SW pointers is less than max_deep      */
    /* descriptors, a race condition is possible, so wait for the end of TX  */
    /* and restart the TXQ.                                                   */
    while(deep < max_deep)
    {
        pTxLastDesc = TX_PREV_DESC_PTR(pTxLastDesc, pQueueCtrl);
        if(ethDescVirtToPhy(pQueueCtrl, (MV_U8*)pTxLastDesc) == txCurrReg)
        {
            int count = 0;

            while( (txEnReg & MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK)) != 0)
            {
                count++;
                if(count > 10000)
                {
                    mvOsPrintf("mvEthPortTxEnable: timeout - TXQ_CMD=0x%08x\n",
                                MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) );
                    break;
                }
                txEnReg = MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo));
            }

            MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
            return count;
        }
        deep++;
    }
    /* Distance between HW and SW pointers is more than max_deep descriptors, */
    /* so NO race condition is possible - do nothing.                          */
    return -1;
}


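/* Illustrative usage note (an assumption, not taken from this file): since
 * mvEthPortTxEnable() is a workaround for a HW/SW race on TX, a caller would
 * typically invoke it right after queueing descriptors, passing the TX ring
 * size as max_deep. The names hndl, pkt and txRingSize are hypothetical:
 *
 *   mvEthPortTx(hndl, txQueue, &pkt);
 *   mvEthPortTxEnable(hndl, txQueue, txRingSize);
 */
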
/* defines */
#define ETH_CSUM_MIN_BYTE_COUNT     72

/* Tailgate and Kirkwood have only 2K TX FIFO */
#if (MV_ETH_VERSION == 2) || (MV_ETH_VERSION == 4)
#define ETH_CSUM_MAX_BYTE_COUNT     1600
#else
#define ETH_CSUM_MAX_BYTE_COUNT     (9*1024)
#endif /* MV_ETH_VERSION */

#define ETH_MV_HEADER_SIZE          2
#define ETH_MV_TX_EN

/* An offset in Tx descriptors used to store data for buffers smaller than 8 bytes */
#define MIN_TX_BUFF_LOAD            8
#define TX_BUF_OFFSET_IN_DESC       (ETH_TX_DESC_ALIGNED_SIZE - MIN_TX_BUFF_LOAD)

/* Default port configuration value */
#define PORT_CONFIG_VALUE                       \
    ETH_DEF_RX_QUEUE_MASK(0)        |           \
    ETH_DEF_RX_ARP_QUEUE_MASK(0)    |           \
    ETH_DEF_RX_TCP_QUEUE_MASK(0)    |           \
    ETH_DEF_RX_UDP_QUEUE_MASK(0)    |           \
    ETH_DEF_RX_BPDU_QUEUE_MASK(0)   |           \
    ETH_RX_CHECKSUM_WITH_PSEUDO_HDR

/* Default port extend configuration value */
#define PORT_CONFIG_EXTEND_VALUE            0

#define PORT_SERIAL_CONTROL_VALUE               \
    ETH_DISABLE_FC_AUTO_NEG_MASK        |       \
    BIT9                                |       \
    ETH_DO_NOT_FORCE_LINK_FAIL_MASK     |       \
    ETH_MAX_RX_PACKET_1552BYTE          |       \
    ETH_SET_FULL_DUPLEX_MASK

#define PORT_SERIAL_CONTROL_100MB_FORCE_VALUE   \
    ETH_FORCE_LINK_PASS_MASK            |       \
    ETH_DISABLE_DUPLEX_AUTO_NEG_MASK    |       \
    ETH_DISABLE_FC_AUTO_NEG_MASK        |       \
    BIT9                                |       \
    ETH_DO_NOT_FORCE_LINK_FAIL_MASK     |       \
    ETH_DISABLE_SPEED_AUTO_NEG_MASK     |       \
    ETH_SET_FULL_DUPLEX_MASK            |       \
    ETH_SET_MII_SPEED_100_MASK          |       \
    ETH_MAX_RX_PACKET_1552BYTE


#define PORT_SERIAL_CONTROL_1000MB_FORCE_VALUE  \
    ETH_FORCE_LINK_PASS_MASK            |       \
    ETH_DISABLE_DUPLEX_AUTO_NEG_MASK    |       \
    ETH_DISABLE_FC_AUTO_NEG_MASK        |       \
    BIT9                                |       \
    ETH_DO_NOT_FORCE_LINK_FAIL_MASK     |       \
    ETH_DISABLE_SPEED_AUTO_NEG_MASK     |       \
    ETH_SET_FULL_DUPLEX_MASK            |       \
    ETH_SET_GMII_SPEED_1000_MASK        |       \
    ETH_MAX_RX_PACKET_1552BYTE

#define PORT_SERIAL_CONTROL_SGMII_IBAN_VALUE    \
    ETH_DISABLE_FC_AUTO_NEG_MASK        |       \
    BIT9                                |       \
    ETH_IN_BAND_AN_EN_MASK              |       \
    ETH_DO_NOT_FORCE_LINK_FAIL_MASK     |       \
    ETH_MAX_RX_PACKET_1552BYTE

/* Function headers: */
MV_VOID     mvEthSetSpecialMcastTable(int portNo, int queue);
MV_STATUS   mvEthArpRxQueue(void* pPortHandle, int arpQueue);
MV_STATUS   mvEthUdpRxQueue(void* pPortHandle, int udpQueue);
MV_STATUS   mvEthTcpRxQueue(void* pPortHandle, int tcpQueue);
MV_STATUS   mvEthMacAddrGet(int portNo, unsigned char *pAddr);
MV_VOID     mvEthSetOtherMcastTable(int portNo, int queue);
MV_STATUS   mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode);
/* Interrupt Coalescing functions */
MV_U32      mvEthRxCoalSet(void* pPortHndl, MV_U32 uSec);
MV_U32      mvEthTxCoalSet(void* pPortHndl, MV_U32 uSec);
MV_STATUS   mvEthCoalGet(void* pPortHndl, MV_U32* pRxCoal, MV_U32* pTxCoal);

/******************************************************************************/
/*                          Data Flow functions                               */
/******************************************************************************/
static INLINE void mvEthPortTxRestart(void* pPortHndl)
{
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;

    MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl->portNo)) = pPortCtrl->portTxQueueCmdReg;
}

/* Get number of Free resources in specific TX queue */
static INLINE int mvEthTxResourceGet(void* pPortHndl, int txQueue)
{
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;

    return (pPortCtrl->txQueue[txQueue].resource);
}

/* Get number of Free resources in specific RX queue */
static INLINE int mvEthRxResourceGet(void* pPortHndl, int rxQueue)
{
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;

    return (pPortCtrl->rxQueue[rxQueue].resource);
}

/* Check if specific TX queue is full */
static INLINE int mvEthTxQueueIsFull(void* pPortHndl, int txQueue)
{
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;

    if(pPortCtrl->txQueue[txQueue].resource == 0)
        return MV_TRUE;

    return MV_FALSE;
}

/* Check if specific RX queue is full */
static INLINE int mvEthRxQueueIsFull(void* pPortHndl, int rxQueue)
{
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
    ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];

    if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
        (pQueueCtrl->resource != 0) )
        return MV_TRUE;

    return MV_FALSE;
}

/* Check if specific TX queue is empty */
static INLINE int mvEthTxQueueIsEmpty(void* pPortHndl, int txQueue)
{
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;
    ETH_QUEUE_CTRL* pQueueCtrl = &pPortCtrl->txQueue[txQueue];

    if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
        (pQueueCtrl->resource != 0) )
    {
        return MV_TRUE;
    }
    return MV_FALSE;
}

/* Check if specific RX queue is empty */
static INLINE int mvEthRxQueueIsEmpty(void* pPortHndl, int rxQueue)
{
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pPortHndl;

    if(pPortCtrl->rxQueue[rxQueue].resource == 0)
        return MV_TRUE;

    return MV_FALSE;
}

/*******************************************************************************
* mvEthPortTx - Send an Ethernet packet
*
* DESCRIPTION:
*       This routine sends a given packet described by the pPktInfo parameter.
*       Single buffer only.
*
* INPUT:
*       void*       pEthPortHndl  - Ethernet Port handler.
*       int         txQueue       - Number of Tx queue.
*       MV_PKT_INFO *pPktInfo     - User packet to send.
*
* RETURN:
*       MV_NO_RESOURCE  - Not enough resources to send this packet.
*       MV_ERROR        - Unexpected Fatal error.
*       MV_OK           - Packet sent successfully.
*
*******************************************************************************/
static INLINE MV_STATUS mvEthPortTx(void* pEthPortHndl, int txQueue, MV_PKT_INFO* pPktInfo)
{
    ETH_TX_DESC*    pTxCurrDesc;
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
    ETH_QUEUE_CTRL* pQueueCtrl;
    int             portNo;
    MV_BUF_INFO*    pBufInfo = pPktInfo->pFrags;

#ifdef ETH_DEBUG
    if(pPortCtrl->portState != MV_ACTIVE)
        return MV_BAD_STATE;
#endif /* ETH_DEBUG */

    portNo = pPortCtrl->portNo;
    pQueueCtrl = &pPortCtrl->txQueue[txQueue];

    /* Get the Tx Desc ring indexes */
    pTxCurrDesc = pQueueCtrl->pCurrentDescr;

    /* Check if there are enough resources to send the packet */
    if(pQueueCtrl->resource == 0)
        return MV_NO_RESOURCE;

    pTxCurrDesc->byteCnt = pBufInfo->dataSize;

    /* Flush Buffer */
    if(pPktInfo->pktSize != 0)
    {
#ifdef MV_NETBSD
        pTxCurrDesc->bufPtr = pBufInfo->bufPhysAddr;
        ETH_PACKET_CACHE_FLUSH(pBufInfo->bufVirtPtr, pPktInfo->pktSize);
#else
        pTxCurrDesc->bufPtr = ETH_PACKET_CACHE_FLUSH(pBufInfo->bufVirtPtr, pPktInfo->pktSize);
#endif
        pPktInfo->pktSize = 0;
    }
    else
        pTxCurrDesc->bufPtr = pBufInfo->bufPhysAddr;

    pTxCurrDesc->returnInfo = (MV_ULONG)pPktInfo;

    /* There is only one buffer in the packet.                                          */
    /* The OSG might set some bits for checksum offload, so add them to the descriptor. */
    pTxCurrDesc->cmdSts = pPktInfo->status              |
                          ETH_BUFFER_OWNED_BY_DMA       |
                          ETH_TX_GENERATE_CRC_MASK      |
                          ETH_TX_ENABLE_INTERRUPT_MASK  |
                          ETH_TX_ZERO_PADDING_MASK      |
                          ETH_TX_FIRST_DESC_MASK        |
                          ETH_TX_LAST_DESC_MASK;

    ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);

    pQueueCtrl->resource--;
    pQueueCtrl->pCurrentDescr = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);

    /* Apply send command */
    MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo)) = pPortCtrl->portTxQueueCmdReg;

    return MV_OK;
}
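
/* Illustrative usage sketch (not part of the original Marvell API): a minimal
 * single-buffer transmit, assuming the caller has already initialized the port
 * (handle obtained via mvEthPortHndlGet()) and owns a DMA-able buffer. The
 * names myBuf, myPkt, frameData, frameDataPhys, port, txQueue and len are
 * hypothetical.
 *
 *   MV_BUF_INFO  myBuf;
 *   MV_PKT_INFO  myPkt;
 *
 *   myBuf.bufVirtPtr  = frameData;         // virtual address of the frame
 *   myBuf.bufPhysAddr = frameDataPhys;     // DMA address of the same buffer
 *   myBuf.dataSize    = len;               // bytes to transmit
 *   myPkt.pFrags      = &myBuf;
 *   myPkt.numFrags    = 1;
 *   myPkt.pktSize     = len;               // non-zero => driver flushes the cache
 *   myPkt.status      = 0;                 // optional checksum-offload bits
 *
 *   if(mvEthPortTx(mvEthPortHndlGet(port), txQueue, &myPkt) != MV_OK)
 *       ;  // queue full: retry later or drop
 */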


/*******************************************************************************
* mvEthPortSgTx - Send an Ethernet packet
*
* DESCRIPTION:
*       This routine sends a given packet described by the pPktInfo parameter.
*       It supports transmitting a packet that spans multiple buffers.
*
* INPUT:
*       void*       pEthPortHndl  - Ethernet Port handler.
*       int         txQueue       - Number of Tx queue.
*       MV_PKT_INFO *pPktInfo     - User packet to send.
*
* RETURN:
*       MV_NO_RESOURCE  - Not enough resources to send this packet.
*       MV_ERROR        - Unexpected Fatal error.
*       MV_OK           - Packet sent successfully.
*
*******************************************************************************/
static INLINE MV_STATUS mvEthPortSgTx(void* pEthPortHndl, int txQueue, MV_PKT_INFO* pPktInfo)
{
    ETH_TX_DESC*    pTxFirstDesc;
    ETH_TX_DESC*    pTxCurrDesc;
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
    ETH_QUEUE_CTRL* pQueueCtrl;
    int             portNo, bufCount;
    MV_BUF_INFO*    pBufInfo = pPktInfo->pFrags;
    MV_U8*          pTxBuf;

#ifdef ETH_DEBUG
    if(pPortCtrl->portState != MV_ACTIVE)
        return MV_BAD_STATE;
#endif /* ETH_DEBUG */

    portNo = pPortCtrl->portNo;
    pQueueCtrl = &pPortCtrl->txQueue[txQueue];

    /* Get the Tx Desc ring indexes */
    pTxCurrDesc = pQueueCtrl->pCurrentDescr;

    /* Check if there are enough resources to send the packet */
    if(pQueueCtrl->resource < pPktInfo->numFrags)
        return MV_NO_RESOURCE;

    /* Remember first desc */
    pTxFirstDesc = pTxCurrDesc;

    bufCount = 0;
    while(MV_TRUE)
    {
        if(pBufInfo[bufCount].dataSize <= MIN_TX_BUFF_LOAD)
        {
            /* Buffers with a payload smaller than MIN_TX_BUFF_LOAD (8 bytes) must be       */
            /* aligned to a 64-bit boundary. Two options here:                              */
            /* 1) Usually, copy the payload to the reserved 8 bytes inside the descriptor.  */
            /* 2) In the half-duplex workaround, the reserved 8 bytes inside the descriptor */
            /*    are used as a pointer to an aligned buffer; copy the small payload there. */
            pTxBuf = ((MV_U8*)pTxCurrDesc) + TX_BUF_OFFSET_IN_DESC;
            mvOsBCopy(pBufInfo[bufCount].bufVirtPtr, pTxBuf, pBufInfo[bufCount].dataSize);
            pTxCurrDesc->bufPtr = ethDescVirtToPhy(pQueueCtrl, pTxBuf);
        }
        else
        {
            /* Flush Buffer */
#ifdef MV_NETBSD
            pTxCurrDesc->bufPtr = pBufInfo[bufCount].bufPhysAddr;
            ETH_PACKET_CACHE_FLUSH(pBufInfo[bufCount].bufVirtPtr, pBufInfo[bufCount].dataSize);
#else
            pTxCurrDesc->bufPtr = ETH_PACKET_CACHE_FLUSH(pBufInfo[bufCount].bufVirtPtr, pBufInfo[bufCount].dataSize);
#endif
        }

        pTxCurrDesc->byteCnt = pBufInfo[bufCount].dataSize;
        bufCount++;

        if(bufCount >= pPktInfo->numFrags)
            break;

        if(bufCount > 1)
        {
            /* This is a middle buffer of the packet: not first and not last */
            pTxCurrDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA;
            ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
        }
        /* Go to next descriptor and next buffer */
        pTxCurrDesc = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);
    }
    /* Set last desc with DMA ownership and interrupt enable. */
    pTxCurrDesc->returnInfo = (MV_ULONG)pPktInfo;
    if(bufCount == 1)
    {
        /* There is only one buffer in the packet.                                          */
        /* The OSG might set some bits for checksum offload, so add them to the descriptor. */
        pTxCurrDesc->cmdSts = pPktInfo->status              |
                              ETH_BUFFER_OWNED_BY_DMA       |
                              ETH_TX_GENERATE_CRC_MASK      |
                              ETH_TX_ENABLE_INTERRUPT_MASK  |
                              ETH_TX_ZERO_PADDING_MASK      |
                              ETH_TX_FIRST_DESC_MASK        |
                              ETH_TX_LAST_DESC_MASK;

        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);
    }
    else
    {
        /* Last but not first */
        pTxCurrDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA       |
                              ETH_TX_ENABLE_INTERRUPT_MASK  |
                              ETH_TX_ZERO_PADDING_MASK      |
                              ETH_TX_LAST_DESC_MASK;

        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxCurrDesc);

        /* Update the first descriptor when there is more than one buffer in the packet.          */
        /* The OSG might set some bits for checksum offload, so add them to the first descriptor. */
        pTxFirstDesc->cmdSts = pPktInfo->status             |
                               ETH_BUFFER_OWNED_BY_DMA      |
                               ETH_TX_GENERATE_CRC_MASK     |
                               ETH_TX_FIRST_DESC_MASK;

        ETH_DESCR_FLUSH_INV(pPortCtrl, pTxFirstDesc);
    }
    /* Update txQueue state */
    pQueueCtrl->resource -= bufCount;
    pQueueCtrl->pCurrentDescr = TX_NEXT_DESC_PTR(pTxCurrDesc, pQueueCtrl);

    /* Apply send command */
    MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo)) = pPortCtrl->portTxQueueCmdReg;

    return MV_OK;
}
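
/* Illustrative usage sketch (not part of the original Marvell API): transmitting
 * a packet that spans two fragments, e.g. a separately-built header plus payload.
 * All buffers must already be DMA-able; the names frags, pkt, hdrVirt, hdrPhys,
 * hdrLen, dataVirt, dataPhys, dataLen, port and txQueue are hypothetical.
 *
 *   MV_BUF_INFO  frags[2];
 *   MV_PKT_INFO  pkt;
 *
 *   frags[0].bufVirtPtr  = hdrVirt;    frags[0].bufPhysAddr = hdrPhys;
 *   frags[0].dataSize    = hdrLen;     // <= MIN_TX_BUFF_LOAD is also legal:
 *                                      // the driver copies it into the descriptor
 *   frags[1].bufVirtPtr  = dataVirt;   frags[1].bufPhysAddr = dataPhys;
 *   frags[1].dataSize    = dataLen;
 *
 *   pkt.pFrags   = frags;
 *   pkt.numFrags = 2;
 *   pkt.status   = 0;
 *
 *   if(mvEthPortSgTx(mvEthPortHndlGet(port), txQueue, &pkt) == MV_NO_RESOURCE)
 *       ;  // fewer than numFrags free descriptors: back off and retry
 */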

/*******************************************************************************
* mvEthPortTxDone - Free all used Tx descriptors and mBlks.
*
* DESCRIPTION:
*       This routine returns the transmitted packet information to the caller.
*
* INPUT:
*       void*   pEthPortHndl  - Ethernet Port handler.
*       int     txQueue       - Number of Tx queue.
*
* RETURN:
*       NULL                  - No transmitted packet to return: either the
*                               queue is empty or transmit is still in progress.
*       MV_PKT_INFO* pPktInfo - Pointer to the packet that was sent, with its
*                               'status' field filled with relevant information.
*
*******************************************************************************/
static INLINE MV_PKT_INFO* mvEthPortTxDone(void* pEthPortHndl, int txQueue)
{
    ETH_TX_DESC*    pTxCurrDesc;
    ETH_TX_DESC*    pTxUsedDesc;
    ETH_QUEUE_CTRL* pQueueCtrl;
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
    MV_PKT_INFO*    pPktInfo;
    MV_U32          commandStatus;

    pQueueCtrl = &pPortCtrl->txQueue[txQueue];

    pTxUsedDesc = pQueueCtrl->pUsedDescr;
    pTxCurrDesc = pQueueCtrl->pCurrentDescr;

    while(MV_TRUE)
    {
        /* No more used descriptors */
        commandStatus = pTxUsedDesc->cmdSts;
        if (commandStatus & (ETH_BUFFER_OWNED_BY_DMA))
        {
            ETH_DESCR_INV(pPortCtrl, pTxUsedDesc);
            return NULL;
        }
        if( (pTxUsedDesc == pTxCurrDesc) &&
            (pQueueCtrl->resource != 0) )
        {
            return NULL;
        }
        pQueueCtrl->resource++;
        pQueueCtrl->pUsedDescr = TX_NEXT_DESC_PTR(pTxUsedDesc, pQueueCtrl);
        if(commandStatus & (ETH_TX_LAST_DESC_MASK))
        {
            pPktInfo = (MV_PKT_INFO*)pTxUsedDesc->returnInfo;
            pPktInfo->status = commandStatus;
            return pPktInfo;
        }
        pTxUsedDesc = pQueueCtrl->pUsedDescr;
    }
}
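
/* Illustrative usage sketch (not part of the original Marvell API): reclaiming
 * transmitted packets, typically from the TX-done interrupt or a periodic task.
 * freePacket(), port and txQueue are hypothetical caller-side names.
 *
 *   MV_PKT_INFO* pPkt;
 *
 *   while( (pPkt = mvEthPortTxDone(mvEthPortHndlGet(port), txQueue)) != NULL )
 *   {
 *       // pPkt->status holds the descriptor command/status word of the packet
 *       freePacket(pPkt);
 *   }
 */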

/*******************************************************************************
* mvEthPortRx - Get new received packets from Rx queue.
*
* DESCRIPTION:
*       This routine returns the received data to the caller. There is no
*       data copying during routine operation. All information is returned
*       through a pointer to the packet information struct that was supplied
*       when the RX buffer was queued.
*
* INPUT:
*       void*   pEthPortHndl  - Ethernet Port handler.
*       int     rxQueue       - Number of Rx queue.
*
* RETURN:
*       NULL                  - No packet was received, or no free resources
*                               in the RX queue.
*       MV_PKT_INFO* pPktInfo - Pointer to the received packet, with its
*                               'status' and data size filled in.
*
*******************************************************************************/
static INLINE MV_PKT_INFO* mvEthPortRx(void* pEthPortHndl, int rxQueue)
{
    ETH_RX_DESC*    pRxCurrDesc;
    MV_U32          commandStatus;
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;
    ETH_QUEUE_CTRL* pQueueCtrl;
    MV_PKT_INFO*    pPktInfo;

    pQueueCtrl = &(pPortCtrl->rxQueue[rxQueue]);

    /* Check resources */
    if(pQueueCtrl->resource == 0)
    {
        mvOsPrintf("ethPortRx: no more resources\n");
        return NULL;
    }
    while(MV_TRUE)
    {
        /* Get the Rx Desc ring 'curr' and 'used' indexes */
        pRxCurrDesc = pQueueCtrl->pCurrentDescr;

        commandStatus = pRxCurrDesc->cmdSts;
        if (commandStatus & (ETH_BUFFER_OWNED_BY_DMA))
        {
            /* Nothing to receive... */
            ETH_DESCR_INV(pPortCtrl, pRxCurrDesc);
            return NULL;
        }

        /* Valid RX only if FIRST and LAST bits are set */
        if( (commandStatus & (ETH_RX_LAST_DESC_MASK | ETH_RX_FIRST_DESC_MASK)) ==
                             (ETH_RX_LAST_DESC_MASK | ETH_RX_FIRST_DESC_MASK) )
        {
            pPktInfo = (MV_PKT_INFO*)pRxCurrDesc->returnInfo;
            pPktInfo->pFrags->dataSize = pRxCurrDesc->byteCnt - 4;
            pPktInfo->status = commandStatus;
            pPktInfo->fragIP = pRxCurrDesc->bufSize & ETH_RX_IP_FRAGMENTED_FRAME_MASK;

            pQueueCtrl->resource--;
            /* Update 'curr' in data structure */
            pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxCurrDesc, pQueueCtrl);

#ifdef INCLUDE_SYNC_BARR
            mvCpuIfSyncBarr(DRAM_TARGET);
#endif
            return pPktInfo;
        }
        else
        {
            ETH_RX_DESC*    pRxUsedDesc = pQueueCtrl->pUsedDescr;

#ifdef ETH_DEBUG
            mvOsPrintf("ethDrv: Unexpected Jumbo frame: "
                       "status=0x%08x, byteCnt=%d, pData=0x%x\n",
                       commandStatus, pRxCurrDesc->byteCnt, pRxCurrDesc->bufPtr);
#endif /* ETH_DEBUG */

            /* Move buffer from pCurrentDescr position to pUsedDescr position */
            pRxUsedDesc->bufPtr     = pRxCurrDesc->bufPtr;
            pRxUsedDesc->returnInfo = pRxCurrDesc->returnInfo;
            pRxUsedDesc->bufSize    = pRxCurrDesc->bufSize & ETH_RX_BUFFER_MASK;

            /* Return the descriptor to DMA ownership */
            pRxUsedDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA |
                                  ETH_RX_ENABLE_INTERRUPT_MASK;

            /* Flush descriptor and CPU pipe */
            ETH_DESCR_FLUSH_INV(pPortCtrl, pRxUsedDesc);

            /* Move the used descriptor pointer to the next descriptor */
            pQueueCtrl->pUsedDescr = RX_NEXT_DESC_PTR(pRxUsedDesc, pQueueCtrl);
            pQueueCtrl->pCurrentDescr = RX_NEXT_DESC_PTR(pRxCurrDesc, pQueueCtrl);
        }
    }
}

/*******************************************************************************
* mvEthPortRxDone - Returns a Rx buffer back to the Rx ring.
*
* DESCRIPTION:
*       This routine returns an Rx buffer to the Rx ring.
*
* INPUT:
*       void*       pEthPortHndl  - Ethernet Port handler.
*       int         rxQueue       - Number of Rx queue.
*       MV_PKT_INFO *pPktInfo     - Pointer to the packet whose buffer is returned.
*
* RETURN:
*       MV_ERROR        - Unexpected Fatal error.
*       MV_OUT_OF_RANGE - RX queue is already FULL, so this buffer can't be
*                         returned to this queue.
*       MV_FULL         - Buffer returned successfully and the RX queue became
*                         full. More buffers should not be returned at this time.
*       MV_OK           - Buffer returned successfully and there are more free
*                         places in the queue.
*
*******************************************************************************/
static INLINE MV_STATUS mvEthPortRxDone(void* pEthPortHndl, int rxQueue, MV_PKT_INFO *pPktInfo)
{
    ETH_RX_DESC*    pRxUsedDesc;
    ETH_QUEUE_CTRL* pQueueCtrl;
    ETH_PORT_CTRL*  pPortCtrl = (ETH_PORT_CTRL*)pEthPortHndl;

    pQueueCtrl = &pPortCtrl->rxQueue[rxQueue];

    /* Get 'used' Rx descriptor */
    pRxUsedDesc = pQueueCtrl->pUsedDescr;

    /* Check that ring is not FULL */
    if( (pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr) &&
        (pQueueCtrl->resource != 0) )
    {
        mvOsPrintf("%s %d: out of range Error resource=%d, curr=%p, used=%p\n",
                    __FUNCTION__, pPortCtrl->portNo, pQueueCtrl->resource,
                    pQueueCtrl->pCurrentDescr, pQueueCtrl->pUsedDescr);
        return MV_OUT_OF_RANGE;
    }

    pRxUsedDesc->bufPtr     = pPktInfo->pFrags->bufPhysAddr;
    pRxUsedDesc->returnInfo = (MV_ULONG)pPktInfo;
    pRxUsedDesc->bufSize    = pPktInfo->pFrags->bufSize & ETH_RX_BUFFER_MASK;

    /* Invalidate data buffer according to pktSize */
    if(pPktInfo->pktSize != 0)
    {
        ETH_PACKET_CACHE_INVALIDATE(pPktInfo->pFrags->bufVirtPtr, pPktInfo->pktSize);
        pPktInfo->pktSize = 0;
    }

    /* Return the descriptor to DMA ownership */
    pRxUsedDesc->cmdSts = ETH_BUFFER_OWNED_BY_DMA | ETH_RX_ENABLE_INTERRUPT_MASK;

    /* Flush descriptor and CPU pipe */
    ETH_DESCR_FLUSH_INV(pPortCtrl, pRxUsedDesc);

    pQueueCtrl->resource++;

    /* Move the used descriptor pointer to the next descriptor */
    pQueueCtrl->pUsedDescr = RX_NEXT_DESC_PTR(pRxUsedDesc, pQueueCtrl);

    /* If the ring became full return MV_FULL */
    if(pQueueCtrl->pUsedDescr == pQueueCtrl->pCurrentDescr)
        return MV_FULL;

    return MV_OK;
}
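
/* Illustrative usage sketch (not part of the original Marvell API): a simple
 * receive-and-recycle loop, typically driven from the RX interrupt. The names
 * processPacket, hndl, port and rxQueue are hypothetical; the same MV_PKT_INFO
 * is handed straight back to the ring after its data has been consumed.
 *
 *   void*        hndl = mvEthPortHndlGet(port);
 *   MV_PKT_INFO* pPkt;
 *
 *   while( (pPkt = mvEthPortRx(hndl, rxQueue)) != NULL )
 *   {
 *       processPacket(pPkt);                      // consume or copy the data
 *
 *       pPkt->pktSize = pPkt->pFrags->bufSize;    // request cache invalidate of
 *                                                 // the whole buffer on refill
 *       mvEthPortRxDone(hndl, rxQueue, pPkt);     // give the buffer back to HW
 *   }
 */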


#endif /* __mvEthGbe_h__ */
