1 /*******************************************************************************
2 Copyright (C) Marvell International Ltd. and its affiliates
4 This software file (the "File") is owned and distributed by Marvell
5 International Ltd. and/or its affiliates ("Marvell") under the following
6 alternative licensing terms. Once you have made an election to distribute the
7 File under one of the following license alternatives, please (i) delete this
8 introductory statement regarding license alternatives, (ii) delete the two
9 license alternatives that you have not elected to use and (iii) preserve the
10 Marvell copyright notice above.
12 ********************************************************************************
13 Marvell Commercial License Option
15 If you received this File from Marvell and you have entered into a commercial
16 license agreement (a "Commercial License") with Marvell, the File is licensed
17 to you under the terms of the applicable Commercial License.
19 ********************************************************************************
20 Marvell GPL License Option
22 If you received this File from Marvell, you may opt to use, redistribute and/or
23 modify this File in accordance with the terms and conditions of the General
24 Public License Version 2, June 1991 (the "GPL License"), a copy of which is
25 available along with the File in the license.txt file or by writing to the Free
26 Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 or
27 on the worldwide web at http://www.gnu.org/licenses/gpl.txt.
29 THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE IMPLIED
30 WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE ARE EXPRESSLY
31 DISCLAIMED. The GPL License provides additional details about this warranty
33 ********************************************************************************
34 Marvell BSD License Option
36 If you received this File from Marvell, you may opt to use, redistribute and/or
37 modify this File under the following licensing terms.
38 Redistribution and use in source and binary forms, with or without modification,
39 are permitted provided that the following conditions are met:
41 * Redistributions of source code must retain the above copyright notice,
42 this list of conditions and the following disclaimer.
44 * Redistributions in binary form must reproduce the above copyright
45 notice, this list of conditions and the following disclaimer in the
46 documentation and/or other materials provided with the distribution.
48 * Neither the name of Marvell nor the names of its contributors may be
49 used to endorse or promote products derived from this software without
50 specific prior written permission.
52 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
53 ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
54 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
55 DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
56 ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
57 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
58 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
59 ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
61 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *******************************************************************************/
65 /*******************************************************************************
66 * mvEth.h - Header File for : Marvell Gigabit Ethernet Controller
69 * This header file contains macros typedefs and function declaration specific to
70 * the Marvell Gigabit Ethernet Controller.
75 *******************************************************************************/
#ifndef __mvEthGbe_h__
#define __mvEthGbe_h__

/* MV_TRUE when TX/RX descriptors are placed in internal SRAM — presumably set
 * when ETH_DESCR_IN_SRAM support is active (see ethDescVirtToPhy below); TODO confirm. */
extern MV_BOOL ethDescInSram;
/* NOTE(review): presumably indicates software cache coherency is needed for
 * descriptor memory — confirm against the allocation code. */
extern MV_BOOL ethDescSwCoher;
/* Per-port control structures, indexed by port number (see mvEthPortHndlGet). */
extern ETH_PORT_CTRL* ethPortCtrl[];
84 static INLINE MV_ULONG
ethDescVirtToPhy(ETH_QUEUE_CTRL
* pQueueCtrl
, MV_U8
* pDesc
)
86 #if defined (ETH_DESCR_IN_SRAM)
88 return mvSramVirtToPhy(pDesc
);
90 #endif /* ETH_DESCR_IN_SRAM */
91 return (pQueueCtrl
->descBuf
.bufPhysAddr
+ (pDesc
- pQueueCtrl
->descBuf
.bufVirtPtr
));
/* Return the port handler (ETH_PORT_CTRL*) for the given port number. */
#define mvEthPortHndlGet(port) ethPortCtrl[port]
96 /* Used as WA for HW/SW race on TX */
97 static INLINE
int mvEthPortTxEnable(void* pPortHndl
, int queue
, int max_deep
)
100 MV_U32 txCurrReg
, txEnReg
;
101 ETH_TX_DESC
* pTxLastDesc
;
102 ETH_QUEUE_CTRL
* pQueueCtrl
;
103 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
105 txEnReg
= MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl
->portNo
));
106 if( (txEnReg
& MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK
)) == 0)
108 MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl
->portNo
)) = pPortCtrl
->portTxQueueCmdReg
;
112 pQueueCtrl
= &pPortCtrl
->txQueue
[queue
];
113 pTxLastDesc
= pQueueCtrl
->pCurrentDescr
;
114 txCurrReg
= MV_REG_READ(ETH_TX_CUR_DESC_PTR_REG(pPortCtrl
->portNo
, queue
));
115 if(ethDescVirtToPhy(pQueueCtrl
, (MV_U8
*)pTxLastDesc
) == txCurrReg
)
117 /* All descriptors are processed, no chance for race */
121 /* Check distance betwee HW and SW location: */
122 /* If distance between HW and SW pointers is less than max_deep descriptors */
123 /* Race condition is possible, so wait end of TX and restart TXQ */
124 while(deep
< max_deep
)
126 pTxLastDesc
= TX_PREV_DESC_PTR(pTxLastDesc
, pQueueCtrl
);
127 if(ethDescVirtToPhy(pQueueCtrl
, (MV_U8
*)pTxLastDesc
) == txCurrReg
)
131 while( (txEnReg
& MV_32BIT_LE_FAST(ETH_TXQ_ENABLE_MASK
)) != 0)
136 mvOsPrintf("mvEthPortTxEnable: timeout - TXQ_CMD=0x%08x\n",
137 MV_REG_READ(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl
->portNo
)) );
140 txEnReg
= MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl
->portNo
));
143 MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl
->portNo
)) = pPortCtrl
->portTxQueueCmdReg
;
148 /* Distance between HW and SW pointers is more than max_deep descriptors, */
149 /* So NO race condition - do nothing */
/* Minimal packet byte count eligible for HW checksum offload. */
#define ETH_CSUM_MIN_BYTE_COUNT     72

/* Tailgate and Kirkwood have only 2K TX FIFO, so limit csum-offload length. */
/* Fix: the two definitions below were both unconditional (missing #else),     */
/* causing an ETH_CSUM_MAX_BYTE_COUNT redefinition; the #else is restored.     */
#if (MV_ETH_VERSION == 2) || (MV_ETH_VERSION == 4)
#define ETH_CSUM_MAX_BYTE_COUNT     1600
#else
#define ETH_CSUM_MAX_BYTE_COUNT     (9*1024)
#endif /* MV_ETH_VERSION */

/* Marvell proprietary header size prepended to the frame. */
#define ETH_MV_HEADER_SIZE          2

/* An offset in Tx descriptors to store data for buffers less than 8 Bytes */
#define MIN_TX_BUFF_LOAD            8
#define TX_BUF_OFFSET_IN_DESC       (ETH_TX_DESC_ALIGNED_SIZE - MIN_TX_BUFF_LOAD)
/* Default port configuration value */
#define PORT_CONFIG_VALUE                       \
            (ETH_DEF_RX_QUEUE_MASK(0)       |   \
            ETH_DEF_RX_ARP_QUEUE_MASK(0)    |   \
            ETH_DEF_RX_TCP_QUEUE_MASK(0)    |   \
            ETH_DEF_RX_UDP_QUEUE_MASK(0)    |   \
            ETH_DEF_RX_BPDU_QUEUE_MASK(0)   |   \
            ETH_RX_CHECKSUM_WITH_PSEUDO_HDR)

/* Default port extend configuration value */
#define PORT_CONFIG_EXTEND_VALUE            0

/* NOTE(review): one OR term was dropped from each PORT_SERIAL_CONTROL_* macro
 * by a formatting pass; BIT9 is restored per the upstream Marvell source —
 * confirm against the port serial control register description. */
#define PORT_SERIAL_CONTROL_VALUE               \
            (ETH_DISABLE_FC_AUTO_NEG_MASK   |   \
            BIT9                            |   \
            ETH_DO_NOT_FORCE_LINK_FAIL_MASK |   \
            ETH_MAX_RX_PACKET_1552BYTE      |   \
            ETH_SET_FULL_DUPLEX_MASK)

#define PORT_SERIAL_CONTROL_100MB_FORCE_VALUE   \
            (ETH_FORCE_LINK_PASS_MASK       |   \
            ETH_DISABLE_DUPLEX_AUTO_NEG_MASK |  \
            ETH_DISABLE_FC_AUTO_NEG_MASK    |   \
            BIT9                            |   \
            ETH_DO_NOT_FORCE_LINK_FAIL_MASK |   \
            ETH_DISABLE_SPEED_AUTO_NEG_MASK |   \
            ETH_SET_FULL_DUPLEX_MASK        |   \
            ETH_SET_MII_SPEED_100_MASK      |   \
            ETH_MAX_RX_PACKET_1552BYTE)

#define PORT_SERIAL_CONTROL_1000MB_FORCE_VALUE  \
            (ETH_FORCE_LINK_PASS_MASK       |   \
            ETH_DISABLE_DUPLEX_AUTO_NEG_MASK |  \
            ETH_DISABLE_FC_AUTO_NEG_MASK    |   \
            BIT9                            |   \
            ETH_DO_NOT_FORCE_LINK_FAIL_MASK |   \
            ETH_DISABLE_SPEED_AUTO_NEG_MASK |   \
            ETH_SET_FULL_DUPLEX_MASK        |   \
            ETH_SET_GMII_SPEED_1000_MASK    |   \
            ETH_MAX_RX_PACKET_1552BYTE)

#define PORT_SERIAL_CONTROL_SGMII_IBAN_VALUE    \
            (ETH_DISABLE_FC_AUTO_NEG_MASK   |   \
            BIT9                            |   \
            ETH_IN_BAND_AN_EN_MASK          |   \
            ETH_DO_NOT_FORCE_LINK_FAIL_MASK |   \
            ETH_MAX_RX_PACKET_1552BYTE)
/* Function headers: */

/* Multicast table / RX queue steering configuration. */
MV_VOID     mvEthSetSpecialMcastTable(int portNo, int queue);
MV_STATUS   mvEthArpRxQueue(void* pPortHandle, int arpQueue);
MV_STATUS   mvEthUdpRxQueue(void* pPortHandle, int udpQueue);
MV_STATUS   mvEthTcpRxQueue(void* pPortHandle, int tcpQueue);
MV_STATUS   mvEthMacAddrGet(int portNo, unsigned char *pAddr);
MV_VOID     mvEthSetOtherMcastTable(int portNo, int queue);
MV_STATUS   mvEthHeaderModeSet(void* pPortHandle, MV_ETH_HEADER_MODE headerMode);

/* Interrupt Coalescing functions */
MV_U32      mvEthRxCoalSet(void* pPortHndl, MV_U32 uSec);
MV_U32      mvEthTxCoalSet(void* pPortHndl, MV_U32 uSec);
MV_STATUS   mvEthCoalGet(void* pPortHndl, MV_U32* pRxCoal, MV_U32* pTxCoal);
233 /******************************************************************************/
234 /* Data Flow functions */
235 /******************************************************************************/
236 static INLINE
void mvEthPortTxRestart(void* pPortHndl
)
238 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
240 MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(pPortCtrl
->portNo
)) = pPortCtrl
->portTxQueueCmdReg
;
243 /* Get number of Free resources in specific TX queue */
244 static INLINE
int mvEthTxResourceGet(void* pPortHndl
, int txQueue
)
246 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
248 return (pPortCtrl
->txQueue
[txQueue
].resource
);
251 /* Get number of Free resources in specific RX queue */
252 static INLINE
int mvEthRxResourceGet(void* pPortHndl
, int rxQueue
)
254 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
256 return (pPortCtrl
->rxQueue
[rxQueue
].resource
);
259 static INLINE
int mvEthTxQueueIsFull(void* pPortHndl
, int txQueue
)
261 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
263 if(pPortCtrl
->txQueue
[txQueue
].resource
== 0)
269 /* Get number of Free resources in specific RX queue */
270 static INLINE
int mvEthRxQueueIsFull(void* pPortHndl
, int rxQueue
)
272 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
273 ETH_QUEUE_CTRL
* pQueueCtrl
= &pPortCtrl
->rxQueue
[rxQueue
];
275 if( (pQueueCtrl
->pUsedDescr
== pQueueCtrl
->pCurrentDescr
) &&
276 (pQueueCtrl
->resource
!= 0) )
282 static INLINE
int mvEthTxQueueIsEmpty(void* pPortHndl
, int txQueue
)
284 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
285 ETH_QUEUE_CTRL
* pQueueCtrl
= &pPortCtrl
->txQueue
[txQueue
];
287 if( (pQueueCtrl
->pUsedDescr
== pQueueCtrl
->pCurrentDescr
) &&
288 (pQueueCtrl
->resource
!= 0) )
295 /* Get number of Free resources in specific RX queue */
296 static INLINE
int mvEthRxQueueIsEmpty(void* pPortHndl
, int rxQueue
)
298 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pPortHndl
;
300 if(pPortCtrl
->rxQueue
[rxQueue
].resource
== 0)
306 /*******************************************************************************
307 * mvEthPortTx - Send an Ethernet packet
310 * This routine send a given packet described by pPktInfo parameter.
311 * Single buffer only.
314 * void* pEthPortHndl - Ethernet Port handler.
315 * int txQueue - Number of Tx queue.
316 * MV_PKT_INFO *pPktInfo - User packet to send.
319 * MV_NO_RESOURCE - No enough resources to send this packet.
320 * MV_ERROR - Unexpected Fatal error.
321 * MV_OK - Packet send successfully.
323 *******************************************************************************/
324 static INLINE MV_STATUS
mvEthPortTx(void* pEthPortHndl
, int txQueue
, MV_PKT_INFO
* pPktInfo
)
326 ETH_TX_DESC
* pTxCurrDesc
;
327 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pEthPortHndl
;
328 ETH_QUEUE_CTRL
* pQueueCtrl
;
330 MV_BUF_INFO
* pBufInfo
= pPktInfo
->pFrags
;
333 if(pPortCtrl
->portState
!= MV_ACTIVE
)
335 #endif /* ETH_DEBUG */
337 portNo
= pPortCtrl
->portNo
;
338 pQueueCtrl
= &pPortCtrl
->txQueue
[txQueue
];
340 /* Get the Tx Desc ring indexes */
341 pTxCurrDesc
= pQueueCtrl
->pCurrentDescr
;
343 /* Check if there is enough resources to send the packet */
344 if(pQueueCtrl
->resource
== 0)
345 return MV_NO_RESOURCE
;
347 pTxCurrDesc
->byteCnt
= pBufInfo
->dataSize
;
350 if(pPktInfo
->pktSize
!= 0)
353 pTxCurrDesc
->bufPtr
= pBufInfo
->bufPhysAddr
;
354 ETH_PACKET_CACHE_FLUSH(pBufInfo
->bufVirtPtr
, pPktInfo
->pktSize
);
356 pTxCurrDesc
->bufPtr
= ETH_PACKET_CACHE_FLUSH(pBufInfo
->bufVirtPtr
, pPktInfo
->pktSize
);
358 pPktInfo
->pktSize
= 0;
361 pTxCurrDesc
->bufPtr
= pBufInfo
->bufPhysAddr
;
363 pTxCurrDesc
->returnInfo
= (MV_ULONG
)pPktInfo
;
365 /* There is only one buffer in the packet */
366 /* The OSG might set some bits for checksum offload, so add them to first descriptor */
367 pTxCurrDesc
->cmdSts
= pPktInfo
->status
|
368 ETH_BUFFER_OWNED_BY_DMA
|
369 ETH_TX_GENERATE_CRC_MASK
|
370 ETH_TX_ENABLE_INTERRUPT_MASK
|
371 ETH_TX_ZERO_PADDING_MASK
|
372 ETH_TX_FIRST_DESC_MASK
|
373 ETH_TX_LAST_DESC_MASK
;
375 ETH_DESCR_FLUSH_INV(pPortCtrl
, pTxCurrDesc
);
377 pQueueCtrl
->resource
--;
378 pQueueCtrl
->pCurrentDescr
= TX_NEXT_DESC_PTR(pTxCurrDesc
, pQueueCtrl
);
380 /* Apply send command */
381 MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo
)) = pPortCtrl
->portTxQueueCmdReg
;
387 /*******************************************************************************
388 * mvEthPortSgTx - Send an Ethernet packet
391 * This routine send a given packet described by pBufInfo parameter. It
392 * supports transmitting of a packet spaned over multiple buffers.
395 * void* pEthPortHndl - Ethernet Port handler.
396 * int txQueue - Number of Tx queue.
397 * MV_PKT_INFO *pPktInfo - User packet to send.
400 * MV_NO_RESOURCE - No enough resources to send this packet.
401 * MV_ERROR - Unexpected Fatal error.
402 * MV_OK - Packet send successfully.
404 *******************************************************************************/
405 static INLINE MV_STATUS
mvEthPortSgTx(void* pEthPortHndl
, int txQueue
, MV_PKT_INFO
* pPktInfo
)
407 ETH_TX_DESC
* pTxFirstDesc
;
408 ETH_TX_DESC
* pTxCurrDesc
;
409 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pEthPortHndl
;
410 ETH_QUEUE_CTRL
* pQueueCtrl
;
411 int portNo
, bufCount
;
412 MV_BUF_INFO
* pBufInfo
= pPktInfo
->pFrags
;
416 if(pPortCtrl
->portState
!= MV_ACTIVE
)
418 #endif /* ETH_DEBUG */
420 portNo
= pPortCtrl
->portNo
;
421 pQueueCtrl
= &pPortCtrl
->txQueue
[txQueue
];
423 /* Get the Tx Desc ring indexes */
424 pTxCurrDesc
= pQueueCtrl
->pCurrentDescr
;
426 /* Check if there is enough resources to send the packet */
427 if(pQueueCtrl
->resource
< pPktInfo
->numFrags
)
428 return MV_NO_RESOURCE
;
430 /* Remember first desc */
431 pTxFirstDesc
= pTxCurrDesc
;
436 if(pBufInfo
[bufCount
].dataSize
<= MIN_TX_BUFF_LOAD
)
438 /* Buffers with a payload smaller than MIN_TX_BUFF_LOAD (8 bytes) must be aligned */
439 /* to 64-bit boundary. Two options here: */
440 /* 1) Usually, copy the payload to the reserved 8 bytes inside descriptor. */
441 /* 2) In the Half duplex workaround, the reserved 8 bytes inside descriptor are used */
442 /* as a pointer to the aligned buffer, copy the small payload to this buffer. */
443 pTxBuf
= ((MV_U8
*)pTxCurrDesc
)+TX_BUF_OFFSET_IN_DESC
;
444 mvOsBCopy(pBufInfo
[bufCount
].bufVirtPtr
, pTxBuf
, pBufInfo
[bufCount
].dataSize
);
445 pTxCurrDesc
->bufPtr
= ethDescVirtToPhy(pQueueCtrl
, pTxBuf
);
451 pTxCurrDesc
->bufPtr
= pBufInfo
[bufCount
].bufPhysAddr
;
452 ETH_PACKET_CACHE_FLUSH(pBufInfo
[bufCount
].bufVirtPtr
, pBufInfo
[bufCount
].dataSize
);
454 pTxCurrDesc
->bufPtr
= ETH_PACKET_CACHE_FLUSH(pBufInfo
[bufCount
].bufVirtPtr
, pBufInfo
[bufCount
].dataSize
);
458 pTxCurrDesc
->byteCnt
= pBufInfo
[bufCount
].dataSize
;
461 if(bufCount
>= pPktInfo
->numFrags
)
466 /* There is middle buffer of the packet Not First and Not Last */
467 pTxCurrDesc
->cmdSts
= ETH_BUFFER_OWNED_BY_DMA
;
468 ETH_DESCR_FLUSH_INV(pPortCtrl
, pTxCurrDesc
);
470 /* Go to next descriptor and next buffer */
471 pTxCurrDesc
= TX_NEXT_DESC_PTR(pTxCurrDesc
, pQueueCtrl
);
473 /* Set last desc with DMA ownership and interrupt enable. */
474 pTxCurrDesc
->returnInfo
= (MV_ULONG
)pPktInfo
;
477 /* There is only one buffer in the packet */
478 /* The OSG might set some bits for checksum offload, so add them to first descriptor */
479 pTxCurrDesc
->cmdSts
= pPktInfo
->status
|
480 ETH_BUFFER_OWNED_BY_DMA
|
481 ETH_TX_GENERATE_CRC_MASK
|
482 ETH_TX_ENABLE_INTERRUPT_MASK
|
483 ETH_TX_ZERO_PADDING_MASK
|
484 ETH_TX_FIRST_DESC_MASK
|
485 ETH_TX_LAST_DESC_MASK
;
487 ETH_DESCR_FLUSH_INV(pPortCtrl
, pTxCurrDesc
);
491 /* Last but not First */
492 pTxCurrDesc
->cmdSts
= ETH_BUFFER_OWNED_BY_DMA
|
493 ETH_TX_ENABLE_INTERRUPT_MASK
|
494 ETH_TX_ZERO_PADDING_MASK
|
495 ETH_TX_LAST_DESC_MASK
;
497 ETH_DESCR_FLUSH_INV(pPortCtrl
, pTxCurrDesc
);
499 /* Update First when more than one buffer in the packet */
500 /* The OSG might set some bits for checksum offload, so add them to first descriptor */
501 pTxFirstDesc
->cmdSts
= pPktInfo
->status
|
502 ETH_BUFFER_OWNED_BY_DMA
|
503 ETH_TX_GENERATE_CRC_MASK
|
504 ETH_TX_FIRST_DESC_MASK
;
506 ETH_DESCR_FLUSH_INV(pPortCtrl
, pTxFirstDesc
);
508 /* Update txQueue state */
509 pQueueCtrl
->resource
-= bufCount
;
510 pQueueCtrl
->pCurrentDescr
= TX_NEXT_DESC_PTR(pTxCurrDesc
, pQueueCtrl
);
512 /* Apply send command */
513 MV_REG_VALUE(ETH_TX_QUEUE_COMMAND_REG(portNo
)) = pPortCtrl
->portTxQueueCmdReg
;
518 /*******************************************************************************
519 * mvEthPortTxDone - Free all used Tx descriptors and mBlks.
522 * This routine returns the transmitted packet information to the caller.
525 * void* pEthPortHndl - Ethernet Port handler.
526 * int txQueue - Number of Tx queue.
529 * MV_PKT_INFO *pPktInfo - Pointer to packet was sent.
532 * MV_NOT_FOUND - No transmitted packets to return. Transmit in progress.
533 * MV_EMPTY - No transmitted packets to return. TX Queue is empty.
534 * MV_ERROR - Unexpected Fatal error.
535 * MV_OK - There is transmitted packet in the queue,
536 * 'pPktInfo' filled with relevant information.
538 *******************************************************************************/
539 static INLINE MV_PKT_INFO
* mvEthPortTxDone(void* pEthPortHndl
, int txQueue
)
541 ETH_TX_DESC
* pTxCurrDesc
;
542 ETH_TX_DESC
* pTxUsedDesc
;
543 ETH_QUEUE_CTRL
* pQueueCtrl
;
544 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pEthPortHndl
;
545 MV_PKT_INFO
* pPktInfo
;
546 MV_U32 commandStatus
;
548 pQueueCtrl
= &pPortCtrl
->txQueue
[txQueue
];
550 pTxUsedDesc
= pQueueCtrl
->pUsedDescr
;
551 pTxCurrDesc
= pQueueCtrl
->pCurrentDescr
;
555 /* No more used descriptors */
556 commandStatus
= pTxUsedDesc
->cmdSts
;
557 if (commandStatus
& (ETH_BUFFER_OWNED_BY_DMA
))
559 ETH_DESCR_INV(pPortCtrl
, pTxUsedDesc
);
562 if( (pTxUsedDesc
== pTxCurrDesc
) &&
563 (pQueueCtrl
->resource
!= 0) )
567 pQueueCtrl
->resource
++;
568 pQueueCtrl
->pUsedDescr
= TX_NEXT_DESC_PTR(pTxUsedDesc
, pQueueCtrl
);
569 if(commandStatus
& (ETH_TX_LAST_DESC_MASK
))
571 pPktInfo
= (MV_PKT_INFO
*)pTxUsedDesc
->returnInfo
;
572 pPktInfo
->status
= commandStatus
;
575 pTxUsedDesc
= pQueueCtrl
->pUsedDescr
;
579 /*******************************************************************************
580 * mvEthPortRx - Get new received packets from Rx queue.
583 * This routine returns the received data to the caller. There is no
584 * data copying during routine operation. All information is returned
585 * using pointer to packet information struct passed from the caller.
588 * void* pEthPortHndl - Ethernet Port handler.
589 * int rxQueue - Number of Rx queue.
592 * MV_PKT_INFO *pPktInfo - Pointer to received packet.
595 * MV_NO_RESOURCE - No free resources in RX queue.
596 * MV_ERROR - Unexpected Fatal error.
597 * MV_OK - New packet received and 'pBufInfo' structure filled
598 * with relevant information.
600 *******************************************************************************/
601 static INLINE MV_PKT_INFO
* mvEthPortRx(void* pEthPortHndl
, int rxQueue
)
603 ETH_RX_DESC
*pRxCurrDesc
;
604 MV_U32 commandStatus
;
605 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pEthPortHndl
;
606 ETH_QUEUE_CTRL
* pQueueCtrl
;
607 MV_PKT_INFO
* pPktInfo
;
609 pQueueCtrl
= &(pPortCtrl
->rxQueue
[rxQueue
]);
611 /* Check resources */
612 if(pQueueCtrl
->resource
== 0)
614 mvOsPrintf("ethPortRx: no more resources\n");
619 /* Get the Rx Desc ring 'curr and 'used' indexes */
620 pRxCurrDesc
= pQueueCtrl
->pCurrentDescr
;
622 commandStatus
= pRxCurrDesc
->cmdSts
;
623 if (commandStatus
& (ETH_BUFFER_OWNED_BY_DMA
))
625 /* Nothing to receive... */
626 ETH_DESCR_INV(pPortCtrl
, pRxCurrDesc
);
630 /* Valid RX only if FIRST and LAST bits are set */
631 if( (commandStatus
& (ETH_RX_LAST_DESC_MASK
| ETH_RX_FIRST_DESC_MASK
)) ==
632 (ETH_RX_LAST_DESC_MASK
| ETH_RX_FIRST_DESC_MASK
) )
634 pPktInfo
= (MV_PKT_INFO
*)pRxCurrDesc
->returnInfo
;
635 pPktInfo
->pFrags
->dataSize
= pRxCurrDesc
->byteCnt
- 4;
636 pPktInfo
->status
= commandStatus
;
637 pPktInfo
->fragIP
= pRxCurrDesc
->bufSize
& ETH_RX_IP_FRAGMENTED_FRAME_MASK
;
639 pQueueCtrl
->resource
--;
640 /* Update 'curr' in data structure */
641 pQueueCtrl
->pCurrentDescr
= RX_NEXT_DESC_PTR(pRxCurrDesc
, pQueueCtrl
);
643 #ifdef INCLUDE_SYNC_BARR
644 mvCpuIfSyncBarr(DRAM_TARGET
);
650 ETH_RX_DESC
* pRxUsedDesc
= pQueueCtrl
->pUsedDescr
;
653 mvOsPrintf("ethDrv: Unexpected Jumbo frame: "
654 "status=0x%08x, byteCnt=%d, pData=0x%x\n",
655 commandStatus
, pRxCurrDesc
->byteCnt
, pRxCurrDesc
->bufPtr
);
656 #endif /* ETH_DEBUG */
658 /* move buffer from pCurrentDescr position to pUsedDescr position */
659 pRxUsedDesc
->bufPtr
= pRxCurrDesc
->bufPtr
;
660 pRxUsedDesc
->returnInfo
= pRxCurrDesc
->returnInfo
;
661 pRxUsedDesc
->bufSize
= pRxCurrDesc
->bufSize
& ETH_RX_BUFFER_MASK
;
663 /* Return the descriptor to DMA ownership */
664 pRxUsedDesc
->cmdSts
= ETH_BUFFER_OWNED_BY_DMA
|
665 ETH_RX_ENABLE_INTERRUPT_MASK
;
667 /* Flush descriptor and CPU pipe */
668 ETH_DESCR_FLUSH_INV(pPortCtrl
, pRxUsedDesc
);
670 /* Move the used descriptor pointer to the next descriptor */
671 pQueueCtrl
->pUsedDescr
= RX_NEXT_DESC_PTR(pRxUsedDesc
, pQueueCtrl
);
672 pQueueCtrl
->pCurrentDescr
= RX_NEXT_DESC_PTR(pRxCurrDesc
, pQueueCtrl
);
677 /*******************************************************************************
678 * mvEthPortRxDone - Returns a Rx buffer back to the Rx ring.
681 * This routine returns a Rx buffer back to the Rx ring.
684 * void* pEthPortHndl - Ethernet Port handler.
685 * int rxQueue - Number of Rx queue.
686 * MV_PKT_INFO *pPktInfo - Pointer to received packet.
689 * MV_ERROR - Unexpected Fatal error.
690 * MV_OUT_OF_RANGE - RX queue is already FULL, so this buffer can't be
691 * returned to this queue.
692 * MV_FULL - Buffer returned successfully and RX queue became full.
693 * More buffers should not be returned at the time.
694 * MV_OK - Buffer returned successfully and there are more free
695 * places in the queue.
697 *******************************************************************************/
698 static INLINE MV_STATUS
mvEthPortRxDone(void* pEthPortHndl
, int rxQueue
, MV_PKT_INFO
*pPktInfo
)
700 ETH_RX_DESC
* pRxUsedDesc
;
701 ETH_QUEUE_CTRL
* pQueueCtrl
;
702 ETH_PORT_CTRL
* pPortCtrl
= (ETH_PORT_CTRL
*)pEthPortHndl
;
704 pQueueCtrl
= &pPortCtrl
->rxQueue
[rxQueue
];
706 /* Get 'used' Rx descriptor */
707 pRxUsedDesc
= pQueueCtrl
->pUsedDescr
;
709 /* Check that ring is not FULL */
710 if( (pQueueCtrl
->pUsedDescr
== pQueueCtrl
->pCurrentDescr
) &&
711 (pQueueCtrl
->resource
!= 0) )
713 mvOsPrintf("%s %d: out of range Error resource=%d, curr=%p, used=%p\n",
714 __FUNCTION__
, pPortCtrl
->portNo
, pQueueCtrl
->resource
,
715 pQueueCtrl
->pCurrentDescr
, pQueueCtrl
->pUsedDescr
);
716 return MV_OUT_OF_RANGE
;
719 pRxUsedDesc
->bufPtr
= pPktInfo
->pFrags
->bufPhysAddr
;
720 pRxUsedDesc
->returnInfo
= (MV_ULONG
)pPktInfo
;
721 pRxUsedDesc
->bufSize
= pPktInfo
->pFrags
->bufSize
& ETH_RX_BUFFER_MASK
;
723 /* Invalidate data buffer accordingly with pktSize */
724 if(pPktInfo
->pktSize
!= 0)
726 ETH_PACKET_CACHE_INVALIDATE(pPktInfo
->pFrags
->bufVirtPtr
, pPktInfo
->pktSize
);
727 pPktInfo
->pktSize
= 0;
730 /* Return the descriptor to DMA ownership */
731 pRxUsedDesc
->cmdSts
= ETH_BUFFER_OWNED_BY_DMA
| ETH_RX_ENABLE_INTERRUPT_MASK
;
733 /* Flush descriptor and CPU pipe */
734 ETH_DESCR_FLUSH_INV(pPortCtrl
, pRxUsedDesc
);
736 pQueueCtrl
->resource
++;
738 /* Move the used descriptor pointer to the next descriptor */
739 pQueueCtrl
->pUsedDescr
= RX_NEXT_DESC_PTR(pRxUsedDesc
, pQueueCtrl
);
741 /* If ring became Full return MV_FULL */
742 if(pQueueCtrl
->pUsedDescr
== pQueueCtrl
->pCurrentDescr
)
749 #endif /* __mvEthGbe_h__ */